| // REQUIRES: powerpc-registered-target |
| // RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s |
| // RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s |
| // RUN: %clang_cc1 -faltivec -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE |
| |
| vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; |
| vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 }; |
| vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; |
| vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; |
| vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 }; |
| vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 }; |
| vector pixel vp = { 1, 2, 3, 4, 5, 6, 7, 8 }; |
| vector bool int vbi = { 1, 0, 1, 0 }; |
| vector int vi = { -1, 2, -3, 4 }; |
| vector unsigned int vui = { 1, 2, 3, 4 }; |
| vector float vf = { -1.5, 2.5, -3.5, 4.5 }; |
| |
| vector bool char res_vbc; |
| vector signed char res_vsc; |
| vector unsigned char res_vuc; |
| vector bool short res_vbs; |
| vector short res_vs; |
| vector unsigned short res_vus; |
| vector pixel res_vp; |
| vector bool int res_vbi; |
| vector int res_vi; |
| vector unsigned int res_vui; |
| vector float res_vf; |
| |
| signed char param_sc; |
| unsigned char param_uc; |
| short param_s; |
| unsigned short param_us; |
| int param_i; |
| unsigned int param_ui; |
| float param_f; |
| |
| int res_sc; |
| int res_uc; |
| int res_s; |
| int res_us; |
| int res_i; |
| int res_ui; |
| int res_f; |
| |
| // CHECK-LABEL: define void @test1 |
| void test1() { |
| |
| /* vec_abs */ |
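// There is no single AltiVec absolute-value instruction; vec_abs is
// open-coded as max(x, 0 - x), hence the subtract-from-zero followed
// by a vmaxs* intrinsic in each case below.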
| vsc = vec_abs(vsc); |
| // CHECK: sub <16 x i8> zeroinitializer |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: sub <16 x i8> zeroinitializer |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| vs = vec_abs(vs); |
| // CHECK: sub <8 x i16> zeroinitializer |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: sub <8 x i16> zeroinitializer |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| vi = vec_abs(vi); |
| // CHECK: sub <4 x i32> zeroinitializer |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: sub <4 x i32> zeroinitializer |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| vf = vec_abs(vf); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
/* vec_abss */
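// vec_abss is the saturating variant: a saturating subtract from zero
// (vsubs*s) followed by a signed max (vmaxs*), so the most negative
// element value cannot wrap.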
| vsc = vec_abss(vsc); |
| // CHECK: @llvm.ppc.altivec.vsubsbs |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vsubsbs |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| vs = vec_abss(vs); |
| // CHECK: @llvm.ppc.altivec.vsubshs |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vsubshs |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| vi = vec_abss(vi); |
| // CHECK: @llvm.ppc.altivec.vsubsws |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vsubsws |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| /* vec_add */ |
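// The modulo (wrapping) adds lower to plain IR add/fadd instructions;
// the vec_vaddubm/vadduhm/vadduwm/vaddfp spellings further down are
// the same operations under their instruction names.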
| res_vsc = vec_add(vsc, vsc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vsc = vec_add(vbc, vsc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vsc = vec_add(vsc, vbc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_add(vuc, vuc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_add(vbc, vuc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_add(vuc, vbc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vs = vec_add(vs, vs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_add(vbs, vs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_add(vs, vbs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_add(vus, vus); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_add(vbs, vus); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_add(vus, vbs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vi = vec_add(vi, vi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vi = vec_add(vbi, vi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vi = vec_add(vi, vbi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_add(vui, vui); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_add(vbi, vui); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_add(vui, vbi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vf = vec_add(vf, vf); |
| // CHECK: fadd <4 x float> |
| // CHECK-LE: fadd <4 x float> |
| |
| res_vsc = vec_vaddubm(vsc, vsc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vsc = vec_vaddubm(vbc, vsc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vsc = vec_vaddubm(vsc, vbc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_vaddubm(vuc, vuc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_vaddubm(vbc, vuc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vuc = vec_vaddubm(vuc, vbc); |
| // CHECK: add <16 x i8> |
| // CHECK-LE: add <16 x i8> |
| |
| res_vs = vec_vadduhm(vs, vs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_vadduhm(vbs, vs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_vadduhm(vs, vbs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_vadduhm(vus, vus); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_vadduhm(vbs, vus); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vus = vec_vadduhm(vus, vbs); |
| // CHECK: add <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vi = vec_vadduwm(vi, vi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vi = vec_vadduwm(vbi, vi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vi = vec_vadduwm(vi, vbi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_vadduwm(vui, vui); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_vadduwm(vbi, vui); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vui = vec_vadduwm(vui, vbi); |
| // CHECK: add <4 x i32> |
| // CHECK-LE: add <4 x i32> |
| |
| res_vf = vec_vaddfp(vf, vf); |
| // CHECK: fadd <4 x float> |
| // CHECK-LE: fadd <4 x float> |
| |
| /* vec_addc */ |
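// vec_addc produces the carry-out of an unsigned 32-bit add and maps
// directly to the vaddcuw intrinsic.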
| res_vui = vec_addc(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vaddcuw |
| // CHECK-LE: @llvm.ppc.altivec.vaddcuw |
| |
| res_vui = vec_vaddcuw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vaddcuw |
| // CHECK-LE: @llvm.ppc.altivec.vaddcuw |
| |
| /* vec_adds */ |
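// The saturating adds cannot be expressed as a plain IR add, so each
// signedness/width combination maps to its vadd*s intrinsic.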
| res_vsc = vec_adds(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vsc = vec_adds(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vsc = vec_adds(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vuc = vec_adds(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vuc = vec_adds(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vuc = vec_adds(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vs = vec_adds(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vs = vec_adds(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vs = vec_adds(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vus = vec_adds(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vus = vec_adds(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vus = vec_adds(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vi = vec_adds(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vi = vec_adds(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vi = vec_adds(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vui = vec_adds(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| res_vui = vec_adds(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| res_vui = vec_adds(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| res_vsc = vec_vaddsbs(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vsc = vec_vaddsbs(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vsc = vec_vaddsbs(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vaddsbs |
| // CHECK-LE: @llvm.ppc.altivec.vaddsbs |
| |
| res_vuc = vec_vaddubs(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vuc = vec_vaddubs(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vuc = vec_vaddubs(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vaddubs |
| // CHECK-LE: @llvm.ppc.altivec.vaddubs |
| |
| res_vs = vec_vaddshs(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vs = vec_vaddshs(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vs = vec_vaddshs(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vaddshs |
| |
| res_vus = vec_vadduhs(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vus = vec_vadduhs(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vus = vec_vadduhs(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vadduhs |
| // CHECK-LE: @llvm.ppc.altivec.vadduhs |
| |
| res_vi = vec_vaddsws(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vi = vec_vaddsws(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vi = vec_vaddsws(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vaddsws |
| // CHECK-LE: @llvm.ppc.altivec.vaddsws |
| |
| res_vui = vec_vadduws(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| res_vui = vec_vadduws(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| res_vui = vec_vadduws(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vadduws |
| // CHECK-LE: @llvm.ppc.altivec.vadduws |
| |
| /* vec_and */ |
| res_vsc = vec_and(vsc, vsc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_and(vbc, vsc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_and(vsc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_and(vuc, vuc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_and(vbc, vuc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_and(vuc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vbc = vec_and(vbc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vs = vec_and(vs, vs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_and(vbs, vs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_and(vs, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_and(vus, vus); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_and(vbs, vus); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_and(vus, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vbs = vec_and(vbs, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vi = vec_and(vi, vi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_and(vbi, vi); |
| // CHECK: and <4 x i32> |
// CHECK-LE: and <4 x i32>
| |
| res_vi = vec_and(vi, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_and(vui, vui); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_and(vbi, vui); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_and(vui, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vbi = vec_and(vbi, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vsc = vec_vand(vsc, vsc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_vand(vbc, vsc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_vand(vsc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vand(vuc, vuc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vand(vbc, vuc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vand(vuc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vbc = vec_vand(vbc, vbc); |
| // CHECK: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vs = vec_vand(vs, vs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_vand(vbs, vs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_vand(vs, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vand(vus, vus); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vand(vbs, vus); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vand(vus, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vbs = vec_vand(vbs, vbs); |
| // CHECK: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vi = vec_vand(vi, vi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_vand(vbi, vi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_vand(vi, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vand(vui, vui); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vand(vbi, vui); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vand(vui, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vbi = vec_vand(vbi, vbi); |
| // CHECK: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| /* vec_andc */ |
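// vec_andc(a, b) computes a & ~b; the complement is materialized as an
// xor with an all-ones vector, so every case matches an xor followed
// by an and.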
| res_vsc = vec_andc(vsc, vsc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_andc(vbc, vsc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_andc(vsc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_andc(vuc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_andc(vbc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
res_vuc = vec_andc(vuc, vbc);
// CHECK: xor <16 x i8>
// CHECK: and <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: and <16 x i8>
| |
| res_vbc = vec_andc(vbc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vs = vec_andc(vs, vs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_andc(vbs, vs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_andc(vs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_andc(vus, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_andc(vbs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_andc(vus, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vbs = vec_andc(vbs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vi = vec_andc(vi, vi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_andc(vbi, vi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_andc(vi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_andc(vui, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_andc(vbi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_andc(vui, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_andc(vf, vf); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_andc(vbi, vf); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_andc(vf, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vsc = vec_vandc(vsc, vsc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_vandc(vbc, vsc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vsc = vec_vandc(vsc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vandc(vuc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vandc(vbc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vuc = vec_vandc(vuc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vbc = vec_vandc(vbc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| |
| res_vs = vec_vandc(vs, vs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_vandc(vbs, vs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vs = vec_vandc(vs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vandc(vus, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vandc(vbs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vus = vec_vandc(vus, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vbs = vec_vandc(vbs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| |
| res_vi = vec_vandc(vi, vi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_vandc(vbi, vi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vi = vec_vandc(vi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vandc(vui, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vandc(vbi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vui = vec_vandc(vui, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_vandc(vf, vf); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_vandc(vbi, vf); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| res_vf = vec_vandc(vf, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| |
| } |
| |
| // CHECK-LABEL: define void @test2 |
| void test2() { |
| /* vec_avg */ |
| res_vsc = vec_avg(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vavgsb |
| // CHECK-LE: @llvm.ppc.altivec.vavgsb |
| |
| res_vuc = vec_avg(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vavgub |
| // CHECK-LE: @llvm.ppc.altivec.vavgub |
| |
| res_vs = vec_avg(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vavgsh |
| // CHECK-LE: @llvm.ppc.altivec.vavgsh |
| |
| res_vus = vec_avg(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vavguh |
| // CHECK-LE: @llvm.ppc.altivec.vavguh |
| |
| res_vi = vec_avg(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vavgsw |
| // CHECK-LE: @llvm.ppc.altivec.vavgsw |
| |
| res_vui = vec_avg(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vavguw |
| // CHECK-LE: @llvm.ppc.altivec.vavguw |
| |
| res_vsc = vec_vavgsb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vavgsb |
| // CHECK-LE: @llvm.ppc.altivec.vavgsb |
| |
| res_vuc = vec_vavgub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vavgub |
| // CHECK-LE: @llvm.ppc.altivec.vavgub |
| |
| res_vs = vec_vavgsh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vavgsh |
| // CHECK-LE: @llvm.ppc.altivec.vavgsh |
| |
| res_vus = vec_vavguh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vavguh |
| // CHECK-LE: @llvm.ppc.altivec.vavguh |
| |
| res_vi = vec_vavgsw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vavgsw |
| // CHECK-LE: @llvm.ppc.altivec.vavgsw |
| |
| res_vui = vec_vavguw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vavguw |
| // CHECK-LE: @llvm.ppc.altivec.vavguw |
| |
| /* vec_ceil */ |
| res_vf = vec_ceil(vf); |
| // CHECK: @llvm.ppc.altivec.vrfip |
| // CHECK-LE: @llvm.ppc.altivec.vrfip |
| |
| res_vf = vec_vrfip(vf); |
| // CHECK: @llvm.ppc.altivec.vrfip |
| // CHECK-LE: @llvm.ppc.altivec.vrfip |
| |
| /* vec_cmpb */ |
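// vec_cmpb is the floating-point bounds compare and maps directly to
// the vcmpbfp intrinsic.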
| res_vi = vec_cmpb(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpbfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpbfp |
| |
| res_vi = vec_vcmpbfp(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpbfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpbfp |
| |
| /* vec_cmpeq */ |
| res_vbc = vec_cmpeq(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpequb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequb |
| |
| res_vbc = vec_cmpeq(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpequb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequb |
| |
| res_vbs = vec_cmpeq(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpequh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequh |
| |
| res_vbs = vec_cmpeq(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpequh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequh |
| |
| res_vbi = vec_cmpeq(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpequw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequw |
| |
| res_vbi = vec_cmpeq(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpequw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpequw |
| |
| res_vbi = vec_cmpeq(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpeqfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpeqfp |
| |
| /* vec_cmpge */ |
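// Only the floating-point form has a dedicated >= compare (vcmpgefp).
// The integer forms match vcmpgt* here because >= is expected to be
// derived from the greater-than compare.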
| res_vbc = vec_cmpge(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsb |
| |
| res_vbc = vec_cmpge(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtub |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtub |
| |
| res_vbs = vec_cmpge(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsh |
| |
| res_vbs = vec_cmpge(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh |
| |
| res_vbi = vec_cmpge(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsw |
| |
| res_vbi = vec_cmpge(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw |
| |
| res_vbi = vec_cmpge(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgefp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgefp |
| |
| res_vbi = vec_vcmpgefp(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgefp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgefp |
| } |
| |
| // CHECK-LABEL: define void @test5 |
| void test5() { |
| |
| /* vec_cmpgt */ |
| res_vbc = vec_cmpgt(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsb |
| |
| res_vbc = vec_cmpgt(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtub |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtub |
| |
| res_vbs = vec_cmpgt(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsh |
| |
| res_vbs = vec_cmpgt(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh |
| |
| res_vbi = vec_cmpgt(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsw |
| |
| res_vbi = vec_cmpgt(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw |
| |
| res_vbi = vec_cmpgt(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgtfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtfp |
| |
| res_vbc = vec_vcmpgtsb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsb |
| |
| res_vbc = vec_vcmpgtub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtub |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtub |
| |
| res_vbs = vec_vcmpgtsh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsh |
| |
| res_vbs = vec_vcmpgtuh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh |
| |
| res_vbi = vec_vcmpgtsw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsw |
| |
| res_vbi = vec_vcmpgtuw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw |
| |
| res_vbi = vec_vcmpgtfp(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgtfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtfp |
| |
| /* vec_cmple */ |
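// vec_cmple(a, b) is equivalent to vec_cmpge(b, a), so the intrinsics
// matched below mirror the vec_cmpge cases.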
| res_vbc = vec_cmple(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsb |
| |
| res_vbc = vec_cmple(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtub |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtub |
| |
| res_vbs = vec_cmple(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsh |
| |
| res_vbs = vec_cmple(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh |
| |
| res_vbi = vec_cmple(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsw |
| |
| res_vbi = vec_cmple(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw |
| |
| res_vbi = vec_cmple(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgefp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgefp |
| } |
| |
| // CHECK-LABEL: define void @test6 |
| void test6() { |
| /* vec_cmplt */ |
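// vec_cmplt(a, b) is computed as b > a, so each case matches the same
// vcmpgt* intrinsic as the corresponding vec_cmpgt call.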
| res_vbc = vec_cmplt(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsb |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsb |
| |
| res_vbc = vec_cmplt(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vcmpgtub |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtub |
| |
| res_vbs = vec_cmplt(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsh |
| |
| res_vbs = vec_cmplt(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuh |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuh |
| |
| res_vbi = vec_cmplt(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vcmpgtsw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtsw |
| |
| res_vbi = vec_cmplt(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vcmpgtuw |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtuw |
| |
| res_vbi = vec_cmplt(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vcmpgtfp |
| // CHECK-LE: @llvm.ppc.altivec.vcmpgtfp |
| |
| /* vec_ctf */ |
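// The fixed-point conversions take a compile-time scale exponent as
// their second argument (0 here, i.e. no scaling): vec_ctf converts
// integers to float via vcfsx/vcfux, and vec_cts/vec_ctu below convert
// back via vctsxs/vctuxs.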
| res_vf = vec_ctf(vi, 0); |
| // CHECK: @llvm.ppc.altivec.vcfsx |
| // CHECK-LE: @llvm.ppc.altivec.vcfsx |
| |
| res_vf = vec_ctf(vui, 0); |
| // CHECK: @llvm.ppc.altivec.vcfux |
| // CHECK-LE: @llvm.ppc.altivec.vcfux |
| |
| res_vf = vec_vcfsx(vi, 0); |
| // CHECK: @llvm.ppc.altivec.vcfsx |
| // CHECK-LE: @llvm.ppc.altivec.vcfsx |
| |
| res_vf = vec_vcfux(vui, 0); |
| // CHECK: @llvm.ppc.altivec.vcfux |
| // CHECK-LE: @llvm.ppc.altivec.vcfux |
| |
| /* vec_cts */ |
| res_vi = vec_cts(vf, 0); |
| // CHECK: @llvm.ppc.altivec.vctsxs |
| // CHECK-LE: @llvm.ppc.altivec.vctsxs |
| |
| res_vi = vec_vctsxs(vf, 0); |
| // CHECK: @llvm.ppc.altivec.vctsxs |
| // CHECK-LE: @llvm.ppc.altivec.vctsxs |
| |
| /* vec_ctu */ |
| res_vui = vec_ctu(vf, 0); |
| // CHECK: @llvm.ppc.altivec.vctuxs |
| // CHECK-LE: @llvm.ppc.altivec.vctuxs |
| |
| res_vui = vec_vctuxs(vf, 0); |
| // CHECK: @llvm.ppc.altivec.vctuxs |
| // CHECK-LE: @llvm.ppc.altivec.vctuxs |
| |
| /* vec_div */ |
| res_vsc = vec_div(vsc, vsc); |
| // CHECK: sdiv <16 x i8> |
| // CHECK-LE: sdiv <16 x i8> |
| |
| res_vuc = vec_div(vuc, vuc); |
| // CHECK: udiv <16 x i8> |
| // CHECK-LE: udiv <16 x i8> |
| |
| res_vs = vec_div(vs, vs); |
| // CHECK: sdiv <8 x i16> |
| // CHECK-LE: sdiv <8 x i16> |
| |
| res_vus = vec_div(vus, vus); |
| // CHECK: udiv <8 x i16> |
| // CHECK-LE: udiv <8 x i16> |
| |
| res_vi = vec_div(vi, vi); |
| // CHECK: sdiv <4 x i32> |
| // CHECK-LE: sdiv <4 x i32> |
| |
| res_vui = vec_div(vui, vui); |
| // CHECK: udiv <4 x i32> |
| // CHECK-LE: udiv <4 x i32> |
| |
| /* vec_dss */ |
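// The data-stream prefetch operations map one-to-one onto the
// dss/dssall/dst/dstst/dststt/dstt intrinsics; the stream ID and
// control operands are compile-time constants.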
| vec_dss(0); |
| // CHECK: @llvm.ppc.altivec.dss |
| // CHECK-LE: @llvm.ppc.altivec.dss |
| |
| /* vec_dssall */ |
| vec_dssall(); |
| // CHECK: @llvm.ppc.altivec.dssall |
| // CHECK-LE: @llvm.ppc.altivec.dssall |
| |
| /* vec_dst */ |
| vec_dst(&vsc, 0, 0); |
| // CHECK: @llvm.ppc.altivec.dst |
| // CHECK-LE: @llvm.ppc.altivec.dst |
| |
| /* vec_dstst */ |
| vec_dstst(&vs, 0, 0); |
| // CHECK: @llvm.ppc.altivec.dstst |
| // CHECK-LE: @llvm.ppc.altivec.dstst |
| |
| /* vec_dststt */ |
| vec_dststt(¶m_i, 0, 0); |
| // CHECK: @llvm.ppc.altivec.dststt |
| // CHECK-LE: @llvm.ppc.altivec.dststt |
| |
| /* vec_dstt */ |
| vec_dstt(&vf, 0, 0); |
| // CHECK: @llvm.ppc.altivec.dstt |
| // CHECK-LE: @llvm.ppc.altivec.dstt |
| |
| /* vec_expte */ |
| res_vf = vec_expte(vf); |
| // CHECK: @llvm.ppc.altivec.vexptefp |
| // CHECK-LE: @llvm.ppc.altivec.vexptefp |
| |
| res_vf = vec_vexptefp(vf); |
| // CHECK: @llvm.ppc.altivec.vexptefp |
| // CHECK-LE: @llvm.ppc.altivec.vexptefp |
| |
| /* vec_floor */ |
| res_vf = vec_floor(vf); |
| // CHECK: @llvm.ppc.altivec.vrfim |
| // CHECK-LE: @llvm.ppc.altivec.vrfim |
| |
| res_vf = vec_vrfim(vf); |
| // CHECK: @llvm.ppc.altivec.vrfim |
| // CHECK-LE: @llvm.ppc.altivec.vrfim |
| |
| /* vec_ld */ |
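// vec_ld is a 16-byte-aligned vector load and maps to the lvx
// intrinsic for every element type; the pointee type only determines
// the result type of the load.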
| res_vsc = vec_ld(0, &vsc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vsc = vec_ld(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vuc = vec_ld(0, &vuc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vuc = vec_ld(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbc = vec_ld(0, &vbc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vs = vec_ld(0, &vs); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vs = vec_ld(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vus = vec_ld(0, &vus); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vus = vec_ld(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbs = vec_ld(0, &vbs); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vp = vec_ld(0, &vp); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vi = vec_ld(0, &vi); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vi = vec_ld(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vui = vec_ld(0, &vui); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vui = vec_ld(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbi = vec_ld(0, &vbi); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vf = vec_ld(0, &vf); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vf = vec_ld(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vsc = vec_lvx(0, &vsc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vsc = vec_lvx(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vuc = vec_lvx(0, &vuc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vuc = vec_lvx(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbc = vec_lvx(0, &vbc); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vs = vec_lvx(0, &vs); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vs = vec_lvx(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vus = vec_lvx(0, &vus); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vus = vec_lvx(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbs = vec_lvx(0, &vbs); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vp = vec_lvx(0, &vp); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vi = vec_lvx(0, &vi); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vi = vec_lvx(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vui = vec_lvx(0, &vui); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vui = vec_lvx(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vbi = vec_lvx(0, &vbi); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vf = vec_lvx(0, &vf); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| res_vf = vec_lvx(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvx |
| // CHECK-LE: @llvm.ppc.altivec.lvx |
| |
| /* vec_lde */ |
| res_vsc = vec_lde(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvebx |
| // CHECK-LE: @llvm.ppc.altivec.lvebx |
| |
| res_vuc = vec_lde(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvebx |
| // CHECK-LE: @llvm.ppc.altivec.lvebx |
| |
| res_vs = vec_lde(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvehx |
| // CHECK-LE: @llvm.ppc.altivec.lvehx |
| |
| res_vus = vec_lde(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvehx |
| // CHECK-LE: @llvm.ppc.altivec.lvehx |
| |
| res_vi = vec_lde(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| res_vui = vec_lde(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| res_vf = vec_lde(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| res_vsc = vec_lvebx(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvebx |
| // CHECK-LE: @llvm.ppc.altivec.lvebx |
| |
| res_vuc = vec_lvebx(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvebx |
| // CHECK-LE: @llvm.ppc.altivec.lvebx |
| |
| res_vs = vec_lvehx(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvehx |
| // CHECK-LE: @llvm.ppc.altivec.lvehx |
| |
| res_vus = vec_lvehx(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvehx |
| // CHECK-LE: @llvm.ppc.altivec.lvehx |
| |
| res_vi = vec_lvewx(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| res_vui = vec_lvewx(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| res_vf = vec_lvewx(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvewx |
| // CHECK-LE: @llvm.ppc.altivec.lvewx |
| |
| /* vec_ldl */ |
| res_vsc = vec_ldl(0, &vsc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vsc = vec_ldl(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vuc = vec_ldl(0, &vuc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vuc = vec_ldl(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbc = vec_ldl(0, &vbc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vs = vec_ldl(0, &vs); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vs = vec_ldl(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vus = vec_ldl(0, &vus); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vus = vec_ldl(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbs = vec_ldl(0, &vbs); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vp = vec_ldl(0, &vp); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vi = vec_ldl(0, &vi); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vi = vec_ldl(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vui = vec_ldl(0, &vui); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vui = vec_ldl(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbi = vec_ldl(0, &vbi); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vf = vec_ldl(0, &vf); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vf = vec_ldl(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vsc = vec_lvxl(0, &vsc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vsc = vec_lvxl(0, ¶m_sc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vuc = vec_lvxl(0, &vuc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbc = vec_lvxl(0, &vbc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vuc = vec_lvxl(0, ¶m_uc); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vs = vec_lvxl(0, &vs); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vs = vec_lvxl(0, ¶m_s); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vus = vec_lvxl(0, &vus); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vus = vec_lvxl(0, ¶m_us); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbs = vec_lvxl(0, &vbs); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vp = vec_lvxl(0, &vp); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vi = vec_lvxl(0, &vi); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vi = vec_lvxl(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vui = vec_lvxl(0, &vui); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vui = vec_lvxl(0, ¶m_ui); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vbi = vec_lvxl(0, &vbi); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vf = vec_lvxl(0, &vf); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| res_vf = vec_lvxl(0, ¶m_f); |
| // CHECK: @llvm.ppc.altivec.lvxl |
| // CHECK-LE: @llvm.ppc.altivec.lvxl |
| |
| /* vec_loge */ |
| res_vf = vec_loge(vf); |
| // CHECK: @llvm.ppc.altivec.vlogefp |
| // CHECK-LE: @llvm.ppc.altivec.vlogefp |
| |
| res_vf = vec_vlogefp(vf); |
| // CHECK: @llvm.ppc.altivec.vlogefp |
| // CHECK-LE: @llvm.ppc.altivec.vlogefp |
| |
| /* vec_lvsl */ |
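// lvsl (and lvsr below) produce the permute control vector used with
// vperm to realign unaligned data, based on the low-order bits of the
// effective address.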
| res_vuc = vec_lvsl(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvsl |
| // CHECK-LE: @llvm.ppc.altivec.lvsl |
| |
| /* vec_lvsr */ |
| res_vuc = vec_lvsr(0, ¶m_i); |
| // CHECK: @llvm.ppc.altivec.lvsr |
| // CHECK-LE: @llvm.ppc.altivec.lvsr |
| |
| /* vec_madd */ |
res_vf = vec_madd(vf, vf, vf);
| // CHECK: @llvm.ppc.altivec.vmaddfp |
| // CHECK-LE: @llvm.ppc.altivec.vmaddfp |
| |
| res_vf = vec_vmaddfp(vf, vf, vf); |
| // CHECK: @llvm.ppc.altivec.vmaddfp |
| // CHECK-LE: @llvm.ppc.altivec.vmaddfp |
| |
| /* vec_madds */ |
| res_vs = vec_madds(vs, vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmhaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vmhaddshs |
| |
| res_vs = vec_vmhaddshs(vs, vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmhaddshs |
| // CHECK-LE: @llvm.ppc.altivec.vmhaddshs |
| |
| /* vec_max */ |
| res_vsc = vec_max(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vsc = vec_max(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vsc = vec_max(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vuc = vec_max(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vuc = vec_max(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vuc = vec_max(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vs = vec_max(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vs = vec_max(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vs = vec_max(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vus = vec_max(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vus = vec_max(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vus = vec_max(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vi = vec_max(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vi = vec_max(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vi = vec_max(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vui = vec_max(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vui = vec_max(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vui = vec_max(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vf = vec_max(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vmaxfp |
| // CHECK-LE: @llvm.ppc.altivec.vmaxfp |
| |
| res_vsc = vec_vmaxsb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vsc = vec_vmaxsb(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vsc = vec_vmaxsb(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vmaxsb |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsb |
| |
| res_vuc = vec_vmaxub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vuc = vec_vmaxub(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vuc = vec_vmaxub(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vmaxub |
| // CHECK-LE: @llvm.ppc.altivec.vmaxub |
| |
| res_vs = vec_vmaxsh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vs = vec_vmaxsh(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vs = vec_vmaxsh(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vmaxsh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsh |
| |
| res_vus = vec_vmaxuh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vus = vec_vmaxuh(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vus = vec_vmaxuh(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vmaxuh |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuh |
| |
| res_vi = vec_vmaxsw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vi = vec_vmaxsw(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vi = vec_vmaxsw(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vmaxsw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxsw |
| |
| res_vui = vec_vmaxuw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vui = vec_vmaxuw(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vui = vec_vmaxuw(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vmaxuw |
| // CHECK-LE: @llvm.ppc.altivec.vmaxuw |
| |
| res_vf = vec_vmaxfp(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vmaxfp |
| // CHECK-LE: @llvm.ppc.altivec.vmaxfp |
| |
| /* vec_mergeh */ |
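// The merge operations are emitted as vperm with a constant control
// vector rather than as vmrg* instructions. Both byte orders match
// only the vperm call here, so any endian-specific difference in the
// control vector is not verified by these patterns.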
| res_vsc = vec_mergeh(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_mergeh(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_mergeh(vbc, vbc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_mergeh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_mergeh(vp, vp); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_mergeh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_mergeh(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_mergeh(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_mergeh(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_mergeh(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_mergeh(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vsc = vec_vmrghb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_vmrghb(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_vmrghb(vbc, vbc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_vmrghh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_vmrghh(vp, vp); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_vmrghh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_vmrghh(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_vmrghw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_vmrghw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_vmrghw(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_vmrghw(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| /* vec_mergel */ |
| res_vsc = vec_mergel(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_mergel(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_mergel(vbc, vbc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_mergel(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
res_vp = vec_mergel(vp, vp);
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_mergel(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_mergel(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_mergel(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_mergel(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_mergel(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_mergel(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vsc = vec_vmrglb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_vmrglb(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_vmrglb(vbc, vbc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_vmrglh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_vmrglh(vp, vp); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_vmrglh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_vmrglh(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_vmrglw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_vmrglw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_vmrglw(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_vmrglw(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| /* vec_mfvscr */ |
| vus = vec_mfvscr(); |
| // CHECK: @llvm.ppc.altivec.mfvscr |
| // CHECK-LE: @llvm.ppc.altivec.mfvscr |
| |
| /* vec_min */ |
| res_vsc = vec_min(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vsc = vec_min(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vsc = vec_min(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vuc = vec_min(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vuc = vec_min(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vuc = vec_min(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vs = vec_min(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vs = vec_min(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vs = vec_min(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vus = vec_min(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vus = vec_min(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vus = vec_min(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vi = vec_min(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vi = vec_min(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vi = vec_min(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vui = vec_min(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vui = vec_min(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vui = vec_min(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vf = vec_min(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vminfp |
| // CHECK-LE: @llvm.ppc.altivec.vminfp |
| |
| res_vsc = vec_vminsb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vsc = vec_vminsb(vbc, vsc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vsc = vec_vminsb(vsc, vbc); |
| // CHECK: @llvm.ppc.altivec.vminsb |
| // CHECK-LE: @llvm.ppc.altivec.vminsb |
| |
| res_vuc = vec_vminub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vuc = vec_vminub(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vuc = vec_vminub(vuc, vbc); |
| // CHECK: @llvm.ppc.altivec.vminub |
| // CHECK-LE: @llvm.ppc.altivec.vminub |
| |
| res_vs = vec_vminsh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vs = vec_vminsh(vbs, vs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vs = vec_vminsh(vs, vbs); |
| // CHECK: @llvm.ppc.altivec.vminsh |
| // CHECK-LE: @llvm.ppc.altivec.vminsh |
| |
| res_vus = vec_vminuh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vus = vec_vminuh(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vus = vec_vminuh(vus, vbs); |
| // CHECK: @llvm.ppc.altivec.vminuh |
| // CHECK-LE: @llvm.ppc.altivec.vminuh |
| |
| res_vi = vec_vminsw(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vi = vec_vminsw(vbi, vi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vi = vec_vminsw(vi, vbi); |
| // CHECK: @llvm.ppc.altivec.vminsw |
| // CHECK-LE: @llvm.ppc.altivec.vminsw |
| |
| res_vui = vec_vminuw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vui = vec_vminuw(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vui = vec_vminuw(vui, vbi); |
| // CHECK: @llvm.ppc.altivec.vminuw |
| // CHECK-LE: @llvm.ppc.altivec.vminuw |
| |
| res_vf = vec_vminfp(vf, vf); |
| // CHECK: @llvm.ppc.altivec.vminfp |
| // CHECK-LE: @llvm.ppc.altivec.vminfp |
| |
| /* vec_mladd */ |
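| /* Halfword a*b+c modulo 2^16, so clang emits generic mul and add IR rather than an intrinsic. */ |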
| res_vus = vec_mladd(vus, vus, vus); |
| // CHECK: mul <8 x i16> |
| // CHECK: add <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_mladd(vus, vs, vs); |
| // CHECK: mul <8 x i16> |
| // CHECK: add <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_mladd(vs, vus, vus); |
| // CHECK: mul <8 x i16> |
| // CHECK: add <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| res_vs = vec_mladd(vs, vs, vs); |
| // CHECK: mul <8 x i16> |
| // CHECK: add <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| // CHECK-LE: add <8 x i16> |
| |
| /* vec_mradds */ |
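| /* Rounded high-half multiply-add with signed saturation; stays an intrinsic call (vmhraddshs). */ |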
| res_vs = vec_mradds(vs, vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmhraddshs |
| // CHECK-LE: @llvm.ppc.altivec.vmhraddshs |
| |
| res_vs = vec_vmhraddshs(vs, vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmhraddshs |
| // CHECK-LE: @llvm.ppc.altivec.vmhraddshs |
| |
| /* vec_msum */ |
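| /* Multiplies subelements and sums the partial products into each word of the accumulator. */ |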
| res_vi = vec_msum(vsc, vuc, vi); |
| // CHECK: @llvm.ppc.altivec.vmsummbm |
| // CHECK-LE: @llvm.ppc.altivec.vmsummbm |
| |
| res_vui = vec_msum(vuc, vuc, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumubm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumubm |
| |
| res_vi = vec_msum(vs, vs, vi); |
| // CHECK: @llvm.ppc.altivec.vmsumshm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumshm |
| |
| res_vui = vec_msum(vus, vus, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumuhm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumuhm |
| |
| res_vi = vec_vmsummbm(vsc, vuc, vi); |
| // CHECK: @llvm.ppc.altivec.vmsummbm |
| // CHECK-LE: @llvm.ppc.altivec.vmsummbm |
| |
| res_vui = vec_vmsumubm(vuc, vuc, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumubm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumubm |
| |
| res_vi = vec_vmsumshm(vs, vs, vi); |
| // CHECK: @llvm.ppc.altivec.vmsumshm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumshm |
| |
| res_vui = vec_vmsumuhm(vus, vus, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumuhm |
| // CHECK-LE: @llvm.ppc.altivec.vmsumuhm |
| |
| /* vec_msums */ |
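| /* Saturating variant of vec_msum for halfword inputs. */ |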
| res_vi = vec_msums(vs, vs, vi); |
| // CHECK: @llvm.ppc.altivec.vmsumshs |
| // CHECK-LE: @llvm.ppc.altivec.vmsumshs |
| |
| res_vui = vec_msums(vus, vus, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumuhs |
| // CHECK-LE: @llvm.ppc.altivec.vmsumuhs |
| |
| res_vi = vec_vmsumshs(vs, vs, vi); |
| // CHECK: @llvm.ppc.altivec.vmsumshs |
| // CHECK-LE: @llvm.ppc.altivec.vmsumshs |
| |
| res_vui = vec_vmsumuhs(vus, vus, vui); |
| // CHECK: @llvm.ppc.altivec.vmsumuhs |
| // CHECK-LE: @llvm.ppc.altivec.vmsumuhs |
| |
| /* vec_mtvscr */ |
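| /* Writes the operand to the VSCR; every vector type is accepted and all overloads lower to the same intrinsic. */ |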
| vec_mtvscr(vsc); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vuc); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vbc); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vs); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vus); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vbs); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vp); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vi); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vui); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| vec_mtvscr(vbi); |
| // CHECK: @llvm.ppc.altivec.mtvscr |
| // CHECK-LE: @llvm.ppc.altivec.mtvscr |
| |
| /* vec_mul */ |
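| /* Element-wise modular multiply, lowered to generic mul IR at every element width. */ |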
| res_vsc = vec_mul(vsc, vsc); |
| // CHECK: mul <16 x i8> |
| // CHECK-LE: mul <16 x i8> |
| |
| res_vuc = vec_mul(vuc, vuc); |
| // CHECK: mul <16 x i8> |
| // CHECK-LE: mul <16 x i8> |
| |
| res_vs = vec_mul(vs, vs); |
| // CHECK: mul <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| |
| res_vus = vec_mul(vus, vus); |
| // CHECK: mul <8 x i16> |
| // CHECK-LE: mul <8 x i16> |
| |
| res_vi = vec_mul(vi, vi); |
| // CHECK: mul <4 x i32> |
| // CHECK-LE: mul <4 x i32> |
| |
| res_vui = vec_mul(vui, vui); |
| // CHECK: mul <4 x i32> |
| // CHECK-LE: mul <4 x i32> |
| |
| /* vec_mule */ |
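| /* Multiplies even-numbered elements into double-width results; element numbering |
| reverses on little-endian, so clang emits the odd-multiply instructions there, as |
| the LE lines below show. */ |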
| res_vs = vec_mule(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmulesb |
| // CHECK-LE: @llvm.ppc.altivec.vmulosb |
| |
| res_vus = vec_mule(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmuleub |
| // CHECK-LE: @llvm.ppc.altivec.vmuloub |
| |
| res_vi = vec_mule(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmulesh |
| // CHECK-LE: @llvm.ppc.altivec.vmulosh |
| |
| res_vui = vec_mule(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmuleuh |
| // CHECK-LE: @llvm.ppc.altivec.vmulouh |
| |
| res_vs = vec_vmulesb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmulesb |
| // CHECK-LE: @llvm.ppc.altivec.vmulosb |
| |
| res_vus = vec_vmuleub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmuleub |
| // CHECK-LE: @llvm.ppc.altivec.vmuloub |
| |
| res_vi = vec_vmulesh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmulesh |
| // CHECK-LE: @llvm.ppc.altivec.vmulosh |
| |
| res_vui = vec_vmuleuh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmuleuh |
| // CHECK-LE: @llvm.ppc.altivec.vmulouh |
| |
| /* vec_mulo */ |
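| /* Odd-element counterpart of vec_mule; the even/odd swap on little-endian mirrors the cases above. */ |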
| res_vs = vec_mulo(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmulosb |
| // CHECK-LE: @llvm.ppc.altivec.vmulesb |
| |
| res_vus = vec_mulo(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmuloub |
| // CHECK-LE: @llvm.ppc.altivec.vmuleub |
| |
| res_vi = vec_mulo(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmulosh |
| // CHECK-LE: @llvm.ppc.altivec.vmulesh |
| |
| res_vui = vec_mulo(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmulouh |
| // CHECK-LE: @llvm.ppc.altivec.vmuleuh |
| |
| res_vs = vec_vmulosb(vsc, vsc); |
| // CHECK: @llvm.ppc.altivec.vmulosb |
| // CHECK-LE: @llvm.ppc.altivec.vmulesb |
| |
| res_vus = vec_vmuloub(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vmuloub |
| // CHECK-LE: @llvm.ppc.altivec.vmuleub |
| |
| res_vi = vec_vmulosh(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vmulosh |
| // CHECK-LE: @llvm.ppc.altivec.vmulesh |
| |
| res_vui = vec_vmulouh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vmulouh |
| // CHECK-LE: @llvm.ppc.altivec.vmuleuh |
| |
| /* vec_nmsub */ |
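| /* Fused negative multiply-subtract: -(a*b - c). */ |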
| res_vf = vec_nmsub(vf, vf, vf); |
| // CHECK: @llvm.ppc.altivec.vnmsubfp |
| // CHECK-LE: @llvm.ppc.altivec.vnmsubfp |
| |
| res_vf = vec_vnmsubfp(vf, vf, vf); |
| // CHECK: @llvm.ppc.altivec.vnmsubfp |
| // CHECK-LE: @llvm.ppc.altivec.vnmsubfp |
| |
| /* vec_nor */ |
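| /* NOR has no single IR instruction, so it expands to an or followed by an xor with all-ones. */ |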
| res_vsc = vec_nor(vsc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vuc = vec_nor(vuc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vuc = vec_nor(vbc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vs = vec_nor(vs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vus = vec_nor(vus, vus); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vus = vec_nor(vbs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vi = vec_nor(vi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vui = vec_nor(vui, vui); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vui = vec_nor(vbi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vf = vec_nor(vf, vf); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vsc = vec_vnor(vsc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vuc = vec_vnor(vuc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vuc = vec_vnor(vbc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK: xor <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| |
| res_vs = vec_vnor(vs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vus = vec_vnor(vus, vus); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vus = vec_vnor(vbs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK: xor <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| |
| res_vi = vec_vnor(vi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vui = vec_vnor(vui, vui); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vui = vec_vnor(vbi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| res_vf = vec_vnor(vf, vf); |
| // CHECK: or <4 x i32> |
| // CHECK: xor <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| |
| /* vec_or */ |
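| /* Bitwise OR; all overloads lower to a plain or instruction. */ |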
| res_vsc = vec_or(vsc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_or(vbc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_or(vsc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_or(vuc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_or(vbc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_or(vuc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_or(vbc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vs = vec_or(vs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_or(vbs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_or(vs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_or(vus, vus); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_or(vbs, vus); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_or(vus, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_or(vbs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vi = vec_or(vi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_or(vbi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_or(vi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_or(vui, vui); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_or(vbi, vui); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_or(vui, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_or(vbi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_or(vf, vf); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_or(vbi, vf); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_or(vf, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vsc = vec_vor(vsc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_vor(vbc, vsc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_vor(vsc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_vor(vuc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_vor(vbc, vuc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_vor(vuc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_vor(vbc, vbc); |
| // CHECK: or <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vs = vec_vor(vs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_vor(vbs, vs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_vor(vs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_vor(vus, vus); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_vor(vbs, vus); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_vor(vus, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_vor(vbs, vbs); |
| // CHECK: or <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vi = vec_vor(vi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_vor(vbi, vi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_vor(vi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_vor(vui, vui); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_vor(vbi, vui); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_vor(vui, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_vor(vbi, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_vor(vf, vf); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_vor(vbi, vf); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_vor(vf, vbi); |
| // CHECK: or <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| /* vec_pack */ |
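| /* Truncating pack to half-width elements; the modulo packs are expressed as a vperm gathering the low-order bytes. */ |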
| res_vsc = vec_pack(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_pack(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_pack(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_pack(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_pack(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_pack(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vsc = vec_vpkuhum(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_vpkuhum(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_vpkuhum(vbs, vbs); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_vpkuwum(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_vpkuwum(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_vpkuwum(vbi, vbi); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| /* vec_packpx */ |
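| /* Packs 32-bit pixels into 16-bit 1/5/5/5 pixels. */ |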
| res_vp = vec_packpx(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkpx |
| // CHECK-LE: @llvm.ppc.altivec.vpkpx |
| |
| res_vp = vec_vpkpx(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkpx |
| // CHECK-LE: @llvm.ppc.altivec.vpkpx |
| |
| /* vec_packs */ |
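| /* Pack with saturation matching the operand signedness. */ |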
| res_vsc = vec_packs(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vpkshss |
| // CHECK-LE: @llvm.ppc.altivec.vpkshss |
| |
| res_vuc = vec_packs(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vpkuhus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuhus |
| |
| res_vs = vec_packs(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vpkswss |
| // CHECK-LE: @llvm.ppc.altivec.vpkswss |
| |
| res_vus = vec_packs(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkuwus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuwus |
| |
| res_vsc = vec_vpkshss(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vpkshss |
| // CHECK-LE: @llvm.ppc.altivec.vpkshss |
| |
| res_vuc = vec_vpkuhus(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vpkuhus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuhus |
| |
| res_vs = vec_vpkswss(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vpkswss |
| // CHECK-LE: @llvm.ppc.altivec.vpkswss |
| |
| res_vus = vec_vpkuwus(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkuwus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuwus |
| |
| /* vec_packsu */ |
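| /* Pack with unsigned saturation regardless of input signedness. */ |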
| res_vuc = vec_packsu(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vpkshus |
| // CHECK-LE: @llvm.ppc.altivec.vpkshus |
| |
| res_vuc = vec_packsu(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vpkuhus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuhus |
| |
| res_vus = vec_packsu(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vpkswus |
| // CHECK-LE: @llvm.ppc.altivec.vpkswus |
| |
| res_vus = vec_packsu(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkuwus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuwus |
| |
| res_vuc = vec_vpkshus(vs, vs); |
| // CHECK: @llvm.ppc.altivec.vpkshus |
| // CHECK-LE: @llvm.ppc.altivec.vpkshus |
| |
| res_vuc = vec_vpkshus(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vpkuhus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuhus |
| |
| res_vus = vec_vpkswus(vi, vi); |
| // CHECK: @llvm.ppc.altivec.vpkswus |
| // CHECK-LE: @llvm.ppc.altivec.vpkswus |
| |
| res_vus = vec_vpkswus(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vpkuwus |
| // CHECK-LE: @llvm.ppc.altivec.vpkuwus |
| |
| /* vec_perm */ |
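| /* Selects bytes from the concatenation of the two sources under control of the mask vector. */ |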
| res_vsc = vec_perm(vsc, vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_perm(vuc, vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_perm(vbc, vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_perm(vs, vs, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_perm(vus, vus, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_perm(vbs, vbs, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_perm(vp, vp, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_perm(vi, vi, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_perm(vui, vui, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_perm(vbi, vbi, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_perm(vf, vf, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vsc = vec_vperm(vsc, vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_vperm(vuc, vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbc = vec_vperm(vbc, vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_vperm(vs, vs, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_vperm(vus, vus, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_vperm(vbs, vbs, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_vperm(vp, vp, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_vperm(vi, vi, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_vperm(vui, vui, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_vperm(vbi, vbi, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_vperm(vf, vf, vuc); |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| /* vec_re */ |
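| /* Hardware reciprocal estimate. */ |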
| res_vf = vec_re(vf); |
| // CHECK: @llvm.ppc.altivec.vrefp |
| // CHECK-LE: @llvm.ppc.altivec.vrefp |
| |
| res_vf = vec_vrefp(vf); |
| // CHECK: @llvm.ppc.altivec.vrefp |
| // CHECK-LE: @llvm.ppc.altivec.vrefp |
| |
| /* vec_rl */ |
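| /* Rotates each element left by the corresponding count in the second operand. */ |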
| res_vsc = vec_rl(vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vrlb |
| // CHECK-LE: @llvm.ppc.altivec.vrlb |
| |
| res_vuc = vec_rl(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vrlb |
| // CHECK-LE: @llvm.ppc.altivec.vrlb |
| |
| res_vs = vec_rl(vs, vus); |
| // CHECK: @llvm.ppc.altivec.vrlh |
| // CHECK-LE: @llvm.ppc.altivec.vrlh |
| |
| res_vus = vec_rl(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vrlh |
| // CHECK-LE: @llvm.ppc.altivec.vrlh |
| |
| res_vi = vec_rl(vi, vui); |
| // CHECK: @llvm.ppc.altivec.vrlw |
| // CHECK-LE: @llvm.ppc.altivec.vrlw |
| |
| res_vui = vec_rl(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vrlw |
| // CHECK-LE: @llvm.ppc.altivec.vrlw |
| |
| res_vsc = vec_vrlb(vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vrlb |
| // CHECK-LE: @llvm.ppc.altivec.vrlb |
| |
| res_vuc = vec_vrlb(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vrlb |
| // CHECK-LE: @llvm.ppc.altivec.vrlb |
| |
| res_vs = vec_vrlh(vs, vus); |
| // CHECK: @llvm.ppc.altivec.vrlh |
| // CHECK-LE: @llvm.ppc.altivec.vrlh |
| |
| res_vus = vec_vrlh(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vrlh |
| // CHECK-LE: @llvm.ppc.altivec.vrlh |
| |
| res_vi = vec_vrlw(vi, vui); |
| // CHECK: @llvm.ppc.altivec.vrlw |
| // CHECK-LE: @llvm.ppc.altivec.vrlw |
| |
| res_vui = vec_vrlw(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vrlw |
| // CHECK-LE: @llvm.ppc.altivec.vrlw |
| |
| /* vec_round */ |
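| /* Rounds each element to the nearest integral value. */ |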
| res_vf = vec_round(vf); |
| // CHECK: @llvm.ppc.altivec.vrfin |
| // CHECK-LE: @llvm.ppc.altivec.vrfin |
| |
| res_vf = vec_vrfin(vf); |
| // CHECK: @llvm.ppc.altivec.vrfin |
| // CHECK-LE: @llvm.ppc.altivec.vrfin |
| |
| /* vec_rsqrte */ |
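| /* Reciprocal square-root estimate. */ |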
| res_vf = vec_rsqrte(vf); |
| // CHECK: @llvm.ppc.altivec.vrsqrtefp |
| // CHECK-LE: @llvm.ppc.altivec.vrsqrtefp |
| |
| res_vf = vec_vrsqrtefp(vf); |
| // CHECK: @llvm.ppc.altivec.vrsqrtefp |
| // CHECK-LE: @llvm.ppc.altivec.vrsqrtefp |
| |
| /* vec_sel */ |
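| /* vec_sel(a, b, c) takes bits of b where c is set and bits of a elsewhere, hence the xor/and/and/or expansion below. */ |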
| res_vsc = vec_sel(vsc, vsc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_sel(vsc, vsc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_sel(vuc, vuc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_sel(vuc, vuc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_sel(vbc, vbc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_sel(vbc, vbc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vs = vec_sel(vs, vs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_sel(vs, vs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_sel(vus, vus, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_sel(vus, vus, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_sel(vbs, vbs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_sel(vbs, vbs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vi = vec_sel(vi, vi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_sel(vi, vi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_sel(vui, vui, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_sel(vui, vui, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_sel(vbi, vbi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_sel(vbi, vbi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_sel(vf, vf, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_sel(vf, vf, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vsc = vec_vsel(vsc, vsc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vsc = vec_vsel(vsc, vsc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_vsel(vuc, vuc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vuc = vec_vsel(vuc, vuc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_vsel(vbc, vbc, vuc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vbc = vec_vsel(vbc, vbc, vbc); |
| // CHECK: xor <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: and <16 x i8> |
| // CHECK: or <16 x i8> |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: and <16 x i8> |
| // CHECK-LE: or <16 x i8> |
| |
| res_vs = vec_vsel(vs, vs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vs = vec_vsel(vs, vs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_vsel(vus, vus, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vus = vec_vsel(vus, vus, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_vsel(vbs, vbs, vus); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vbs = vec_vsel(vbs, vbs, vbs); |
| // CHECK: xor <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: and <8 x i16> |
| // CHECK: or <8 x i16> |
| // CHECK-LE: xor <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: and <8 x i16> |
| // CHECK-LE: or <8 x i16> |
| |
| res_vi = vec_vsel(vi, vi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vi = vec_vsel(vi, vi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_vsel(vui, vui, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vui = vec_vsel(vui, vui, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_vsel(vbi, vbi, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vbi = vec_vsel(vbi, vbi, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_vsel(vf, vf, vui); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| res_vf = vec_vsel(vf, vf, vbi); |
| // CHECK: xor <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: and <4 x i32> |
| // CHECK: or <4 x i32> |
| // CHECK-LE: xor <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: and <4 x i32> |
| // CHECK-LE: or <4 x i32> |
| |
| /* vec_sl */ |
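| /* Element-wise left shift, lowered to generic shl IR. */ |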
| res_vsc = vec_sl(vsc, vuc); |
| // CHECK: shl <16 x i8> |
| // CHECK-LE: shl <16 x i8> |
| |
| res_vuc = vec_sl(vuc, vuc); |
| // CHECK: shl <16 x i8> |
| // CHECK-LE: shl <16 x i8> |
| |
| res_vs = vec_sl(vs, vus); |
| // CHECK: shl <8 x i16> |
| // CHECK-LE: shl <8 x i16> |
| |
| res_vus = vec_sl(vus, vus); |
| // CHECK: shl <8 x i16> |
| // CHECK-LE: shl <8 x i16> |
| |
| res_vi = vec_sl(vi, vui); |
| // CHECK: shl <4 x i32> |
| // CHECK-LE: shl <4 x i32> |
| |
| res_vui = vec_sl(vui, vui); |
| // CHECK: shl <4 x i32> |
| // CHECK-LE: shl <4 x i32> |
| |
| res_vsc = vec_vslb(vsc, vuc); |
| // CHECK: shl <16 x i8> |
| // CHECK-LE: shl <16 x i8> |
| |
| res_vuc = vec_vslb(vuc, vuc); |
| // CHECK: shl <16 x i8> |
| // CHECK-LE: shl <16 x i8> |
| |
| res_vs = vec_vslh(vs, vus); |
| // CHECK: shl <8 x i16> |
| // CHECK-LE: shl <8 x i16> |
| |
| res_vus = vec_vslh(vus, vus); |
| // CHECK: shl <8 x i16> |
| // CHECK-LE: shl <8 x i16> |
| |
| res_vi = vec_vslw(vi, vui); |
| // CHECK: shl <4 x i32> |
| // CHECK-LE: shl <4 x i32> |
| |
| res_vui = vec_vslw(vui, vui); |
| // CHECK: shl <4 x i32> |
| // CHECK-LE: shl <4 x i32> |
| |
| /* vec_sld */ |
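| /* Concatenates the operands and shifts left by a byte immediate via vperm; big-endian |
| builds mask indices shift+0..15 (the add lines), while little-endian subtracts from |
| 16..31 and complements the mask before the vperm. */ |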
| res_vsc = vec_sld(vsc, vsc, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_sld(vuc, vuc, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_sld(vs, vs, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_sld(vus, vus, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbs = vec_sld(vbs, vbs, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
| // CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
| // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
| // CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32> |
| // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> |
| |
| res_vp = vec_sld(vp, vp, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_sld(vi, vi, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_sld(vui, vui, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vbi = vec_sld(vbi, vbi, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8> |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: xor <16 x i8> |
| // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8> |
| |
| res_vf = vec_sld(vf, vf, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vsc = vec_vsldoi(vsc, vsc, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vuc = vec_vsldoi(vuc, vuc, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vs = vec_vsldoi(vs, vs, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vus = vec_vsldoi(vus, vus, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vp = vec_vsldoi(vp, vp, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vi = vec_vsldoi(vi, vi, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vui = vec_vsldoi(vui, vui, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| res_vf = vec_vsldoi(vf, vf, 0); |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 |
| // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 |
| // CHECK: @llvm.ppc.altivec.vperm |
| // CHECK-LE: sub nsw i32 16 |
| // CHECK-LE: sub nsw i32 17 |
| // CHECK-LE: sub nsw i32 18 |
| // CHECK-LE: sub nsw i32 31 |
| // CHECK-LE: @llvm.ppc.altivec.vperm |
| |
| /* vec_sll */ |
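| /* Shifts the entire 128-bit vector left by a bit count held in the second operand; every type combination lowers to the same vsl intrinsic. */ |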
| res_vsc = vec_sll(vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vsc = vec_sll(vsc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vsc = vec_sll(vsc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_sll(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_sll(vuc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_sll(vuc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_sll(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_sll(vbc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_sll(vbc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_sll(vs, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_sll(vs, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_sll(vs, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_sll(vus, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_sll(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_sll(vus, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_sll(vbs, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_sll(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_sll(vbs, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_sll(vp, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_sll(vp, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_sll(vp, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_sll(vi, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_sll(vi, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_sll(vi, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vui = vec_sll(vui, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vui = vec_sll(vui, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vui = vec_sll(vui, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbi = vec_sll(vbi, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbi = vec_sll(vbi, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbi = vec_sll(vbi, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vsc = vec_vsl(vsc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vsc = vec_vsl(vsc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vsc = vec_vsl(vsc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_vsl(vuc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_vsl(vuc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vuc = vec_vsl(vuc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_vsl(vbc, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_vsl(vbc, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbc = vec_vsl(vbc, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_vsl(vs, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_vsl(vs, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vs = vec_vsl(vs, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_vsl(vus, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_vsl(vus, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vus = vec_vsl(vus, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_vsl(vbs, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_vsl(vbs, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vbs = vec_vsl(vbs, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_vsl(vp, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_vsl(vp, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vp = vec_vsl(vp, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_vsl(vi, vuc); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_vsl(vi, vus); |
| // CHECK: @llvm.ppc.altivec.vsl |
| // CHECK-LE: @llvm.ppc.altivec.vsl |
| |
| res_vi = vec_vsl(vi, vui); |
| // CHECK: @llvm.ppc.altivec.vsl |
|