; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0

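; The message-schedule kernel below builds SHA-256's sigma1 rotations out of
; @llvm.fshl funnel shifts. AVX2 has no packed 32-bit rotate (vprold needs
; AVX-512), so each rotate is expected to lower to a vpslld/vpsrld/vpor
; triple; the assertions check exactly that expansion.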
define void @SHA256_Compress_Generic(ptr noundef %ctx) #1 {
; CHECK-LABEL: SHA256_Compress_Generic:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movbel 0, %eax
; CHECK-NEXT:    movbel 12(%rdi), %ecx
; CHECK-NEXT:    vmovd %eax, %xmm0
; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,128,128,128,0,1,2,3,128,128,128,128,128,128,128,128]
; CHECK-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
; CHECK-NEXT:    vpsrld $17, %xmm2, %xmm0
; CHECK-NEXT:    vpslld $15, %xmm2, %xmm3
; CHECK-NEXT:    vpor %xmm0, %xmm3, %xmm0
; CHECK-NEXT:    vpsrld $19, %xmm2, %xmm3
; CHECK-NEXT:    vpslld $13, %xmm2, %xmm4
; CHECK-NEXT:    vpor %xmm3, %xmm4, %xmm3
; CHECK-NEXT:    vpxor %xmm3, %xmm0, %xmm3
; CHECK-NEXT:    vpxor %xmm2, %xmm3, %xmm0
; CHECK-NEXT:    vmovd %ecx, %xmm4
; CHECK-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
; CHECK-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
; CHECK-NEXT:    vpsrld $17, %xmm1, %xmm0
; CHECK-NEXT:    vpslld $15, %xmm1, %xmm4
; CHECK-NEXT:    vpor %xmm0, %xmm4, %xmm0
; CHECK-NEXT:    vpsrld $19, %xmm1, %xmm4
; CHECK-NEXT:    vpslld $13, %xmm1, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpxor %xmm4, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpsrld $17, %xmm0, %xmm4
; CHECK-NEXT:    vpslld $15, %xmm0, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpsrld $19, %xmm0, %xmm5
; CHECK-NEXT:    vpslld $13, %xmm0, %xmm6
; CHECK-NEXT:    vpor %xmm5, %xmm6, %xmm5
; CHECK-NEXT:    vpxor %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vpsrld $10, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm0, %xmm4, %xmm0
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpsrld $17, %xmm0, %xmm4
; CHECK-NEXT:    vpslld $15, %xmm0, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpsrld $19, %xmm0, %xmm5
; CHECK-NEXT:    vpslld $13, %xmm0, %xmm6
; CHECK-NEXT:    vpor %xmm5, %xmm6, %xmm5
; CHECK-NEXT:    vpxor %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vpsrld $10, %xmm0, %xmm5
; CHECK-NEXT:    vpxor %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3]
; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,3]
; CHECK-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vpsrld $17, %xmm2, %xmm3
; CHECK-NEXT:    vpslld $15, %xmm2, %xmm4
; CHECK-NEXT:    vpor %xmm3, %xmm4, %xmm3
; CHECK-NEXT:    vpsrld $19, %xmm2, %xmm4
; CHECK-NEXT:    vpslld $13, %xmm2, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
; CHECK-NEXT:    vpsrld $10, %xmm2, %xmm2
; CHECK-NEXT:    vpxor %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vpsrlq $32, %xmm1, %xmm3
; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm1
; CHECK-NEXT:    vpsrld $17, %xmm1, %xmm2
; CHECK-NEXT:    vpslld $15, %xmm1, %xmm4
; CHECK-NEXT:    vpor %xmm2, %xmm4, %xmm2
; CHECK-NEXT:    vpsrld $19, %xmm1, %xmm4
; CHECK-NEXT:    vpslld $13, %xmm1, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpxor %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vpsrld $10, %xmm1, %xmm4
; CHECK-NEXT:    vpxor %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vpsrld $17, %xmm2, %xmm3
; CHECK-NEXT:    vpslld $15, %xmm2, %xmm4
; CHECK-NEXT:    vpor %xmm3, %xmm4, %xmm3
; CHECK-NEXT:    vpsrld $19, %xmm2, %xmm4
; CHECK-NEXT:    vpslld $13, %xmm2, %xmm5
; CHECK-NEXT:    vpor %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
; CHECK-NEXT:    vpsrld $10, %xmm2, %xmm2
; CHECK-NEXT:    vpxor %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    vpsrld $17, %xmm0, %xmm2
; CHECK-NEXT:    vpslld $15, %xmm0, %xmm3
; CHECK-NEXT:    vpor %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vpsrld $19, %xmm0, %xmm3
; CHECK-NEXT:    vpslld $13, %xmm0, %xmm4
; CHECK-NEXT:    vpor %xmm3, %xmm4, %xmm3
; CHECK-NEXT:    vpxor %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vpsrld $10, %xmm0, %xmm3
; CHECK-NEXT:    vpxor %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vpsllq $32, %xmm1, %xmm3
; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-NEXT:    vmovdqu %ymm0, 132(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = load i32, ptr null, align 4
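  ; A lone "bswap $0" inline asm is recognized by the X86 backend and expanded
  ; to a generic byte swap, which here folds with the adjacent loads into the
  ; movbel instructions checked above (skylake has the +movbe feature).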
  %1 = tail call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %0) #3
  %arrayidx14 = getelementptr inbounds [64 x i32], ptr %ctx, i64 0, i64 3
  %2 = load i32, ptr %arrayidx14, align 4
  %3 = tail call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %2) #3
  %4 = insertelement <2 x i32> zeroinitializer, i32 %1, i64 1
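  ; On i32, fshl(x, x, 15) == rotl(x, 15) == rotr(x, 17), and
  ; fshl(x, x, 13) == rotr(x, 19); combined with the lshr-by-10 and the xors,
  ; each group below computes SHA-256's sigma1(x) = rotr(x,17) ^ rotr(x,19) ^ (x >> 10).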
  %5 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %4, <2 x i32> %4, <2 x i32> <i32 15, i32 15>)
  %6 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %4, <2 x i32> %4, <2 x i32> <i32 13, i32 13>)
  %7 = xor <2 x i32> %5, %6
  %8 = lshr <2 x i32> %4, zeroinitializer
  %9 = xor <2 x i32> %7, %8
  %10 = insertelement <2 x i32> zeroinitializer, i32 %3, i64 0
  %11 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %10, <2 x i32> <i32 1, i32 2>
  %12 = add <2 x i32> %11, %9
  %13 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %12, <2 x i32> %12, <2 x i32> <i32 15, i32 15>)
  %14 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %12, <2 x i32> %12, <2 x i32> <i32 13, i32 13>)
  %15 = xor <2 x i32> %13, %14
  %16 = lshr <2 x i32> %12, zeroinitializer
  %17 = xor <2 x i32> %15, %16
  %18 = add <2 x i32> %4, %17
  %19 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %18, <2 x i32> %18, <2 x i32> <i32 15, i32 15>)
  %20 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %18, <2 x i32> %18, <2 x i32> <i32 13, i32 13>)
  %21 = xor <2 x i32> %19, %20
  %22 = lshr <2 x i32> %18, <i32 10, i32 10>
  %23 = xor <2 x i32> %21, %22
  %24 = add <2 x i32> %4, %23
  %25 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %24, <2 x i32> %24, <2 x i32> <i32 15, i32 15>)
  %26 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %24, <2 x i32> %24, <2 x i32> <i32 13, i32 13>)
  %27 = xor <2 x i32> %25, %26
  %28 = lshr <2 x i32> %24, <i32 10, i32 10>
  %29 = xor <2 x i32> %27, %28
  %30 = shufflevector <2 x i32> %4, <2 x i32> %12, <2 x i32> <i32 1, i32 2>
  %31 = add <2 x i32> %30, %29
  %32 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %31, <2 x i32> %31, <2 x i32> <i32 15, i32 15>)
  %33 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %31, <2 x i32> %31, <2 x i32> <i32 13, i32 13>)
  %34 = xor <2 x i32> %32, %33
  %35 = lshr <2 x i32> %31, <i32 10, i32 10>
  %36 = xor <2 x i32> %34, %35
  %37 = shufflevector <2 x i32> %12, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 2>
  %38 = add <2 x i32> %37, %36
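  ; The four <2 x i32> stores below cover schedule words 33..40; index 33 is
  ; byte offset 132, matching the single vmovdqu to 132(%rdi) checked above.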
  %arrayidx918 = getelementptr inbounds [64 x i32], ptr %ctx, i64 0, i64 33
  store <2 x i32> %38, ptr %arrayidx918, align 4
  %arrayidx1012 = getelementptr inbounds [64 x i32], ptr %ctx, i64 0, i64 35
  %39 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %38, <2 x i32> %38, <2 x i32> <i32 15, i32 15>)
  %40 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %38, <2 x i32> %38, <2 x i32> <i32 13, i32 13>)
  %41 = xor <2 x i32> %39, %40
  %42 = lshr <2 x i32> %38, <i32 10, i32 10>
  %43 = xor <2 x i32> %41, %42
  %44 = add <2 x i32> %37, %43
  store <2 x i32> zeroinitializer, ptr %arrayidx1012, align 4
  %arrayidx1106 = getelementptr inbounds [64 x i32], ptr %ctx, i64 0, i64 37
  %45 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %44, <2 x i32> %44, <2 x i32> <i32 15, i32 15>)
  %46 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %44, <2 x i32> %44, <2 x i32> <i32 13, i32 13>)
  %47 = xor <2 x i32> %45, %46
  %48 = lshr <2 x i32> %44, <i32 10, i32 10>
  %49 = xor <2 x i32> %47, %48
  %50 = lshr <2 x i32> %24, zeroinitializer
  %51 = add <2 x i32> %50, %49
  store <2 x i32> %51, ptr %arrayidx1106, align 4
  %arrayidx1200 = getelementptr inbounds [64 x i32], ptr %ctx, i64 0, i64 39
  %52 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %51, <2 x i32> %51, <2 x i32> <i32 15, i32 15>)
  %53 = tail call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %51, <2 x i32> %51, <2 x i32> <i32 13, i32 13>)
  %54 = xor <2 x i32> %52, %53
  %55 = lshr <2 x i32> %51, <i32 10, i32 10>
  %56 = xor <2 x i32> %54, %55
  %57 = shufflevector <2 x i32> %38, <2 x i32> zeroinitializer, <2 x i32> <i32 poison, i32 0>
  %58 = insertelement <2 x i32> %57, i32 0, i64 0
  %59 = add <2 x i32> %58, %56
  store <2 x i32> %59, ptr %arrayidx1200, align 4
  ret void

  ; uselistorder directives
  uselistorder <2 x i32> %4, { 7, 0, 1, 6, 5, 4, 3, 2 }
  uselistorder <2 x i32> %38, { 6, 5, 4, 3, 2, 1, 0 }
}

declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) #2

; uselistorder directives
uselistorder ptr @llvm.fshl.v2i32, { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }
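; (uselistorder directives pin the order of each value's use list so that the
; file round-trips through assembly and bitcode deterministically.)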

attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
attributes #1 = { nounwind sspstrong memory(argmem: readwrite) uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "probe-stack"="inline-asm" "stack-protector-buffer-size"="8" "target-cpu"="skylake" "target-features"="+adx,+aes,+avx,+avx2,+bmi,+bmi2,+clflushopt,+cmov,+crc32,+cx16,+cx8,+f16c,+fma,+fsgsbase,+fxsr,+invpcid,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prfchw,+rdrnd,+rdseed,+sahf,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" }
attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #3 = { nounwind memory(none) }