; NOTE: Assertions have been autogenerated by utils/update_test_checks.py and
; manually reduced to the checks relevant to volatile accesses.
| ; RUN: opt -lower-matrix-intrinsics -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s |
| |
| ; REQUIRES: aarch64-registered-target |
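
; Check that lowering @llvm.matrix.multiply with forced fusion and a tile
; size of 2 preserves the volatile qualifier on operand loads and result
; stores.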
| |
| target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" |
| target triple = "aarch64-apple-ios" |
| |
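; Loads of both operands and the store of the result are volatile. Every
; lowered tile load and tile store must keep the volatile qualifier.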
| define void @multiply_all_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { |
| ; CHECK-LABEL: @multiply_all_volatile( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* |
| ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* |
| ; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 |
| ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* |
| ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* |
| ; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 |
| |
| ; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* |
| ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* |
| ; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* |
| ; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* |
| ; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %a = load volatile <4 x double>, <4 x double>* %A, align 8 |
| %b = load volatile <4 x double>, <4 x double>* %B, align 8 |
| |
%c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
| |
| store volatile <4 x double> %c, <4 x double>* %C, align 8 |
| ret void |
| } |
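
; Only the load of %A is volatile. Its tile loads stay volatile, while the
; loads of %B and the stores to %C are lowered to regular accesses.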
| define void @multiply_load0_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { |
| ; CHECK-LABEL: @multiply_load0_volatile( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* |
| ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* |
| ; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 |
| ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* |
| ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* |
| ; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 |
| |
| ; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* |
| ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* |
| ; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* |
| ; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* |
| ; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %a = load volatile <4 x double>, <4 x double>* %A, align 8 |
| %b = load <4 x double>, <4 x double>* %B, align 8 |
| |
%c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
| |
| store <4 x double> %c, <4 x double>* %C, align 8 |
| ret void |
| } |
| |
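; Only the load of %B is volatile; the tile loads of %A and the stores to %C
; must not become volatile.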
| define void @multiply_load1_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { |
| ; CHECK-LABEL: @multiply_load1_volatile( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* |
| ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* |
| ; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 |
| ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* |
| ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* |
| ; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* |
| ; CHECK-NEXT: load volatile <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 |
| |
| ; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* |
| ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* |
| ; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* |
| ; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* |
| ; CHECK-NEXT: store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %a = load <4 x double>, <4 x double>* %A, align 8 |
| %b = load volatile <4 x double>, <4 x double>* %B, align 8 |
| |
%c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
| |
| store <4 x double> %c, <4 x double>* %C, align 8 |
| ret void |
| } |
| |
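; Only the store of the result is volatile. Both operand loads are lowered to
; regular loads and both tile stores keep the volatile qualifier.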
| define void @multiply_store_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) { |
| ; CHECK-LABEL: @multiply_store_volatile( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double* |
| ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double* |
| ; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8 |
| ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double* |
| ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double* |
| ; CHECK-NEXT: [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>* |
| ; CHECK-NEXT: load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8 |
| |
| ; CHECK: [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double* |
| ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0 |
| ; CHECK-NEXT: [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>* |
| ; CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double* |
| ; CHECK-NEXT: [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>* |
| ; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8 |
| ; CHECK-NEXT: [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2 |
| ; CHECK-NEXT: [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>* |
| ; CHECK-NEXT: store volatile <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %a = load <4 x double>, <4 x double>* %A, align 8 |
| %b = load <4 x double>, <4 x double>* %B, align 8 |
| |
%c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
| |
| store volatile <4 x double> %c, <4 x double>* %C, align 8 |
| ret void |
| } |
| |
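; @llvm.matrix.multiply is overloaded on its result and operand vector types,
; so the declaration uses the mangled name.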
declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, i32)