// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s

// Allocator stand-in used by all tests below. assume_aligned(32) promises the
// returned pointer is at least 32-byte aligned; alloc_align(2) additionally
// ties the alignment to the value of the 2nd parameter (`alignment`).
void *my_aligned_alloc(int size, int alignment) __attribute__((assume_aligned(32), alloc_align(2)));
| |
// Constant alloc_align argument (16) is weaker than the assume_aligned(32)
// guarantee: per the CHECK lines, the call carries only `align 32` and no
// llvm.assume sequence is emitted.
// CHECK-LABEL: @t0_immediate0(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 16)
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t0_immediate0() {
  return my_aligned_alloc(320, 16);
}
| |
// Constant alloc_align argument (32) matches assume_aligned(32) exactly: the
// call carries `align 32` and, per the CHECK lines, no llvm.assume is needed.
// CHECK-LABEL: @t1_immediate1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 32)
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t1_immediate1() {
  return my_aligned_alloc(320, 32);
}
| |
// Constant alloc_align argument (64) is stronger than assume_aligned(32): the
// CHECK lines show the call annotated with the larger `align 64`.
// CHECK-LABEL: @t2_immediate2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call align 64 i8* @my_aligned_alloc(i32 320, i32 64)
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t2_immediate2() {
  return my_aligned_alloc(320, 64);
}
| |
// Non-constant alloc_align argument: the call keeps the static `align 32`
// guarantee from assume_aligned, and the dynamic alignment is asserted at
// runtime via the mask-and-compare sequence feeding llvm.assume below.
// CHECK-LABEL: @t3_variable(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ALIGNMENT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t3_variable(int alignment) {
  return my_aligned_alloc(320, alignment);
}