; RUN: opt < %s -sroa -S | FileCheck %s
; Make sure that SROA doesn't lose nonnull metadata
; on loads from allocas that get optimized out.
; SROA should forward the memcpy'd value directly from %arg, and because the
; original load carried !nonnull metadata, it must materialize the same fact
; as an llvm.assume on the forwarded value rather than silently dropping it.
define float* @yummy_nonnull(float** %arg) {
; CHECK-LABEL: define float* @yummy_nonnull(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %[[RETURN:.*]] = load float*, float** %arg, align 8
; CHECK-NEXT:    %[[ASSUME:.*]] = icmp ne float* %[[RETURN]], null
; CHECK-NEXT:    call void @llvm.assume(i1 %[[ASSUME]])
; CHECK-NEXT:    ret float* %[[RETURN]]
entry:
  %buf = alloca float*
  %_arg_i8 = bitcast float** %arg to i8*
  %_buf_i8 = bitcast float** %buf to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %_buf_i8, i8* %_arg_i8, i64 8, i32 8, i1 false)
  %ret = load float*, float** %buf, align 8, !nonnull !0
  ret float* %ret
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)

; !nonnull takes no operands; an empty metadata node is all that is required.
; This definition is referenced by the `!nonnull !0` load above.
!0 = !{}