// reference: https://github.com/MabezDev/llvm-project/blob/xtensa_release_9.0.1_with_rust_patches-31-05-2020-cherry-pick/clang/lib/CodeGen/TargetInfo.cpp#L9668-L9767
|
3 | 3 | use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
|
| 4 | +use crate::abi::{Abi, Size}; |
4 | 5 |
|
5 |
/// Maximum number of general-purpose registers (GPRs) available for
/// passing function arguments.
const NUM_ARG_GPRS: u64 = 6;
/// Largest argument size, in bits, that may still be passed in registers
/// (four 32-bit registers).
const MAX_ARG_IN_REGS_SIZE: u64 = 4 * 32;
/// Largest return-value size, in bits, that may be returned in registers
/// (two 32-bit registers).
const MAX_RET_IN_REGS_SIZE: u64 = 2 * 32;
|
9 | 9 |
|
10 | 10 | fn classify_ret_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
|
11 |
| - // The rules for return and argument types are the same, so defer to |
12 |
| - // classifyArgumentType. |
13 |
| - classify_arg_ty(arg, xlen, &mut 2); // two as max return size |
| 11 | + if arg.is_ignore() { |
| 12 | + return; |
| 13 | + } |
| 14 | + |
| 15 | + // The rules for return and argument types are the same, |
| 16 | + // so defer to `classify_arg_ty`. |
| 17 | + let mut arg_gprs_left = 2; |
| 18 | + let fixed = true; |
| 19 | + classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left); |
14 | 20 | }
|
15 | 21 |
|
16 |
| -fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, remaining_gpr: &mut u64) { |
17 |
| - // Determine the number of GPRs needed to pass the current argument |
18 |
| - // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" |
19 |
| - // register pairs, so may consume 3 registers. |
| 22 | +fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, fixed: bool, arg_gprs_left: &mut u64) { |
| 23 | + assert!(*arg_gprs_left <= NUM_ARG_GPRS, "Arg GPR tracking underflow"); |
20 | 24 |
|
21 |
| - let arg_size = arg.layout.size; |
22 |
| - if arg_size.bits() > MAX_ARG_IN_REGS_SIZE { |
23 |
| - arg.make_indirect(); |
| 25 | + // Ignore empty structs/unions. |
| 26 | + if arg.layout.is_zst() { |
24 | 27 | return;
|
25 | 28 | }
|
26 | 29 |
|
27 |
| - let alignment = arg.layout.align.abi; |
28 |
| - let mut required_gpr = 1u64; // at least one per arg |
| 30 | + let size = arg.layout.size.bits(); |
| 31 | + let needed_align = arg.layout.align.abi.bits(); |
| 32 | + let mut must_use_stack = false; |
| 33 | + |
| 34 | + // Determine the number of GPRs needed to pass the current argument |
| 35 | + // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" |
| 36 | + // register pairs, so may consume 3 registers. |
| 37 | + let mut needed_arg_gprs = 1u64; |
29 | 38 |
|
30 |
| - if alignment.bits() == 2 * xlen { |
31 |
| - required_gpr = 2 + (*remaining_gpr % 2); |
32 |
| - } else if arg_size.bits() > xlen && arg_size.bits() <= MAX_ARG_IN_REGS_SIZE { |
33 |
| - required_gpr = (arg_size.bits() + (xlen - 1)) / xlen; |
| 39 | + if !fixed && needed_align == 2 * xlen { |
| 40 | + needed_arg_gprs = 2 + (*arg_gprs_left % 2); |
| 41 | + } else if size > xlen && size <= MAX_ARG_IN_REGS_SIZE { |
| 42 | + needed_arg_gprs = (size + xlen - 1) / xlen; |
34 | 43 | }
|
35 | 44 |
|
36 |
| - let mut stack_required = false; |
37 |
| - if required_gpr > *remaining_gpr { |
38 |
| - stack_required = true; |
39 |
| - required_gpr = *remaining_gpr; |
| 45 | + if needed_arg_gprs > *arg_gprs_left { |
| 46 | + must_use_stack = true; |
| 47 | + needed_arg_gprs = *arg_gprs_left; |
40 | 48 | }
|
41 |
| - *remaining_gpr -= required_gpr; |
| 49 | + *arg_gprs_left -= needed_arg_gprs; |
42 | 50 |
|
43 |
| - // if a value can fit in a reg and the |
44 |
| - // stack is not required, extend |
45 |
| - if !arg.layout.is_aggregate() { |
46 |
| - // non-aggregate types |
47 |
| - if arg_size.bits() < xlen && !stack_required { |
| 51 | + if !arg.layout.is_aggregate() && !matches!(arg.layout.abi, Abi::Vector { .. }) { |
| 52 | + // All integral types are promoted to `xlen` |
| 53 | + // width, unless passed on the stack. |
| 54 | + if size < xlen && !must_use_stack { |
48 | 55 | arg.extend_integer_width_to(xlen);
|
| 56 | + return; |
49 | 57 | }
|
50 |
| - } else if arg_size.bits() as u64 <= MAX_ARG_IN_REGS_SIZE { |
51 |
| - // aggregate types |
52 |
| - // Aggregates which are <= 4*32 will be passed in registers if possible, |
53 |
| - // so coerce to integers. |
54 | 58 |
|
55 |
| - // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is |
56 |
| - // required, and a 2-element XLen array if only XLen alignment is |
| 59 | + return; |
| 60 | + } |
| 61 | + |
| 62 | + // Aggregates which are <= 4 * 32 will be passed in |
| 63 | + // registers if possible, so coerce to integers. |
| 64 | + if size as u64 <= MAX_ARG_IN_REGS_SIZE { |
| 65 | + let alignment = arg.layout.align.abi.bits(); |
| 66 | + |
| 67 | + // Use a single `xlen` int if possible, 2 * `xlen` if 2 * `xlen` alignment |
| 68 | + // is required, and a 2-element `xlen` array if only `xlen` alignment is |
57 | 69 | // required.
|
58 |
| - // if alignment == 2 * xlen { |
59 |
| - // arg.extend_integer_width_to(xlen * 2); |
60 |
| - // } else { |
61 |
| - // arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen); |
62 |
| - // } |
63 |
| - if alignment.bits() == 2 * xlen { |
64 |
| - arg.cast_to(Uniform { unit: Reg::i64(), total: arg_size }); |
| 70 | + if size <= xlen { |
| 71 | + arg.cast_to(Reg::i32()); |
| 72 | + return; |
| 73 | + } else if alignment == 2 * xlen { |
| 74 | + arg.cast_to(Reg::i64()); |
| 75 | + return; |
65 | 76 | } else {
|
66 |
| - //FIXME array type - this should be a homogenous array type |
67 |
| - // arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen); |
| 77 | + let total = Size::from_bits(((size + xlen - 1) / xlen) * xlen); |
| 78 | + arg.cast_to(Uniform { unit: Reg::i32(), total }); |
| 79 | + return; |
68 | 80 | }
|
69 |
| - } else { |
70 |
| - // if we get here the stack is required |
71 |
| - assert!(stack_required); |
72 |
| - arg.make_indirect(); |
73 | 81 | }
|
74 | 82 |
|
75 |
| - // if arg_size as u64 <= MAX_ARG_IN_REGS_SIZE { |
76 |
| - // let align = arg.layout.align.abi.bytes(); |
77 |
| - // let total = arg.layout.size; |
78 |
| - // arg.cast_to(Uniform { |
79 |
| - // unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, |
80 |
| - // total |
81 |
| - // }); |
82 |
| - // return; |
83 |
| - // } |
| 83 | + arg.make_indirect(); |
84 | 84 | }
|
85 | 85 |
|
86 |
| -pub fn compute_abi_info<Ty>(fabi: &mut FnAbi<'_, Ty>, xlen: u64) { |
87 |
| - if !fabi.ret.is_ignore() { |
88 |
| - classify_ret_ty(&mut fabi.ret, xlen); |
89 |
| - } |
| 86 | +pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>, xlen: u64) { |
| 87 | + classify_ret_ty(&mut fn_abi.ret, xlen); |
90 | 88 |
|
91 |
| - let return_indirect = |
92 |
| - fabi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE || fabi.ret.is_indirect(); |
| 89 | + let is_ret_indirect = |
| 90 | + fn_abi.ret.is_indirect() || fn_abi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE; |
93 | 91 |
|
94 |
| - let mut remaining_gpr = if return_indirect { NUM_ARG_GPR - 1 } else { NUM_ARG_GPR }; |
| 92 | + let mut arg_gprs_left = if is_ret_indirect { NUM_ARG_GPRS - 1 } else { NUM_ARG_GPRS }; |
95 | 93 |
|
96 |
| - for arg in &mut fabi.args { |
97 |
| - if arg.is_ignore() { |
98 |
| - continue; |
99 |
| - } |
100 |
| - classify_arg_ty(arg, xlen, &mut remaining_gpr); |
| 94 | + for arg in &mut fn_abi.args { |
| 95 | + let fixed = true; |
| 96 | + classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left); |
101 | 97 | }
|
102 | 98 | }
|
0 commit comments