@@ -332,6 +332,21 @@ void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
   }
 }
 
+void Assembler::emit_arith_ndd(int op1, int op2, Register dst, int32_t imm32) {
+  assert(isByte(op1) && isByte(op2), "wrong opcode");
+  assert(op1 == 0x81, "Unexpected opcode");
+  // This code cache friendly optimization saves 3 bytes per encoding, which offsets the EVEX encoding penalty.
+  if (is8bit(imm32)) {
+    emit_int24(op1 | 0x02,        // set sign bit
+               op2 | encode(dst),
+               imm32 & 0xFF);
+  }
+  else {
+    emit_int16(op1, (op2 | encode(dst)));
+    emit_int32(imm32);
+  }
+}
+
 // Force generation of a 4 byte immediate value even if it fits into 8bit
 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
   assert(isByte(op1) && isByte(op2), "wrong opcode");
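Note on the "saves 3 bytes" comment above: when the immediate fits in 8 bits, emit_arith_ndd sets the sign-extend bit on the 0x81 opcode (giving the 0x83 form) and emits opcode + ModRM + imm8 (3 bytes) instead of opcode + ModRM + imm32 (6 bytes). A minimal standalone sketch of that size arithmetic, separate from the assembler itself (the `encoded_size` helper below is hypothetical, for illustration only):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the two paths in emit_arith_ndd:
// bytes emitted after the prefix for an 0x81-class arithmetic op.
static int encoded_size(int32_t imm32) {
  bool fits8 = (-0x80 <= imm32 && imm32 < 0x80);  // same range test as is8bit()
  // imm8 path:  opcode (0x81 | 0x02 -> 0x83) + ModRM + imm8  = 3 bytes
  // imm32 path: opcode (0x81)                + ModRM + imm32 = 6 bytes
  return fits8 ? 3 : 6;
}

int main() {
  printf("imm32 = 5      -> %d bytes\n", encoded_size(5));       // 3
  printf("imm32 = 0x1234 -> %d bytes\n", encoded_size(0x1234));  // 6
  return 0;
}
```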
@@ -1461,7 +1476,7 @@ void Assembler::addl(Register dst, int32_t imm32) {
 void Assembler::eaddl(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xC0, src, imm32);
+  emit_arith_ndd(0x81, 0xC0, src, imm32);
 }
 
 void Assembler::addl(Register dst, Address src) {
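For readers unfamiliar with the APX NDD ("new data destination") forms used by these e-prefixed variants: eaddl(dst, src, imm32, no_flags) emits an EVEX-encoded add that writes the result to dst while leaving src unchanged, with the no_flags argument controlling whether status flags are updated. A plain C++ model of the intended register semantics only (not the encoding), assuming that reading of the NDD form:

```cpp
#include <cstdint>

// Behavioral model of an eaddl-style NDD add.
// The real instruction also suppresses flag updates when no_flags/NF is set.
static void ndd_addl_model(uint32_t& dst, uint32_t src, int32_t imm32) {
  dst = src + static_cast<uint32_t>(imm32);  // src is not modified
}
```

The remaining hunks apply the same one-line change (emit_arith to emit_arith_ndd) to the other 32-bit and 64-bit NDD arithmetic forms.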
@@ -1695,7 +1710,7 @@ void Assembler::andl(Register dst, int32_t imm32) {
 void Assembler::eandl(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xE0, src, imm32);
+  emit_arith_ndd(0x81, 0xE0, src, imm32);
 }
 
 void Assembler::andl(Address dst, Register src) {
@@ -4532,7 +4547,7 @@ void Assembler::orl(Register dst, int32_t imm32) {
 void Assembler::eorl(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xC8, src, imm32);
+  emit_arith_ndd(0x81, 0xC8, src, imm32);
 }
 
 void Assembler::orl(Register dst, Address src) {
@@ -7171,7 +7186,7 @@ void Assembler::subl(Register dst, int32_t imm32) {
 void Assembler::esubl(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xE8, src, imm32);
+  emit_arith_ndd(0x81, 0xE8, src, imm32);
 }
 
 // Force generation of a 4 byte immediate value even if it fits into 8bit
@@ -7512,7 +7527,7 @@ void Assembler::xorl(Register dst, int32_t imm32) {
 void Assembler::exorl(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xF0, src, imm32);
+  emit_arith_ndd(0x81, 0xF0, src, imm32);
 }
 
 void Assembler::xorl(Register dst, Address src) {
@@ -15158,7 +15173,7 @@ void Assembler::addq(Register dst, int32_t imm32) {
 void Assembler::eaddq(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xC0, src, imm32);
+  emit_arith_ndd(0x81, 0xC0, src, imm32);
 }
 
 void Assembler::addq(Register dst, Address src) {
@@ -15255,7 +15270,7 @@ void Assembler::andq(Register dst, int32_t imm32) {
 void Assembler::eandq(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xE0, src, imm32);
+  emit_arith_ndd(0x81, 0xE0, src, imm32);
 }
 
 void Assembler::andq(Register dst, Address src) {
@@ -16142,7 +16157,7 @@ void Assembler::orq(Register dst, int32_t imm32) {
 void Assembler::eorq(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xC8, src, imm32);
+  emit_arith_ndd(0x81, 0xC8, src, imm32);
 }
 
 void Assembler::orq_imm32(Register dst, int32_t imm32) {
@@ -16830,7 +16845,7 @@ void Assembler::subq(Register dst, int32_t imm32) {
 void Assembler::esubq(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xE8, src, imm32);
+  emit_arith_ndd(0x81, 0xE8, src, imm32);
 }
 
 // Force generation of a 4 byte immediate value even if it fits into 8bit
@@ -16961,7 +16976,7 @@ void Assembler::xorq(Register dst, int32_t imm32) {
 void Assembler::exorq(Register dst, Register src, int32_t imm32, bool no_flags) {
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
   evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
-  emit_arith(0x81, 0xF0, src, imm32);
+  emit_arith_ndd(0x81, 0xF0, src, imm32);
 }
 
 void Assembler::xorq(Address dst, int32_t imm32) {