@@ -101,6 +101,10 @@
          (opcode) == POP_JUMP_IF_FALSE || \
          (opcode) == POP_JUMP_IF_TRUE)
 
+#define IS_JUMP_OPCODE(opcode) \
+        (IS_VIRTUAL_JUMP_OPCODE(opcode) || \
+         is_bit_set_in_table(_PyOpcode_Jump, opcode))
+
 /* opcodes which are not emitted in codegen stage, only by the assembler */
 #define IS_ASSEMBLER_OPCODE(opcode) \
         ((opcode) == JUMP_FORWARD || \
@@ -124,6 +128,17 @@
          (opcode) == POP_JUMP_BACKWARD_IF_TRUE || \
          (opcode) == POP_JUMP_BACKWARD_IF_FALSE)
 
+#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
+        ((opcode) == JUMP || \
+         (opcode) == JUMP_NO_INTERRUPT || \
+         (opcode) == JUMP_FORWARD || \
+         (opcode) == JUMP_BACKWARD || \
+         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_SCOPE_EXIT_OPCODE(opcode) \
+        ((opcode) == RETURN_VALUE || \
+         (opcode) == RAISE_VARARGS || \
+         (opcode) == RERAISE)
 
 #define IS_TOP_LEVEL_AWAIT(c) ( \
         (c->c_flags->cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
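Aside, not part of the patch: the new macros layer a cheap classification over raw opcodes -- virtual jumps by explicit comparison, real jumps by a bitmask lookup in the _PyOpcode_Jump table. Below is a minimal standalone C sketch of that bit-table idiom; the opcode values and table contents are invented for illustration, and only the general shape of is_bit_set_in_table mirrors the real code.

/* Standalone sketch, not CPython source. */
#include <stdint.h>
#include <stdio.h>

enum { LOAD_FAST = 3, JUMP = 5, RETURN_VALUE = 7 };   /* invented values */

/* Like _PyOpcode_Jump: bit n is set iff opcode n is a jump. */
static const uint32_t jump_table[8] = { 1u << JUMP };

static inline int
is_bit_set_in_table(const uint32_t *table, int bitindex) {
    return (table[bitindex >> 5] >> (bitindex & 31)) & 1;
}

#define IS_JUMP_OPCODE(op)       is_bit_set_in_table(jump_table, (op))
#define IS_SCOPE_EXIT_OPCODE(op) ((op) == RETURN_VALUE)

int main(void) {
    printf("%d %d %d\n",
           IS_JUMP_OPCODE(JUMP),                 /* 1 */
           IS_JUMP_OPCODE(LOAD_FAST),            /* 0 */
           IS_SCOPE_EXIT_OPCODE(RETURN_VALUE));  /* 1 */
    return 0;
}

Packing the jump predicate into a table keeps the test O(1) and avoids a long opcode switch at every call site.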
@@ -142,11 +157,6 @@ struct instr {
     int i_end_col_offset;
 };
 
-typedef struct excepthandler {
-    struct instr *setup;
-    int offset;
-} ExceptHandler;
-
 typedef struct exceptstack {
     struct basicblock_ *handlers[CO_MAXBLOCKS+1];
     int depth;
@@ -187,8 +197,7 @@ is_block_push(struct instr *instr)
 static inline int
 is_jump(struct instr *i)
 {
-    return IS_VIRTUAL_JUMP_OPCODE(i->i_opcode) ||
-           is_bit_set_in_table(_PyOpcode_Jump, i->i_opcode);
+    return IS_JUMP_OPCODE(i->i_opcode);
 }
 
 static int
@@ -254,16 +263,10 @@ typedef struct basicblock_ {
     int b_startdepth;
     /* instruction offset for block, computed by assemble_jump_offsets() */
     int b_offset;
-    /* Basic block has no fall through (it ends with a return, raise or jump) */
-    unsigned b_nofallthrough : 1;
     /* Basic block is an exception handler that preserves lasti */
     unsigned b_preserve_lasti : 1;
     /* Used by compiler passes to mark whether they have visited a basic block. */
     unsigned b_visited : 1;
-    /* Basic block exits scope (it ends with a return or raise) */
-    unsigned b_exit : 1;
-    /* b_return is true if a RETURN_VALUE opcode is inserted. */
-    unsigned b_return : 1;
     /* b_cold is true if this block is not perf critical (like an exception handler) */
     unsigned b_cold : 1;
     /* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
@@ -279,6 +282,29 @@ basicblock_last_instr(basicblock *b) {
     return NULL;
 }
 
+static inline int
+basicblock_returns(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && last->i_opcode == RETURN_VALUE;
+}
+
+static inline int
+basicblock_exits_scope(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return last && IS_SCOPE_EXIT_OPCODE(last->i_opcode);
+}
+
+static inline int
+basicblock_nofallthrough(basicblock *b) {
+    struct instr *last = basicblock_last_instr(b);
+    return (last &&
+            (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
+             IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
+}
+
+#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
+#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
+
 /* fblockinfo tracks the current frame block.
 
 A frame block is used to handle loops, try/except, and try/finally.
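Aside, not part of the patch: these helpers replace the cached b_return/b_exit/b_nofallthrough bitfields with properties derived on demand from a block's last instruction, so passes that rewrite instructions no longer have to remember to update a flag. A minimal standalone sketch of the derive-don't-cache pattern, with invented opcodes:

/* Standalone sketch, not CPython source. */
#include <stdio.h>

enum { OP_NOP, OP_RETURN_VALUE, OP_JUMP };          /* invented opcodes */
#define IS_SCOPE_EXIT_OPCODE(op)          ((op) == OP_RETURN_VALUE)
#define IS_UNCONDITIONAL_JUMP_OPCODE(op)  ((op) == OP_JUMP)

struct instr { int i_opcode; };
typedef struct basicblock_ { struct instr *b_instr; int b_iused; } basicblock;

static struct instr *
basicblock_last_instr(basicblock *b) {
    return b->b_iused > 0 ? &b->b_instr[b->b_iused - 1] : NULL;
}

static int
basicblock_nofallthrough(basicblock *b) {
    struct instr *last = basicblock_last_instr(b);
    return last && (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
                    IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode));
}

int main(void) {
    struct instr code[] = {{OP_NOP}, {OP_JUMP}};
    basicblock b = {code, 2};
    printf("nofallthrough: %d\n", basicblock_nofallthrough(&b));  /* 1 */
    code[1].i_opcode = OP_NOP;   /* rewrite, as an optimizer might */
    printf("nofallthrough: %d\n", basicblock_nofallthrough(&b));  /* 0 */
    return 0;
}

This is why several hunks below (remove_redundant_jumps, optimize_basic_block, optimize_cfg) can simply drop their flag updates: the property now tracks instruction rewrites automatically.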
@@ -852,7 +878,7 @@ compiler_copy_block(struct compiler *c, basicblock *block)
     /* Cannot copy a block if it has a fallthrough, since
      * a block can only have one fallthrough predecessor.
      */
-    assert(block->b_nofallthrough);
+    assert(BB_NO_FALLTHROUGH(block));
     basicblock *result = compiler_new_block(c);
     if (result == NULL) {
         return NULL;
@@ -864,8 +890,6 @@ compiler_copy_block(struct compiler *c, basicblock *block)
         }
         result->b_instr[n] = block->b_instr[i];
     }
-    result->b_exit = block->b_exit;
-    result->b_nofallthrough = 1;
     return result;
 }
 
@@ -1223,11 +1247,7 @@ static int
 is_end_of_basic_block(struct instr *instr)
 {
     int opcode = instr->i_opcode;
-
-    return is_jump(instr) ||
-        opcode == RETURN_VALUE ||
-        opcode == RAISE_VARARGS ||
-        opcode == RERAISE;
+    return is_jump(instr) || IS_SCOPE_EXIT_OPCODE(opcode);
 }
 
 static int
@@ -1263,9 +1283,6 @@ basicblock_addop_line(basicblock *b, int opcode, int line,
     struct instr *i = &b->b_instr[off];
     i->i_opcode = opcode;
     i->i_oparg = 0;
-    if (opcode == RETURN_VALUE) {
-        b->b_return = 1;
-    }
     i->i_lineno = line;
     i->i_end_lineno = end_line;
     i->i_col_offset = col_offset;
@@ -7144,11 +7161,8 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
             depth = new_depth;
             assert(!IS_ASSEMBLER_OPCODE(instr->i_opcode));
-            if (instr->i_opcode == JUMP_NO_INTERRUPT ||
-                instr->i_opcode == JUMP ||
-                instr->i_opcode == RETURN_VALUE ||
-                instr->i_opcode == RAISE_VARARGS ||
-                instr->i_opcode == RERAISE)
+            if (IS_UNCONDITIONAL_JUMP_OPCODE(instr->i_opcode) ||
+                IS_SCOPE_EXIT_OPCODE(instr->i_opcode))
             {
                 /* remaining code is dead */
                 next = NULL;
@@ -7159,7 +7173,7 @@ stackdepth(struct compiler *c, basicblock *entry)
             }
         }
         if (next != NULL) {
-            assert(b->b_nofallthrough == 0);
+            assert(BB_HAS_FALLTHROUGH(b));
             stackdepth_push(&sp, next, depth);
         }
     }
@@ -7314,7 +7328,7 @@ label_exception_targets(basicblock *entry) {
             instr->i_except = handler;
             assert(i == b->b_iused - 1);
             if (!instr->i_target->b_visited) {
-                if (b->b_nofallthrough == 0) {
+                if (BB_HAS_FALLTHROUGH(b)) {
                     ExceptStack *copy = copy_except_stack(except_stack);
                     if (copy == NULL) {
                         goto error;
@@ -7334,7 +7348,7 @@ label_exception_targets(basicblock *entry) {
                 instr->i_except = handler;
             }
         }
-        if (b->b_nofallthrough == 0 && !b->b_next->b_visited) {
+        if (BB_HAS_FALLTHROUGH(b) && !b->b_next->b_visited) {
             assert(except_stack != NULL);
             b->b_next->b_exceptstack = except_stack;
             todo[0] = b->b_next;
@@ -7373,7 +7387,7 @@ mark_warm(basicblock *entry) {
         assert(!b->b_except_predecessors);
         b->b_warm = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough && !next->b_visited) {
+        if (next && BB_HAS_FALLTHROUGH(b) && !next->b_visited) {
             *sp++ = next;
             next->b_visited = 1;
         }
@@ -7417,7 +7431,7 @@ mark_cold(basicblock *entry) {
         basicblock *b = *(--sp);
         b->b_cold = 1;
         basicblock *next = b->b_next;
-        if (next && !b->b_nofallthrough) {
+        if (next && BB_HAS_FALLTHROUGH(b)) {
             if (!next->b_warm && !next->b_visited) {
                 *sp++ = next;
                 next->b_visited = 1;
@@ -7452,15 +7466,14 @@ push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) {
     /* If we have a cold block with fallthrough to a warm block, add */
     /* an explicit jump instead of fallthrough */
     for (basicblock *b = entry; b != NULL; b = b->b_next) {
-        if (b->b_cold && !b->b_nofallthrough && b->b_next && b->b_next->b_warm) {
+        if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) {
             basicblock *explicit_jump = compiler_new_block(c);
             if (explicit_jump == NULL) {
                 return -1;
             }
             basicblock_add_jump(explicit_jump, JUMP, -1, 0, 0, 0, b->b_next);
 
             explicit_jump->b_cold = 1;
-            explicit_jump->b_nofallthrough = 1;
             explicit_jump->b_next = b->b_next;
             b->b_next = explicit_jump;
         }
@@ -7953,7 +7966,7 @@ scan_block_for_local(int target, basicblock *b, bool unsafe_to_start,
     if (unsafe) {
         // unsafe at end of this block,
         // so unsafe at start of next blocks
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             MAYBE_PUSH(b->b_next);
         }
         if (b->b_iused > 0) {
@@ -8281,9 +8294,10 @@ dump_instr(struct instr *i)
 static void
 dump_basicblock(const basicblock *b)
 {
-    const char *b_return = b->b_return ? "return " : "";
+    const char *b_return = basicblock_returns(b) ? "return " : "";
     fprintf(stderr, "[%d %d %d %p] used: %d, depth: %d, offset: %d %s\n",
-        b->b_cold, b->b_warm, b->b_nofallthrough, b, b->b_iused, b->b_startdepth, b->b_offset, b_return);
+        b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
+        b->b_startdepth, b->b_offset, b_return);
     if (b->b_instr) {
         int i;
         for (i = 0; i < b->b_iused; i++) {
@@ -8545,7 +8559,6 @@ remove_redundant_jumps(basicblock *entry) {
             b_last_instr->i_opcode == JUMP_NO_INTERRUPT) {
             if (b_last_instr->i_target == b->b_next) {
                 assert(b->b_next->b_iused);
-                b->b_nofallthrough = 0;
                 b_last_instr->i_opcode = NOP;
                 removed++;
             }
@@ -8572,7 +8585,7 @@ assemble(struct compiler *c, int addNone)
     }
 
     /* Make sure every block that falls off the end returns None. */
-    if (!c->u->u_curblock->b_return) {
+    if (!basicblock_returns(c->u->u_curblock)) {
         UNSET_LOC(c);
         if (addNone)
             ADDOP_LOAD_CONST(c, Py_None);
@@ -9064,7 +9077,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                     jump_if_true = nextop == POP_JUMP_IF_TRUE;
                     if (is_true == jump_if_true) {
                         bb->b_instr[i+1].i_opcode = JUMP;
-                        bb->b_nofallthrough = 1;
                     }
                     else {
                         bb->b_instr[i+1].i_opcode = NOP;
@@ -9084,7 +9096,6 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
                     jump_if_true = nextop == JUMP_IF_TRUE_OR_POP;
                     if (is_true == jump_if_true) {
                         bb->b_instr[i+1].i_opcode = JUMP;
-                        bb->b_nofallthrough = 1;
                     }
                     else {
                         inst->i_opcode = NOP;
@@ -9273,7 +9284,7 @@ extend_block(basicblock *bb) {
         last->i_opcode != JUMP_BACKWARD) {
         return 0;
     }
-    if (last->i_target->b_exit && last->i_target->b_iused <= MAX_COPY_SIZE) {
+    if (basicblock_exits_scope(last->i_target) && last->i_target->b_iused <= MAX_COPY_SIZE) {
         basicblock *to_copy = last->i_target;
         last->i_opcode = NOP;
         for (int i = 0; i < to_copy->b_iused; i++) {
@@ -9283,7 +9294,6 @@ extend_block(basicblock *bb) {
             }
             bb->b_instr[index] = to_copy->b_instr[i];
         }
-        bb->b_exit = 1;
     }
     return 0;
 }
@@ -9341,34 +9351,21 @@ normalize_basic_block(basicblock *bb) {
     /* Mark blocks as exit and/or nofallthrough.
        Raise SystemError if CFG is malformed. */
     for (int i = 0; i < bb->b_iused; i++) {
-        assert(!IS_ASSEMBLER_OPCODE(bb->b_instr[i].i_opcode));
-        switch (bb->b_instr[i].i_opcode) {
-            case RETURN_VALUE:
-            case RAISE_VARARGS:
-            case RERAISE:
-                bb->b_exit = 1;
-                bb->b_nofallthrough = 1;
-                break;
-            case JUMP:
-            case JUMP_NO_INTERRUPT:
-                bb->b_nofallthrough = 1;
-                /* fall through */
-            case POP_JUMP_IF_NOT_NONE:
-            case POP_JUMP_IF_NONE:
-            case POP_JUMP_IF_FALSE:
-            case POP_JUMP_IF_TRUE:
-            case JUMP_IF_FALSE_OR_POP:
-            case JUMP_IF_TRUE_OR_POP:
-            case FOR_ITER:
-                if (i != bb->b_iused - 1) {
-                    PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
-                    return -1;
-                }
-                /* Skip over empty basic blocks. */
-                while (bb->b_instr[i].i_target->b_iused == 0) {
-                    bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
-                }
-
+        int opcode = bb->b_instr[i].i_opcode;
+        assert(!IS_ASSEMBLER_OPCODE(opcode));
+        int is_jump = IS_JUMP_OPCODE(opcode);
+        int is_exit = IS_SCOPE_EXIT_OPCODE(opcode);
+        if (is_exit || is_jump) {
+            if (i != bb->b_iused - 1) {
+                PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
+                return -1;
+            }
+        }
+        if (is_jump) {
+            /* Skip over empty basic blocks. */
+            while (bb->b_instr[i].i_target->b_iused == 0) {
+                bb->b_instr[i].i_target = bb->b_instr[i].i_target->b_next;
+            }
         }
     }
     return 0;
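Aside, not part of the patch: the rewritten loop above enforces one CFG invariant via the shared macros -- a jump or scope-exit opcode may only appear as the final instruction of its block. A standalone sketch of that check, with invented opcodes:

/* Standalone sketch, not CPython source. */
#include <stdio.h>

enum { OP_NOP, OP_JUMP, OP_RETURN_VALUE };          /* invented opcodes */
#define IS_JUMP_OPCODE(op)        ((op) == OP_JUMP)
#define IS_SCOPE_EXIT_OPCODE(op)  ((op) == OP_RETURN_VALUE)

static int
block_is_well_formed(const int *opcodes, int n) {
    for (int i = 0; i < n; i++) {
        if ((IS_JUMP_OPCODE(opcodes[i]) || IS_SCOPE_EXIT_OPCODE(opcodes[i]))
            && i != n - 1) {
            return 0;   /* terminator mid-block: malformed CFG */
        }
    }
    return 1;
}

int main(void) {
    int good[] = {OP_NOP, OP_RETURN_VALUE};
    int bad[]  = {OP_JUMP, OP_NOP};
    printf("%d %d\n", block_is_well_formed(good, 2),
                      block_is_well_formed(bad, 2));   /* 1 0 */
    return 0;
}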
@@ -9386,7 +9383,7 @@ mark_reachable(struct assembler *a) {
     while (sp > stack) {
         basicblock *b = *(--sp);
         b->b_visited = 1;
-        if (b->b_next && !b->b_nofallthrough) {
+        if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
             if (!b->b_next->b_visited) {
                 assert(b->b_next->b_predecessors == 0);
                 *sp++ = b->b_next;
@@ -9475,7 +9472,7 @@ propagate_line_numbers(struct assembler *a) {
                 COPY_INSTR_LOC(b->b_instr[i], prev_instr);
             }
         }
-        if (!b->b_nofallthrough && b->b_next->b_predecessors == 1) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next->b_predecessors == 1) {
             assert(b->b_next->b_iused);
             if (b->b_next->b_instr[0].i_lineno < 0) {
                 COPY_INSTR_LOC(prev_instr, b->b_next->b_instr[0]);
@@ -9523,7 +9520,6 @@ optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts)
     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) {
         if (b->b_predecessors == 0) {
             b->b_iused = 0;
-            b->b_nofallthrough = 0;
         }
     }
     eliminate_empty_basic_blocks(a->a_entry);
@@ -9563,7 +9559,7 @@ trim_unused_consts(struct assembler *a, PyObject *consts)
 
 static inline int
 is_exit_without_lineno(basicblock *b) {
-    if (!b->b_exit) {
+    if (!basicblock_exits_scope(b)) {
         return 0;
     }
     for (int i = 0; i < b->b_iused; i++) {
@@ -9614,7 +9610,7 @@ duplicate_exits_without_lineno(struct compiler *c)
     /* Any remaining reachable exit blocks without line number can only be reached by
      * fall through, and thus can only have a single predecessor */
     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
-        if (!b->b_nofallthrough && b->b_next && b->b_iused > 0) {
+        if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) {
             if (is_exit_without_lineno(b->b_next)) {
                 assert(b->b_next->b_iused > 0);
                 COPY_INSTR_LOC(b->b_instr[b->b_iused-1], b->b_next->b_instr[0]);