From 6a4e239773ddd1d507877d78b4388a591b9b1af5 Mon Sep 17 00:00:00 2001
From: Dmitry Stogov
Date: Tue, 29 Nov 2022 20:02:07 +0300
Subject: [PATCH] Create a separate pass to remove unreachable CFG blocks.

The SCCP pass removes unreachable blocks before CFG construction. For the
-O0 and -O1 pipelines (without SCCP) it's simpler and faster to unlink
unreachable CFG blocks once than to check for reachability in almost every
compilation pass. The -O2 pipeline (with SCCP) doesn't need this pass.
---
 ir.h            |   1 +
 ir_aarch64.dasc |  23 +++--
 ir_cfg.c        | 241 ++++++++++++++++++++++++++++++++++++++++++++++--
 ir_emit.c       |   4 +-
 ir_emit_c.c     |   8 +-
 ir_gcm.c        |  13 +--
 ir_main.c       |   3 +
 ir_private.h    |  16 ++++
 ir_ra.c         |  21 ++---
 ir_sccp.c       |  12 +--
 ir_test.c       |   3 +
 ir_x86.dasc     |  22 +++--
 12 files changed, 301 insertions(+), 66 deletions(-)
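Editor's note (not part of the commit): a minimal sketch of where the new pass
slots into a pipeline. The `opt_level` guards are assumptions mirroring the
ir_main.c and ir_test.c hunks below: ir_main.c enables the pass only at -O1
(its -O0 path never builds a CFG), while ir_test.c builds the CFG
unconditionally and therefore runs the pass at both -O0 and -O1.

    /* sketch: at -O2 SCCP already removed unreachable blocks */
    if (opt_level == 2) {
        ir_sccp(ctx);
    }
    ir_build_cfg(ctx);
    if (opt_level <= 1) {
        /* no SCCP ran; unlink unreachable blocks once, up front */
        ir_remove_unreachable_blocks(ctx);
    }
    if (opt_level > 0) {
        ir_build_dominators_tree(ctx); /* may now assume every block is reachable */
        ir_find_loops(ctx);
    }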
diff --git a/ir.h b/ir.h
index 1410a73..5c82c56 100644
--- a/ir.h
+++ b/ir.h
@@ -579,6 +579,7 @@ void ir_build_def_use_lists(ir_ctx *ctx);
 
 /* CFG - Control Flow Graph (implementation in ir_cfg.c) */
 int ir_build_cfg(ir_ctx *ctx);
+int ir_remove_unreachable_blocks(ir_ctx *ctx);
 int ir_build_dominators_tree(ir_ctx *ctx);
 int ir_find_loops(ir_ctx *ctx);
 int ir_schedule_blocks(ir_ctx *ctx);
diff --git a/ir_aarch64.dasc b/ir_aarch64.dasc
index 667d1c5..41f23c0 100644
--- a/ir_aarch64.dasc
+++ b/ir_aarch64.dasc
@@ -2331,6 +2331,21 @@ static void ir_emit_if_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
 	ir_backend_data *data = ctx->data;
 	dasm_State **Dst = &data->dasm_state;
 
+	if (IR_IS_CONST_REF(insn->op2)) {
+		uint32_t true_block, false_block, next_block;
+
+		ir_get_true_false_blocks(ctx, b, &true_block, &false_block, &next_block);
+		if (ir_const_is_true(&ctx->ir_base[insn->op2])) {
+			if (true_block != next_block) {
+				|	b =>true_block
+			}
+		} else {
+			if (false_block != next_block) {
+				|	b =>false_block
+			}
+		}
+		return;
+	}
 	IR_ASSERT(op2_reg != IR_REG_NONE);
 	if ((op2_reg & IR_REG_SPILL_LOAD) || IR_IS_CONST_REF(insn->op2)) {
 		op2_reg &= ~IR_REG_SPILL_LOAD;
@@ -4413,9 +4428,7 @@ static void ir_allocate_unique_spill_slots(ir_ctx *ctx)
 
 	ctx->live_intervals = ir_mem_calloc(ctx->vregs_count + 1 + IR_REG_NUM + 1, sizeof(ir_live_interval*));
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		for (i = bb->start, insn = ctx->ir_base + i, rule = ctx->rules + i; i <= bb->end;) {
 			switch (ctx->rules ? *rule : insn->op) {
 				case IR_START:
@@ -4774,9 +4787,7 @@ void *ir_emit_code(ir_ctx *ctx, size_t *size_ptr)
 	}
 
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (ctx->prev_ref[bb->end] == bb->start
 		 && bb->successors_count == 1
 		 && !(bb->flags & (IR_BB_START|IR_BB_ENTRY|IR_BB_DESSA_MOVES))) {
diff --git a/ir_cfg.c b/ir_cfg.c
index bbf67d3..bf70e46 100644
--- a/ir_cfg.c
+++ b/ir_cfg.c
@@ -88,8 +88,9 @@ IR_ALWAYS_INLINE void _ir_add_predecessors(ir_insn *insn, ir_worklist *worklist)
 			ir_worklist_push(worklist, ref);
 		}
 	} else if (insn->op != IR_START && insn->op != IR_ENTRY) {
-		IR_ASSERT(insn->op1);
-		ir_worklist_push(worklist, insn->op1);
+		if (EXPECTED(insn->op1)) {
+			ir_worklist_push(worklist, insn->op1);
+		}
 	}
 }
@@ -269,10 +270,12 @@ next_successor:
 			bb->predecessors_count = n;
 			edges_count += n;
 			count += n;
-		} else {
+		} else if (EXPECTED(insn->op1)) {
 			bb->predecessors_count = 1;
 			edges_count++;
 			count++;
+		} else {
+			bb->predecessors_count = 0;
 		}
 	}
 	b++;
@@ -335,6 +338,230 @@ next_successor:
 	return 1;
 }
 
+static void ir_remove_predecessor(ir_ctx *ctx, ir_block *bb, uint32_t from)
+{
+	uint32_t i, *p, *q, n = 0;
+
+	p = q = &ctx->cfg_edges[bb->predecessors];
+	for (i = 0; i < bb->predecessors_count; i++, p++) {
+		if (*p != from) {
+			if (p != q) {
+				*q = *p;
+			}
+			q++;
+			n++;
+		}
+	}
+	IR_ASSERT(n != bb->predecessors_count);
+	bb->predecessors_count = n;
+}
+
+static void ir_remove_from_use_list(ir_ctx *ctx, ir_ref from, ir_ref ref)
+{
+	ir_ref j, n, *p, *q, use;
+	ir_use_list *use_list = &ctx->use_lists[from];
+	ir_ref skip = 0;
+
+	n = use_list->count;
+	for (j = 0, p = q = &ctx->use_edges[use_list->refs]; j < n; j++, p++) {
+		use = *p;
+		if (use == ref) {
+			skip++;
+		} else {
+			if (p != q) {
+				*q = use;
+			}
+			q++;
+		}
+	}
+	use_list->count -= skip;
+}
+
+static void ir_remove_merge_input(ir_ctx *ctx, ir_ref merge, ir_ref from)
+{
+	ir_ref i, j, n, k, *p, use;
+	ir_insn *use_insn;
+	ir_use_list *use_list;
+	ir_bitset life_inputs;
+	ir_insn *insn = &ctx->ir_base[merge];
+
+	IR_ASSERT(insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN);
+	n = insn->inputs_count;
+	if (n == 0) {
+		n = 3;
+	}
+	i = 1;
+	life_inputs = ir_bitset_malloc(n + 1);
+	for (j = 1; j <= n; j++) {
+		ir_ref input = ir_insn_op(insn, j);
+
+		if (input != from) {
+			if (i != j) {
+				ir_insn_set_op(insn, i, input);
+			}
+			ir_bitset_incl(life_inputs, j);
+			i++;
+		}
+	}
+	i--;
+	if (i == 1) {
+		insn->op = IR_BEGIN;
+		insn->inputs_count = 0;
+		use_list = &ctx->use_lists[merge];
+		for (k = 0, p = &ctx->use_edges[use_list->refs]; k < use_list->count; k++, p++) {
+			use = *p;
+			use_insn = &ctx->ir_base[use];
+			if (use_insn->op == IR_PHI) {
+				/* Convert PHI to COPY */
+				i = 2;
+				for (j = 2; j <= n; j++) {
+					ir_ref input = ir_insn_op(use_insn, j);
+
+					if (ir_bitset_in(life_inputs, j - 1)) {
+						use_insn->op1 = ir_insn_op(use_insn, j);
+					} else if (input > 0) {
+						ir_remove_from_use_list(ctx, input, use);
+					}
+				}
+				use_insn->op = IR_COPY;
+				use_insn->op2 = IR_UNUSED;
+				use_insn->op3 = IR_UNUSED;
+				ir_remove_from_use_list(ctx, merge, use);
+			}
+		}
+	} else {
+		if (i == 2) {
+			i = 0;
+		}
+		insn->inputs_count = i;
+
+		n++;
+		use_list = &ctx->use_lists[merge];
+		for (k = 0, p = &ctx->use_edges[use_list->refs]; k < use_list->count; k++, p++) {
+			use = *p;
+			use_insn = &ctx->ir_base[use];
+			if (use_insn->op == IR_PHI) {
+				i = 2;
+				for (j = 2; j <= n; j++) {
+					ir_ref input = ir_insn_op(use_insn, j);
+
+					if (ir_bitset_in(life_inputs, j - 1)) {
+						IR_ASSERT(input);
+						if (i != j) {
+							ir_insn_set_op(use_insn, i, input);
+						}
+						i++;
+					} else if (input > 0) {
+						ir_remove_from_use_list(ctx, input, use);
+					}
+				}
+			}
+		}
+	}
+	ir_mem_free(life_inputs);
+	ir_remove_from_use_list(ctx, from, merge);
+}
+
+/* CFG constructed after SCCP pass doesn't have unreachable BBs, otherwise they should be removed */
+int ir_remove_unreachable_blocks(ir_ctx *ctx)
+{
+	uint32_t b, *p, i;
+	uint32_t unreachable_count = 0;
+	uint32_t bb_count = ctx->cfg_blocks_count;
+	ir_block *bb = ctx->cfg_blocks + 1;
+
+	for (b = 1; b <= bb_count; b++, bb++) {
+		if (bb->flags & IR_BB_UNREACHABLE) {
+#if 0
+			do {if (!unreachable_count) ir_dump_cfg(ctx, stderr);} while(0);
+#endif
+			if (bb->successors_count) {
+				for (i = 0, p = &ctx->cfg_edges[bb->successors]; i < bb->successors_count; i++, p++) {
+					ir_block *succ_bb = &ctx->cfg_blocks[*p];
+
+					if (!(succ_bb->flags & IR_BB_UNREACHABLE)) {
+						ir_remove_predecessor(ctx, succ_bb, b);
+						ir_remove_merge_input(ctx, succ_bb->start, bb->end);
+					}
+				}
+			} else {
+				ir_ref prev, ref = bb->end;
+				ir_insn *insn = &ctx->ir_base[ref];
+
+				IR_ASSERT(ir_op_flags[insn->op] & IR_OP_FLAG_TERMINATOR);
+				/* remove from terminators list */
+				prev = ctx->ir_base[1].op1;
+				if (prev == ref) {
+					ctx->ir_base[1].op1 = insn->op3;
+				} else {
+					while (prev) {
+						if (ctx->ir_base[prev].op3 == ref) {
+							ctx->ir_base[prev].op3 = insn->op3;
+							break;
+						}
+						prev = ctx->ir_base[prev].op3;
+					}
+				}
+			}
+			ctx->cfg_map[bb->start] = 0;
+			ctx->cfg_map[bb->end] = 0;
+			unreachable_count++;
+		}
+	}
+
+	if (unreachable_count) {
+		ir_block *dst_bb;
+		uint32_t n = 1;
+		uint32_t *edges;
+
+		dst_bb = bb = ctx->cfg_blocks + 1;
+		for (b = 1; b <= bb_count; b++, bb++) {
+			if (!(bb->flags & IR_BB_UNREACHABLE)) {
+				if (dst_bb != bb) {
+					memcpy(dst_bb, bb, sizeof(ir_block));
+					ctx->cfg_map[dst_bb->start] = n;
+					ctx->cfg_map[dst_bb->end] = n;
+				}
+				dst_bb->successors_count = 0;
+				dst_bb++;
+				n++;
+			}
+		}
+		ctx->cfg_blocks_count = bb_count = n - 1;
+
+		/* Rebuild successor/predecessors control edges */
+		edges = ctx->cfg_edges;
+		bb = ctx->cfg_blocks + 1;
+		for (b = 1; b <= bb_count; b++, bb++) {
+			ir_insn *insn = &ctx->ir_base[bb->start];
+			ir_ref *p, ref;
+
+			if (bb->predecessors_count > 1) {
+				uint32_t *q = edges + bb->predecessors;
+
+				n = ir_variable_inputs_count(insn);
+				for (p = insn->ops + 1; n > 0; p++, q++, n--) {
+					ref = *p;
+					IR_ASSERT(ref);
+					ir_ref pred_b = ctx->cfg_map[ref];
+					ir_block *pred_bb = &ctx->cfg_blocks[pred_b];
+					*q = pred_b;
+					edges[pred_bb->successors + pred_bb->successors_count++] = b;
+				}
+			} else if (bb->predecessors_count == 1) {
+				ref = insn->op1;
+				IR_ASSERT(ref);
+				IR_ASSERT(IR_OPND_KIND(ir_op_flags[insn->op], 1) == IR_OPND_CONTROL);
+				ir_ref pred_b = ctx->cfg_map[ref];
+				ir_block *pred_bb = &ctx->cfg_blocks[pred_b];
+				edges[bb->predecessors] = pred_b;
+				edges[pred_bb->successors + pred_bb->successors_count++] = b;
+			}
+		}
+	}
+
+	return 1;
+}
+
 static void compute_postnum(const ir_ctx *ctx, uint32_t *cur, uint32_t b)
 {
 	uint32_t i, *p;
@@ -387,9 +614,7 @@ int ir_build_dominators_tree(ir_ctx *ctx)
 		changed = 0;
 		/* Iterating in Reverse Post Oorder */
 		for (b = 2, bb = &blocks[2]; b <= blocks_count; b++, bb++) {
-			if (bb->flags & IR_BB_UNREACHABLE) {
-				continue;
-			}
+			IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 			if (bb->predecessors_count == 1) {
 				uint32_t idom = 0;
 				uint32_t pred_b = edges[bb->predecessors];
@@ -443,9 +668,7 @@ int ir_build_dominators_tree(ir_ctx *ctx)
 
 	/* Construct dominators tree */
 	for (b = 2, bb = &blocks[2]; b <= blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (bb->flags & IR_BB_ENTRY) {
 			bb->idom = 0;
 			bb->dom_depth = 0;
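Editor's note (not part of the commit): the ir_remove_merge_input() rewrite
above is easiest to see on a small example. The IR references below are
hypothetical:

    /* Unlinking the control edge l2 from a two-input MERGE:
     *
     *   before:  l3 = MERGE(l1, l2);   x = PHI(l3, a, b);
     *   after:   l3 = BEGIN(l1);       x = COPY(a);
     *
     * With three or more inputs the MERGE (or LOOP_BEGIN) survives and
     * each PHI simply drops the operand of the removed predecessor.
     */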
diff --git a/ir_emit.c b/ir_emit.c
index 450ca9a..5d14e76 100644
--- a/ir_emit.c
+++ b/ir_emit.c
@@ -273,9 +273,7 @@ int ir_match(ir_ctx *ctx)
 
 	ctx->rules = ir_mem_calloc(ctx->insns_count, sizeof(uint32_t));
 	for (b = ctx->cfg_blocks_count, bb = ctx->cfg_blocks + b; b > 0; b--, bb--) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		for (i = bb->end; i > bb->start; i = ctx->prev_ref[i]) {
 			if (!ctx->rules[i]) {
 				ctx->rules[i] = ir_match_insn(ctx, i, bb);
diff --git a/ir_emit_c.c b/ir_emit_c.c
index f7e1e51..115a685 100644
--- a/ir_emit_c.c
+++ b/ir_emit_c.c
@@ -711,9 +711,7 @@ static int ir_emit_func(ir_ctx *ctx, FILE *f)
 	/* Emit declarations for local variables */
 	vars = ir_bitset_malloc(ctx->vregs_count + 1);
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		for (i = bb->start, insn = ctx->ir_base + i; i <= bb->end;) {
 			if (ctx->vregs[i]) {
 				if (!ir_bitset_in(vars, ctx->vregs[i])) {
@@ -751,9 +749,7 @@ static int ir_emit_func(ir_ctx *ctx, FILE *f)
 	ir_mem_free(vars);
 
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (ctx->prev_ref[bb->end] == bb->start
 		 && bb->successors_count == 1
 		 && !(bb->flags & (IR_BB_START|IR_BB_ENTRY|IR_BB_DESSA_MOVES))) {
diff --git a/ir_gcm.c b/ir_gcm.c
index 04bfae1..809b1e3 100644
--- a/ir_gcm.c
+++ b/ir_gcm.c
@@ -96,6 +96,7 @@ static void ir_gcm_schedule_late(ir_ctx *ctx, uint32_t *_blocks, ir_bitset visit
 				q++;
 			}
 			b = _blocks[*q];
+			IR_ASSERT(b);
 		}
 		lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
 	}
@@ -191,9 +192,7 @@ int ir_gcm(ir_ctx *ctx)
 	/* pin and collect control and control depended (PARAM, VAR, PHI, PI) instructions */
 	b = ctx->cfg_blocks_count;
 	for (bb = ctx->cfg_blocks + b; b > 0; bb--, b--) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		ref = bb->end;
 		do {
 			insn = &ctx->ir_base[ref];
@@ -450,9 +449,7 @@ int ir_schedule(ir_ctx *ctx)
 	scheduled = ir_bitset_malloc(ctx->insns_count);
 	used = ir_bitset_malloc(ctx->consts_count + 1);
 	for (b = 1, bb = ctx->cfg_blocks + 1; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		/* Schedule BB start */
 		i = bb->start;
 		ir_bitset_incl(scheduled, i);
@@ -755,9 +752,7 @@ void ir_build_prev_refs(ir_ctx *ctx)
 	ctx->prev_ref = ir_mem_malloc(ctx->insns_count * sizeof(ir_ref));
 	prev = 0;
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		for (i = bb->start, insn = ctx->ir_base + i; i < bb->end;) {
 			ctx->prev_ref[i] = prev;
 			n = ir_operands_count(ctx, insn);
diff --git a/ir_main.c b/ir_main.c
index 007eb94..d2c923b 100644
--- a/ir_main.c
+++ b/ir_main.c
@@ -154,6 +154,9 @@ int ir_compile_func(ir_ctx *ctx, int opt_level, uint32_t dump, const char *dump_
 	/* Schedule */
 	if (opt_level > 0) {
 		ir_build_cfg(ctx);
+		if (opt_level == 1) {
+			ir_remove_unreachable_blocks(ctx);
+		}
 		ir_build_dominators_tree(ctx);
 		ir_find_loops(ctx);
 		ir_gcm(ctx);
diff --git a/ir_private.h b/ir_private.h
index 30641b5..79437b9 100644
--- a/ir_private.h
+++ b/ir_private.h
@@ -680,6 +680,22 @@ extern const char *ir_op_name[IR_LAST_OP];
 #define IR_IS_CONST_OP(op)    ((op) > IR_NOP && (op) <= IR_C_FLOAT)
 #define IR_IS_FOLDABLE_OP(op) ((op) <= IR_LAST_FOLDABLE_OP)
 
+IR_ALWAYS_INLINE bool ir_const_is_true(ir_insn *v)
+{
+
+	if (v->type == IR_BOOL) {
+		return v->val.b;
+	} else if (IR_IS_TYPE_INT(v->type)) {
+		return v->val.i64 != 0;
+	} else if (v->type == IR_DOUBLE) {
+		return v->val.d != 0.0;
+	} else if (v->type == IR_FLOAT) {
+		return v->val.f != 0.0;
+	}
+	IR_ASSERT(0 && "NYI");
+	return 0;
+}
+
 /* IR OP flags */
 #define IR_OP_FLAG_OPERANDS_SHIFT 3
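Editor's note (not part of the commit): ir_const_is_true() is the old body of
ir_sccp_is_true(), hoisted into ir_private.h so that the SCCP pass and both
back ends share a single truth test. A minimal usage sketch, matching the
ir_emit_if_int() hunks in this patch:

    /* `insn->op2` must be a constant reference; the helper then applies the
     * type-appropriate test (bool, any integer width, double, or float). */
    if (IR_IS_CONST_REF(insn->op2)
     && ir_const_is_true(&ctx->ir_base[insn->op2])) {
        /* the IF always takes its true edge */
    }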
diff --git a/ir_ra.c b/ir_ra.c
index 5c59440..b2ebfe1 100644
--- a/ir_ra.c
+++ b/ir_ra.c
@@ -55,9 +55,7 @@ int ir_assign_virtual_registers(ir_ctx *ctx)
 	vregs = ir_mem_calloc(ctx->insns_count, sizeof(ir_ref));
 	n = 1;
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		i = bb->start;
 
 		/* skip first instruction */
@@ -369,9 +367,7 @@ int ir_compute_live_ranges(ir_ctx *ctx)
 	ctx->live_intervals = ir_mem_calloc(ctx->vregs_count + 1 + IR_REG_NUM + 1, sizeof(ir_live_interval*));
 	for (b = ctx->cfg_blocks_count; b > 0; b--) {
 		bb = &ctx->cfg_blocks[b];
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		/* for each successor of b */
 #ifdef IR_DEBUG
 		ir_bitset_incl(visited, b);
@@ -1008,9 +1004,7 @@ int ir_coalesce(ir_ctx *ctx)
 
 	/* Collect a list of blocks which are predecossors to block with phi finctions */
 	ir_worklist_init(&blocks, ctx->cfg_blocks_count + 1);
 	for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (bb->predecessors_count > 1) {
 			uint32_t i;
@@ -1073,6 +1067,7 @@ int ir_coalesce(ir_ctx *ctx)
 	for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
 		ir_ref i;
 
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		i = bb->end;
 
 		/* skip last instruction */
@@ -1141,9 +1136,7 @@ int ir_compute_dessa_moves(ir_ctx *ctx)
 	ir_insn *insn;
 
 	for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (bb->predecessors_count > 1) {
 			use_list = &ctx->use_lists[bb->start];
 			n = use_list->count;
@@ -2278,9 +2271,7 @@ static int ir_linear_scan(ir_ctx *ctx)
 	if (ctx->flags & IR_LR_HAVE_DESSA_MOVES) {
 		/* Add fixed intervals for temporary registers used for DESSA moves */
 		for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
-			if (bb->flags & IR_BB_UNREACHABLE) {
-				continue;
-			}
+			IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 			if (bb->flags & IR_BB_DESSA_MOVES) {
 				ctx->data = bb;
 				ir_gen_dessa_moves(ctx, b, ir_fix_dessa_tmps);
diff --git a/ir_sccp.c b/ir_sccp.c
index ad2f6ef..71da70f 100644
--- a/ir_sccp.c
+++ b/ir_sccp.c
@@ -136,17 +136,7 @@ static bool ir_sccp_is_true(ir_ctx *ctx, ir_insn *_values, ir_ref a)
 {
 	ir_insn *v = IR_IS_CONST_REF(a) ? &ctx->ir_base[a] : &_values[a];
 
-	if (v->type == IR_BOOL) {
-		return v->val.b;
-	} else if (IR_IS_TYPE_INT(v->type)) {
-		return v->val.i64 != 0;
-	} else if (v->type == IR_DOUBLE) {
-		return v->val.d != 0.0;
-	} else if (v->type == IR_FLOAT) {
-		return v->val.f != 0.0;
-	}
-	IR_ASSERT(0 && "NYI");
-	return 0;
+	return ir_const_is_true(v);
 }
 
 static bool ir_sccp_is_equal(ir_ctx *ctx, ir_insn *_values, ir_ref a, ir_ref b)
diff --git a/ir_test.c b/ir_test.c
index 00c4063..ed55f92 100644
--- a/ir_test.c
+++ b/ir_test.c
@@ -187,6 +187,9 @@ int main(int argc, char **argv)
 		ir_sccp(&ctx);
 	}
 	ir_build_cfg(&ctx);
+	if (opt_level <= 1) {
+		ir_remove_unreachable_blocks(&ctx);
+	}
 	if (opt_level > 0) {
 		ir_build_dominators_tree(&ctx);
 		ir_find_loops(&ctx);
diff --git a/ir_x86.dasc b/ir_x86.dasc
index 1cea2ee..5f89d63 100644
--- a/ir_x86.dasc
+++ b/ir_x86.dasc
@@ -3992,7 +3992,19 @@ static void ir_emit_if_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
 		}
 		|	ASM_REG_REG_OP test, type, op2_reg, op2_reg
 	} else if (IR_IS_CONST_REF(insn->op2)) {
-		IR_ASSERT(0);
+		uint32_t true_block, false_block, next_block;
+
+		ir_get_true_false_blocks(ctx, b, &true_block, &false_block, &next_block);
+		if (ir_const_is_true(&ctx->ir_base[insn->op2])) {
+			if (true_block != next_block) {
+				|	jmp =>true_block
+			}
+		} else {
+			if (false_block != next_block) {
+				|	jmp =>false_block
+			}
+		}
+		return;
 	} else {
 		int32_t offset = 0;
 
@@ -6885,9 +6897,7 @@ static void ir_allocate_unique_spill_slots(ir_ctx *ctx)
 
 	ctx->live_intervals = ir_mem_calloc(ctx->vregs_count + 1 + IR_REG_NUM + 1, sizeof(ir_live_interval*));
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		for (i = bb->start, insn = ctx->ir_base + i, rule = ctx->rules + i; i <= bb->end;) {
 			switch (ctx->rules ? *rule : insn->op) {
 				case IR_START:
@@ -7263,9 +7273,7 @@ void *ir_emit_code(ir_ctx *ctx, size_t *size_ptr)
 	}
 
 	for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
-		if (bb->flags & IR_BB_UNREACHABLE) {
-			continue;
-		}
+		IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
 		if (ctx->prev_ref[bb->end] == bb->start
 		 && bb->successors_count == 1
 		 && !(bb->flags & (IR_BB_START|IR_BB_ENTRY|IR_BB_DESSA_MOVES))) {
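Editor's note (not part of the commit): the net effect of the patch is a new
invariant. After ir_remove_unreachable_blocks() (or after SCCP at -O2) no
block carries IR_BB_UNREACHABLE, so every block loop downstream collapses
from a skip-check into an assertion:

    /* sketch of the loop pattern the patch installs in each pass */
    for (b = 1, bb = ctx->cfg_blocks + 1; b <= ctx->cfg_blocks_count; b++, bb++) {
        IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
        /* ... process block b ... */
    }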