diff --git a/ir_aarch64.dasc b/ir_aarch64.dasc
index f3bbc56..666099e 100644
--- a/ir_aarch64.dasc
+++ b/ir_aarch64.dasc
@@ -897,6 +897,10 @@ binop_fp:
 	return insn->op;
 }
 
+static void ir_match_insn2(ir_ctx *ctx, ir_ref ref, ir_block *bb)
+{
+}
+
 /* code genertion */
 static int32_t ir_ref_spill_slot(ir_ctx *ctx, ir_ref ref, ir_reg *reg)
 {
diff --git a/ir_emit.c b/ir_emit.c
index 5d14e76..7bf62c0 100644
--- a/ir_emit.c
+++ b/ir_emit.c
@@ -278,6 +278,7 @@ int ir_match(ir_ctx *ctx)
 			if (!ctx->rules[i]) {
 				ctx->rules[i] = ir_match_insn(ctx, i, bb);
 			}
+			ir_match_insn2(ctx, i, bb);
 		}
 		ctx->rules[i] = IR_SKIP;
 	}
diff --git a/ir_x86.dasc b/ir_x86.dasc
index d89d79f..a32a813 100644
--- a/ir_x86.dasc
+++ b/ir_x86.dasc
@@ -1743,6 +1743,25 @@ store_int:
 	return insn->op;
 }
 
+static void ir_match_insn2(ir_ctx *ctx, ir_ref ref, ir_block *bb)
+{
+	if (ctx->rules[ref] == IR_LEA_IB) {
+		ir_insn *insn = &ctx->ir_base[ref];
+
+		if (ctx->ir_base[insn->op2].op == IR_LOAD) {
+			ir_match_fuse_load(ctx, insn->op2, bb);
+			ctx->rules[ref] = IR_BINOP_INT;
+		} else if (ctx->ir_base[insn->op1].op == IR_LOAD) {
+			/* swap for better load fusion */
+			ir_ref tmp = insn->op1;
+			insn->op1 = insn->op2;
+			insn->op2 = tmp;
+			ir_match_fuse_load(ctx, insn->op2, bb);
+			ctx->rules[ref] = IR_BINOP_INT;
+		}
+	}
+}
+
 /* code genertion */
 static int32_t ir_ref_spill_slot(ir_ctx *ctx, ir_ref ref, ir_reg *reg)
 {
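
Reviewer's note (not part of the patch): the new ir_match_insn2() hook runs as a second pass in ir_match(), after ir_match_insn() has assigned a rule to each instruction. On x86 it revisits nodes matched as IR_LEA_IB (apparently an integer ADD of two register operands selected as a lea) and, when either operand is an IR_LOAD, demotes the rule to IR_BINOP_INT with that load fused in as a memory operand; swapping op1 and op2 so the load lands in op2 is safe because ADD is commutative, and ir_match_fuse_load() is presumably what decides whether fusion is actually legal. The aarch64 hook is an empty stub, consistent with AArch64 being a load/store architecture with no memory-operand ALU forms to fuse. A minimal sketch of the intended codegen effect, assuming IR roughly like ADD(x, LOAD(p)) and hypothetical register assignments:

    ; before: IR_LEA_IB keeps both operands in registers
    mov rax, [rdx]        ; the load is emitted separately
    lea rcx, [rbx + rax]  ; lea-style add

    ; after: IR_BINOP_INT with the IR_LOAD fused
    mov rcx, rbx
    add rcx, [rdx]        ; load folded into the add's memory operand

When the fusion applies, this saves an instruction and frees the scratch register that would otherwise hold the loaded value.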