mirror of https://github.com/danog/ir.git
64-bit constants support
commit 207dca73e8
parent d44dc682f4

TODO (12 changed lines)
@@ -68,21 +68,15 @@
- ALLOCA, VLOAD, VSTORE, LOAD, STORE
+ SWITCH
- ir_last_use
- 64-bit constant (ir_emit_ref, ir_emit_dssa_move)
+ 64-bit load
+ 64-bit load into temporary register
+ 64-bit constants in dessa
+ 64-bit constants in switch
- 64-bit IR_IS_POWER_OF_TWO and IR_LOG2
- binop_int $imm, mem
- commutative insns and swap (binop_int, mul, binop_sse, binop_avx, cmp_int, cmp_fp, cmp_br_int)
- dessa_move (push, pop)
- param_move
- temporary register (e.g. for unsupported immediate operand in mul, div)
+ temporary register for swap (dessa3.ir)
- temporary register (e.g. for unsupported immediate operand in mul, div, and 64-bit constants)
? temporary register for swap (dessa3.ir)
- temporary register for spill loads and stores
- parallel parameter loading
+ parallel argument passing
- stack arguments
- return merge/split
? binary code emission
+ DynAsm

ir.g (2 changed lines)
@@ -255,7 +255,7 @@ DECNUMBER(uint32_t t, ir_val *val):

HEXNUMBER(uint32_t t, ir_val *val):
    /0x[0-9A-Fa-f]+/
-   {val->i64 = strtoll((const char*)yy_text + 2, NULL, 16);}
+   {val->u64 = strtoull((const char*)yy_text + 2, NULL, 16);}
;

FLOATNUMBER(uint32_t t, ir_val *val):
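
The hunk above switches the hex-literal action from signed strtoll()/val->i64 to unsigned strtoull()/val->u64, so constants that use all 64 bits parse without clamping. A minimal standalone sketch of the difference (plain C, not the project's parser):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* A hex literal that needs the full 64-bit range. */
    const char *text = "0xFFFFFFFFFFFFFFFF";

    long long          as_signed   = strtoll(text + 2, NULL, 16);   /* out of range: clamps to LLONG_MAX (0x7FFF...) */
    unsigned long long as_unsigned = strtoull(text + 2, NULL, 16);  /* keeps 0xFFFFFFFFFFFFFFFF */

    printf("strtoll : %016llx\n", (unsigned long long)as_signed);
    printf("strtoull: %016llx\n", as_unsigned);
    return 0;
}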

ir_private.h (35 changed lines)
@@ -9,7 +9,7 @@

#define IR_IS_POWER_OF_TWO(x) (!((x) & ((x) - 1)))

-#define IR_LOG2(x) ir_ntz(x)
+#define IR_LOG2(x) ir_ntzl(x)

IR_ALWAYS_INLINE uint8_t ir_rol8(uint8_t op1, uint8_t op2)
{
@@ -123,6 +123,39 @@ IR_ALWAYS_INLINE uint32_t ir_ntz(uint32_t num)
#endif
}

+/* Number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
+IR_ALWAYS_INLINE uint32_t ir_ntzl(uint64_t num)
+{
+#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl))
+   return __builtin_ctzl(num);
+#elif defined(_WIN32)
+   unsigned long index;
+
+#if defined(_WIN64)
+   if (!BitScanForward64(&index, num)) {
+#else
+   if (!BitScanForward(&index, num)) {
+#endif
+       /* undefined behavior */
+       return 64;
+   }
+
+   return (uint32_t) index;
+#else
+   uint32_t n;
+
+   if (num == Z_UL(0)) return 64;
+
+   n = 1;
+   if ((num & 0xffffffff) == 0) {n += 32; num = num >> Z_UL(32);}
+   if ((num & 0x0000ffff) == 0) {n += 16; num = num >> 16;}
+   if ((num & 0x000000ff) == 0) {n += 8; num = num >> 8;}
+   if ((num & 0x0000000f) == 0) {n += 4; num = num >> 4;}
+   if ((num & 0x00000003) == 0) {n += 2; num = num >> 2;}
+   return n - (num & 1);
+#endif
+}
+
/* Number of leading zero bits (Undefined for zero) */
IR_ALWAYS_INLINE int ir_nlz(uint32_t num)
{
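
With IR_LOG2() now defined through the new 64-bit ir_ntzl(), the log2 of a 64-bit power-of-two constant is its trailing-zero count. A small self-contained check of the portable fallback branch added above, copied locally under the name ntzl_fallback for illustration (the Z_UL() and BitScanForward paths are left out):

#include <assert.h>
#include <stdint.h>

/* Local copy of ir_ntzl()'s portable branch: count trailing zero bits of a
 * 64-bit value by halving the search window; 0 maps to 64. */
static uint32_t ntzl_fallback(uint64_t num)
{
    uint32_t n;

    if (num == 0) return 64;

    n = 1;
    if ((num & 0xffffffff) == 0) {n += 32; num >>= 32;}
    if ((num & 0x0000ffff) == 0) {n += 16; num >>= 16;}
    if ((num & 0x000000ff) == 0) {n += 8; num >>= 8;}
    if ((num & 0x0000000f) == 0) {n += 4; num >>= 4;}
    if ((num & 0x00000003) == 0) {n += 2; num >>= 2;}
    return n - (num & 1);
}

int main(void)
{
    assert(ntzl_fallback(0x01) == 0);            /* 0x01 -> 0 */
    assert(ntzl_fallback(0x40) == 6);            /* 0x40 -> 6 */
    assert(ntzl_fallback(0)    == 64);           /* 0x00 -> LEN */
    assert(ntzl_fallback(1ULL << 63) == 63);     /* only the top bit set */
    assert(ntzl_fallback(1ULL << 40) == 40);     /* log2 of a 64-bit power of two */
    return 0;
}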

ir_x86.dasc (25 changed lines)
@@ -972,12 +972,12 @@ static uint32_t ir_match_insn(ir_ctx *ctx, ir_ref ref, ir_block *bb)
            // 0
        } else if (op2_insn->val.u64 == 1) {
            return IR_COPY_INT;
-       } else if (ir_type_size[insn->type] >= 4) {
-           if (op2_insn->val.u64 == 2 || op2_insn->val.u64 == 4 || op2_insn->val.u64 == 8) {
-               return IR_LEA_SI; // lea ret, [op1.reg*op2.scale]
-           } else if (op2_insn->val.u64 == 3 || op2_insn->val.u64 == 5 || op2_insn->val.u64 == 9) {
-               return IR_LEA_SIB; // lea ret, [op1.reg+op1.reg*op2.scale]
-           }
+       } else if (ir_type_size[insn->type] >= 4 &&
+               (op2_insn->val.u64 == 2 || op2_insn->val.u64 == 4 || op2_insn->val.u64 == 8)) {
+           return IR_LEA_SI; // lea ret, [op1.reg*op2.scale]
+       } else if (ir_type_size[insn->type] >= 4 &&
+               (op2_insn->val.u64 == 3 || op2_insn->val.u64 == 5 || op2_insn->val.u64 == 9)) {
+           return IR_LEA_SIB; // lea ret, [op1.reg+op1.reg*op2.scale]
        } else if (op2_insn->val.u64 == 2) {
            return IR_MUL_2; // add op1, op1
        } else if (IR_IS_POWER_OF_TWO(op2_insn->val.u64)) {
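
This hunk flattens the constant-multiplier matching in ir_match_insn: scale factors 2/4/8 map to lea ret, [op1*scale], 3/5/9 to lea ret, [op1+op1*scale], multiply-by-2 to an add, and the remaining powers of two fall through to the IR_IS_POWER_OF_TWO shift case (which the nested form appears to have bypassed for 4- and 8-byte types). A rough sketch of that selection logic, using local stand-in names rather than the real IR_* rule codes:

#include <stdint.h>

/* Illustrative only: mirrors the matcher's intent, not its API. */
enum mul_rule { MUL_BY_CONST, LEA_SI, LEA_SIB, MUL_2, MUL_PWR2 };

static enum mul_rule match_mul_by_const(uint64_t c, int type_size)
{
    if (type_size >= 4 && (c == 2 || c == 4 || c == 8)) {
        return LEA_SI;          /* lea ret, [op1 * c]           */
    } else if (type_size >= 4 && (c == 3 || c == 5 || c == 9)) {
        return LEA_SIB;         /* lea ret, [op1 + op1 * (c-1)] */
    } else if (c == 2) {
        return MUL_2;           /* add op1, op1                 */
    } else if (c && !(c & (c - 1))) {
        return MUL_PWR2;        /* shl op1, log2(c)             */
    }
    return MUL_BY_CONST;        /* generic imul                 */
}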
@@ -1610,7 +1610,7 @@ void ir_emit_mod_pwr2(ir_ctx *ctx, ir_ref def, ir_insn *insn)
    dasm_State **Dst = &data->dasm_state;
    ir_reg def_reg = ir_ref_reg(ctx, def);
    ir_reg op1_reg = ir_ref_reg(ctx, insn->op1);
-   uint32_t mask = IR_LOG2(ctx->ir_base[insn->op2].val.u64) - 1;
+   uint64_t mask = ctx->ir_base[insn->op2].val.u64 - 1;
    ir_reg reg;

    if (def_reg >= 0) {
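
For x mod 2^k on unsigned values, the bit mask is the power-of-two constant minus one (for 8 that is 7, whereas IR_LOG2(8) - 1 would be 2), and with 64-bit constants it has to be kept in a uint64_t. A minimal standalone check of the identity the new mask relies on:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t pow2 = 1ULL << 40;              /* a power of two wider than 32 bits */
    uint64_t mask = pow2 - 1;                /* 0x000000FFFFFFFFFF */
    uint64_t x    = 0x0123456789ABCDEFULL;

    /* x % 2^k == x & (2^k - 1) for unsigned x */
    assert(x % pow2 == (x & mask));
    return 0;
}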
@@ -1620,10 +1620,15 @@ void ir_emit_mod_pwr2(ir_ctx *ctx, ir_ref def, ir_insn *insn)
    } else {
        reg = IR_REG_RAX; // TODO: temporary register
    }
-   if (op1_reg != reg) {
-       ir_emit_load(ctx, insn->type, insn->op1, reg);
+   if (IR_IS_UNSIGNED_32BIT(mask)) {
+       if (op1_reg != reg) {
+           ir_emit_load(ctx, insn->type, insn->op1, reg);
+       }
+       |   ASM_REG_IMM_OP and, insn->type, reg, mask
+   } else {
+       ir_emit_load(ctx, insn->type, insn->op2, reg);
+       |   ASM_REG_REF_OP and, insn->type, reg, insn->op1
    }
-   |   ASM_REG_IMM_OP and, insn->type, reg, mask
    if (def_reg != reg) {
        ir_emit_store(ctx, insn->type, reg, def);
    }
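
The restructured emitter distinguishes masks by width: x86-64 ALU instructions such as the and used here take at most a 32-bit immediate, so a wider 64-bit mask has to be materialized in a register (in the hunk above, the constant op2 is loaded into the target register and and-ed with op1 via ASM_REG_REF_OP). A rough sketch of that decision, with a hypothetical fits_unsigned_32bit() standing in for the IR_IS_UNSIGNED_32BIT() check and emit_and_mask() as an illustrative helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the IR_IS_UNSIGNED_32BIT() test: no bits above bit 31. */
static bool fits_unsigned_32bit(uint64_t val)
{
    return val <= 0xffffffffULL;
}

/* Print the rough shape of the code that would be emitted for "reg &= mask". */
static void emit_and_mask(uint64_t mask)
{
    if (fits_unsigned_32bit(mask)) {
        printf("and reg, 0x%llx\n", (unsigned long long)mask);   /* immediate form */
    } else {
        printf("mov tmp, 0x%llx\n"                               /* 64-bit constant */
               "and reg, tmp\n", (unsigned long long)mask);      /* register form   */
    }
}

int main(void)
{
    emit_and_mask((1ULL << 20) - 1);   /* fits in 32 bits -> and reg, imm */
    emit_and_mask((1ULL << 40) - 1);   /* needs a scratch register        */
    return 0;
}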