Fix compilation warnings and signed/unsigned mess

This commit is contained in:
Dmitry Stogov 2022-11-08 23:09:35 +03:00
parent cc73788981
commit 3535fd2fc4
19 changed files with 236 additions and 196 deletions

View File

@ -7,7 +7,7 @@ SRC_DIR = .
CC = gcc
BUILD_CC = gcc
CFLAGS = -Wall
CFLAGS = -Wall -Wextra -Wno-unused-parameter
LDFLAGS = -lm
PHP = php
LLK = llk

View File

@ -17,7 +17,7 @@ static ir_strtab strtab;
void print_hash(uint32_t *mask, uint32_t count)
{
int i;
uint32_t i;
printf("static const uint32_t _ir_fold_hash[%d] = {\n", count);
for (i = 0; i < count; i++) {

7
ir.c
View File

@ -837,12 +837,13 @@ ir_ref ir_fold3(ir_ctx *ctx, uint32_t opt, ir_ref op1, ir_ref op2, ir_ref op3)
return ir_fold(ctx, opt, op1, op2, op3);
}
ir_ref ir_emit_N(ir_ctx *ctx, uint32_t opt, uint32_t count)
ir_ref ir_emit_N(ir_ctx *ctx, uint32_t opt, int32_t count)
{
int i;
ir_ref *p, ref = ctx->insns_count;
ir_insn *insn;
IR_ASSERT(count >= 0);
while (UNEXPECTED(ref + count/4 >= ctx->insns_limit)) {
ir_grow_top(ctx);
}
@ -860,12 +861,12 @@ ir_ref ir_emit_N(ir_ctx *ctx, uint32_t opt, uint32_t count)
return ref;
}
void ir_set_op(ir_ctx *ctx, ir_ref ref, uint32_t n, ir_ref val)
void ir_set_op(ir_ctx *ctx, ir_ref ref, int32_t n, ir_ref val)
{
ir_insn *insn = &ctx->ir_base[ref];
if (n > 3) {
uint32_t count = 3;
int32_t count = 3;
if (insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN) {
count = insn->inputs_count;

4
ir.g
View File

@ -152,7 +152,7 @@ ir_insn(ir_parser_ctx *p):
{ir_val val;}
{ir_val count;}
{ir_val flags;}
{uint32_t n;}
{int32_t n;}
(
type(&t)
ID(&str, &len)
@ -269,7 +269,7 @@ val(ir_parser_ctx *p, uint8_t op, uint32_t n, ir_ref *ref):
{*ref = ir_strl(p->ctx, str, len);}
| DECNUMBER(IR_I32, &val)
{if (kind != IR_OPND_NUM && kind != IR_OPND_PROB) yy_error("unexpected number");}
{if (val.u64 < 0 && val.u64 >= 0x7ffffff) yy_error("number out of range");}
{if (val.i64 < 0 && val.i64 >= 0x7ffffff) yy_error("number out of range");}
{*ref = val.u64;}
| "null"
{*ref = IR_UNUSED;}

14
ir.h
View File

@ -459,7 +459,7 @@ typedef struct _ir_ctx {
ir_hashtab *binding;
ir_use_list *use_lists; /* def->use lists for each instruction */
ir_ref *use_edges;
uint32_t use_edges_count;
ir_ref use_edges_count;
uint32_t cfg_blocks_count;
uint32_t cfg_edges_count;
ir_block *cfg_blocks; /* list of Basic Blocks (starts from 1) */
@ -467,7 +467,7 @@ typedef struct _ir_ctx {
uint32_t *cfg_map; /* map of instructions to Basic Block number */
uint32_t *rules;
uint32_t *vregs;
uint32_t vregs_count;
ir_ref vregs_count;
int32_t spill_base; /* base register for special spill area (e.g. PHP VM frame pointer) */
int32_t fixed_stack_red_zone;
int32_t fixed_stack_frame_size;
@ -475,7 +475,7 @@ typedef struct _ir_ctx {
uint64_t fixed_save_regset;
ir_live_interval **live_intervals;
ir_regs *regs;
uint32_t *prev_insn_len;
ir_ref *prev_insn_len;
void *data;
uint32_t rodata_offset;
uint32_t jmp_table_offset;
@ -523,8 +523,8 @@ ir_ref ir_emit1(ir_ctx *ctx, uint32_t opt, ir_ref op1);
ir_ref ir_emit2(ir_ctx *ctx, uint32_t opt, ir_ref op1, ir_ref op2);
ir_ref ir_emit3(ir_ctx *ctx, uint32_t opt, ir_ref op1, ir_ref op2, ir_ref op3);
ir_ref ir_emit_N(ir_ctx *ctx, uint32_t opt, uint32_t count);
void ir_set_op(ir_ctx *ctx, ir_ref ref, uint32_t n, ir_ref val);
ir_ref ir_emit_N(ir_ctx *ctx, uint32_t opt, int32_t count);
void ir_set_op(ir_ctx *ctx, ir_ref ref, int32_t n, ir_ref val);
static inline void ir_set_op1(ir_ctx *ctx, ir_ref ref, ir_ref val)
{
@ -541,13 +541,13 @@ static inline void ir_set_op3(ir_ctx *ctx, ir_ref ref, ir_ref val)
ctx->ir_base[ref].op3 = val;
}
static inline ir_ref ir_insn_op(ir_insn *insn, uint32_t n)
static inline ir_ref ir_insn_op(ir_insn *insn, int32_t n)
{
ir_ref *p = insn->ops + n;
return *p;
}
static inline void ir_insn_set_op(ir_insn *insn, uint32_t n, ir_ref val)
static inline void ir_insn_set_op(ir_insn *insn, int32_t n, ir_ref val)
{
ir_ref *p = insn->ops + n;
*p = val;

View File

@ -13,7 +13,7 @@
#define IR_SPILL_POS_TO_OFFSET(offset) \
((ctx->flags & IR_USE_FRAME_POINTER) ? \
((offset) + sizeof(void*) * 2) : \
((offset) + (int32_t)sizeof(void*) * 2) : \
((offset) + data->call_stack_size))
#define B_IMM (1<<27) // signed imm26 * 4
@ -1759,14 +1759,14 @@ static void ir_emit_overflow(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_overflow_and_branch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_overflow_and_branch(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
ir_insn *overflow_insn = &ctx->ir_base[insn->op2];
ir_insn *math_insn = &ctx->ir_base[overflow_insn->op1];
ir_type type = math_insn->type;
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
bool reverse = 0;
ir_get_true_false_blocks(ctx, b, &true_block, &false_block, &next_block);
@ -2326,9 +2326,9 @@ static void ir_emit_cmp_fp(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_jmp_true(ir_ctx *ctx, int b, ir_ref def)
static void ir_emit_jmp_true(ir_ctx *ctx, uint32_t b, ir_ref def)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -2338,9 +2338,9 @@ static void ir_emit_jmp_true(ir_ctx *ctx, int b, ir_ref def)
}
}
static void ir_emit_jmp_false(ir_ctx *ctx, int b, ir_ref def)
static void ir_emit_jmp_false(ir_ctx *ctx, uint32_t b, ir_ref def)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -2350,9 +2350,9 @@ static void ir_emit_jmp_false(ir_ctx *ctx, int b, ir_ref def)
}
}
static void ir_emit_jz(ir_ctx *ctx, uint8_t op, int b, ir_type type, ir_reg reg)
static void ir_emit_jz(ir_ctx *ctx, uint8_t op, uint32_t b, ir_type type, ir_reg reg)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -2385,9 +2385,9 @@ static void ir_emit_jz(ir_ctx *ctx, uint8_t op, int b, ir_type type, ir_reg reg)
}
}
static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, int b, ir_ref def, ir_insn *insn, bool int_cmp)
static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, uint32_t b, ir_ref def, ir_insn *insn, bool int_cmp)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -2471,7 +2471,7 @@ static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, int b, ir_ref def, ir_insn *ins
}
}
static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_insn *cmp_insn = &ctx->ir_base[insn->op2];
ir_op op = cmp_insn->op;
@ -2522,13 +2522,13 @@ static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *
ir_emit_jcc(ctx, op, b, def, insn, 1);
}
static void ir_emit_cmp_and_branch_fp(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_cmp_and_branch_fp(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_op op = ir_emit_cmp_fp_common(ctx, insn->op2, &ctx->ir_base[insn->op2]);
ir_emit_jcc(ctx, op, b, def, insn, 0);
}
static void ir_emit_if_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_if_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_type type = ctx->ir_base[insn->op2].type;
ir_reg op2_reg = ctx->regs[def][2];
@ -3368,7 +3368,7 @@ static void ir_emit_alloca(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_switch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_switch(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -4093,7 +4093,7 @@ static void ir_emit_guard_jcc(ir_ctx *ctx, uint8_t op, void *addr, bool int_cmp)
}
}
static void ir_emit_guard_cmp_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_guard_cmp_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -4154,7 +4154,7 @@ static void ir_emit_guard_cmp_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
ir_emit_guard_jcc(ctx, op, addr, 1);
}
static void ir_emit_guard_cmp_fp(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_guard_cmp_fp(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_op op = ir_emit_cmp_fp_common(ctx, insn->op2, &ctx->ir_base[insn->op2]);
void *addr = ir_jmp_addr(ctx, insn, &ctx->ir_base[insn->op3]);
@ -4303,14 +4303,15 @@ static int ir_emit_dessa_move(ir_ctx *ctx, uint8_t type, ir_ref from, ir_ref to)
int8_t to_reg, from_reg;
ir_block *to_bb;
int j, k = 0;
uint32_t b;
ir_ref phi = 0;
bool spill_store = 0;
IR_ASSERT(from_bb->successors_count == 1);
to_bb = &ctx->cfg_blocks[ctx->cfg_edges[from_bb->successors]];
for (j = 0; j < to_bb->predecessors_count; j++) {
if (ctx->cfg_edges[to_bb->predecessors + j] == from_block) {
k = j + 2;
for (b = 0; b < to_bb->predecessors_count; b++) {
if (ctx->cfg_edges[to_bb->predecessors + b] == from_block) {
k = b + 2;
break;
}
}
@ -4607,7 +4608,7 @@ static void ir_fix_param_spills(ir_ctx *ctx)
static void ir_allocate_unique_spill_slots(ir_ctx *ctx)
{
int b;
uint32_t b;
ir_block *bb;
ir_insn *insn;
ir_ref i, n, j, *p;
@ -4928,7 +4929,7 @@ static void* dasm_labels[ir_lb_MAX];
void *ir_emit_code(ir_ctx *ctx, size_t *size_ptr)
{
int b, n, target;
uint32_t b, n, target;
ir_block *bb;
ir_ref i;
ir_insn *insn;

View File

@ -48,11 +48,12 @@ static ir_ref ir_merge_blocks(ir_ctx *ctx, ir_ref end, ir_ref begin)
int ir_build_cfg(ir_ctx *ctx)
{
ir_ref n, j, *p, ref, b;
ir_ref n, j, *p, ref;
uint32_t b;
ir_insn *insn;
uint32_t flags;
ir_worklist worklist;
uint32_t bb_count = 0;
uint32_t count, bb_count = 0;
uint32_t edges_count = 0;
ir_block *blocks, *bb;
uint32_t *_blocks, *edges;
@ -260,16 +261,16 @@ next_successor:
}
bb = blocks + 1;
n = 0;
count = 0;
for (b = 1; b <= bb_count; b++, bb++) {
bb->successors = n;
n += bb->successors_count;
bb->successors = count;
count += bb->successors_count;
bb->successors_count = 0;
bb->predecessors = n;
n += bb->predecessors_count;
bb->predecessors = count;
count += bb->predecessors_count;
bb->predecessors_count = 0;
}
IR_ASSERT(n == edges_count * 2);
IR_ASSERT(count == edges_count * 2);
/* Create an array of successor control edges */
edges = ir_mem_malloc(edges_count * 2 * sizeof(uint32_t));
@ -390,7 +391,7 @@ int ir_build_dominators_tree(ir_ctx *ctx)
continue;
}
if (bb->predecessors_count == 1) {
int idom = 0;
uint32_t idom = 0;
uint32_t pred_b = edges[bb->predecessors];
ir_block *pred_bb = &blocks[pred_b];
@ -402,7 +403,7 @@ int ir_build_dominators_tree(ir_ctx *ctx)
changed = 1;
}
} else if (bb->predecessors_count) {
int idom = 0;
uint32_t idom = 0;
uint32_t k = bb->predecessors_count;
uint32_t *p = edges + bb->predecessors;
do {
@ -604,7 +605,7 @@ next:
}
if (j != i) {
ir_block *bb = &blocks[j];
if (bb->idom < 0 && j != 1) {
if (bb->idom == 0 && j != 1) {
/* Ignore blocks that are unreachable or only abnormally reachable. */
continue;
}
@ -676,7 +677,7 @@ int ir_schedule_blocks(ir_ctx *ctx)
if (bb->predecessors_count > 1) {
/* Insert empty ENTRY blocks */
for (j = 0, p = &ctx->cfg_edges[bb->predecessors]; j < bb->predecessors_count; j++, p++) {
ir_ref predecessor = *p;
uint32_t predecessor = *p;
if (ir_bitqueue_in(&blocks, predecessor)
&& (ctx->cfg_blocks[predecessor].flags & IR_BB_ENTRY)
@ -798,7 +799,7 @@ int ir_schedule_blocks(ir_ctx *ctx)
}
/* JMP target optimisation */
int ir_skip_empty_target_blocks(ir_ctx *ctx, int b)
uint32_t ir_skip_empty_target_blocks(ir_ctx *ctx, uint32_t b)
{
ir_block *bb;
@ -816,7 +817,7 @@ int ir_skip_empty_target_blocks(ir_ctx *ctx, int b)
return b;
}
int ir_skip_empty_next_blocks(ir_ctx *ctx, int b)
uint32_t ir_skip_empty_next_blocks(ir_ctx *ctx, uint32_t b)
{
ir_block *bb;
@ -838,7 +839,7 @@ int ir_skip_empty_next_blocks(ir_ctx *ctx, int b)
return b;
}
void ir_get_true_false_blocks(ir_ctx *ctx, int b, int *true_block, int *false_block, int *next_block)
void ir_get_true_false_blocks(ir_ctx *ctx, uint32_t b, uint32_t *true_block, uint32_t *false_block, uint32_t *next_block)
{
ir_block *bb;
uint32_t *p, use_block;

View File

@ -113,6 +113,7 @@ bool ir_check(ir_ctx *ctx)
if (j == 1) {
break;
}
IR_FALLTHROUGH;
case IR_ADD:
case IR_SUB:
case IR_MUL:
@ -178,6 +179,7 @@ bool ir_check(ir_ctx *ctx)
ok = 0;
}
}
break;
case IR_OPND_CONTROL_DEP:
if (use >= i
&& !(insn->op == IR_LOOP_BEGIN)) {
@ -190,6 +192,7 @@ bool ir_check(ir_ctx *ctx)
ok = 0;
}
}
break;
case IR_OPND_CONTROL_REF:
if (!(ir_op_flags[use_insn->op] & IR_OP_FLAG_CONTROL)) {
fprintf(stderr, "ir_base[%d].ops[%d] reference (%d) must be CONTROL\n", i, j, use);
@ -258,6 +261,7 @@ bool ir_check(ir_ctx *ctx)
/* UNREACHABLE and IJMP may be used in MERGE with the following ENTRY */
break;
}
IR_FALLTHROUGH;
case IR_RETURN:
if (use_list->count != 0) {
fprintf(stderr, "ir_base[%d].op (%s) must not have successors (%d)\n",

View File

@ -456,7 +456,7 @@ int ir_disasm(const char *name,
for (i = 0; i < count; i++) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)insn->address - (uintptr_t)start));
# endif
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
if (entry >= 0) {
fprintf(f, ".ENTRY_%d:\n", entry);
} else {
@ -496,7 +496,7 @@ int ir_disasm(const char *name,
# endif
if (addr >= (uint64_t)(uintptr_t)end && addr < (uint64_t)(uintptr_t)orig_end) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)addr - (uintptr_t)start));
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
r = q = strstr(p, "(%rip)");
if (r && r > p) {
r--;
@ -544,7 +544,7 @@ int ir_disasm(const char *name,
}
if (addr >= (uint64_t)(uintptr_t)start && addr < (uint64_t)(uintptr_t)orig_end) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)addr - (uintptr_t)start));
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
fwrite(p, 1, q - p, f);
if (entry >= 0) {
fprintf(f, ".ENTRY_%d", entry);
@ -587,7 +587,7 @@ int ir_disasm(const char *name,
while (n > 0) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)p - (uintptr_t)start));
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
if (entry >= 0) {
fprintf(f, ".ENTRY_%d:\n", entry);
} else {
@ -600,7 +600,7 @@ int ir_disasm(const char *name,
j = 15;
while (n > 0 && j > 0) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)p - (uintptr_t)start));
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
break;
}
fprintf(f, ", 0x%02x", (int)*p);
@ -620,7 +620,7 @@ int ir_disasm(const char *name,
p = (uintptr_t*)((char*)start + jmp_table_offset);
while (n > 0) {
entry = ir_hashtab_find(&labels, (uint32_t)((uintptr_t)p - (uintptr_t)start));
if (entry != IR_INVALID_VAL) {
if (entry != (ir_ref)IR_INVALID_VAL) {
if (entry >= 0) {
fprintf(f, ".ENTRY_%d:\n", entry);
} else {
@ -630,7 +630,7 @@ int ir_disasm(const char *name,
if (*p) {
IR_ASSERT((uintptr_t)*p >= (uintptr_t)start && (uintptr_t)*p < (uintptr_t)orig_end);
entry = ir_hashtab_find(&labels, (uint32_t)(*p - (uintptr_t)start));
IR_ASSERT(entry != IR_INVALID_VAL && entry < 0);
IR_ASSERT(entry != (ir_ref)IR_INVALID_VAL && entry < 0);
if (sizeof(void*) == 8) {
fprintf(f, "\t.qword .L%d\n", -entry);
} else {

View File

@ -271,8 +271,7 @@ void ir_dump_cfg_map(ir_ctx *ctx, FILE *f)
void ir_dump_live_ranges(ir_ctx *ctx, FILE *f)
{
uint32_t i, n;
ir_ref j;
ir_ref i, j, n;
if (!ctx->live_intervals) {
return;
@ -289,13 +288,13 @@ void ir_dump_live_ranges(ir_ctx *ctx, FILE *f)
fprintf(f, "TMP");
} else {
for (j = 1; j < ctx->insns_count; j++) {
if (ctx->vregs[j] == i) {
if (ctx->vregs[j] == (uint32_t)i) {
break;
}
}
fprintf(f, "R%d (d_%d", i, j);
for (j++; j < ctx->insns_count; j++) {
if (ctx->vregs[j] == i) {
if (ctx->vregs[j] == (uint32_t)i) {
fprintf(f, ", d_%d", j);
}
}

View File

@ -35,10 +35,6 @@
# define DASM_CHECKS
#endif
#if defined(__GNUC__)
# pragma GCC diagnostic ignored "-Warray-bounds"
#endif
typedef struct _ir_copy {
ir_type type;
ir_reg from;
@ -106,13 +102,29 @@ static void *ir_jmp_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn)
return addr;
}
#if defined(__GNUC__)
# pragma GCC push_options
# pragma GCC diagnostic ignored "-Warray-bounds"
# pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#endif
#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
# include "dynasm/dasm_proto.h"
# include "dynasm/dasm_x86.h"
# include "ir_emit_x86.h"
#elif defined(IR_TARGET_AARCH64)
# include "dynasm/dasm_proto.h"
# include "dynasm/dasm_arm64.h"
#else
# error "Unknown IR target"
#endif
#if defined(__GNUC__)
# pragma GCC pop_options
#endif
#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
# include "ir_emit_x86.h"
#elif defined(IR_TARGET_AARCH64)
# include "ir_emit_aarch64.h"
#else
# error "Unknown IR target"
@ -120,8 +132,8 @@ static void *ir_jmp_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn)
int ir_match(ir_ctx *ctx)
{
int b, n;
ir_ref i;
uint32_t b;
ir_ref i, n;
ir_block *bb;
ir_insn *insn;

View File

@ -461,9 +461,9 @@ static void ir_emit_abs(ir_ctx *ctx, FILE *f, int def, ir_insn *insn)
}
}
static void ir_emit_if(ir_ctx *ctx, FILE *f, int b, ir_ref def, ir_insn *insn)
static void ir_emit_if(ir_ctx *ctx, FILE *f, uint32_t b, ir_ref def, ir_insn *insn)
{
int true_block = 0, false_block = 0, next_block;
uint32_t true_block = 0, false_block = 0, next_block;
bool short_true = 0, short_false = 0;
ir_get_true_false_blocks(ctx, b, &true_block, &false_block, &next_block);
@ -489,7 +489,7 @@ static void ir_emit_if(ir_ctx *ctx, FILE *f, int b, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_switch(ir_ctx *ctx, FILE *f, int b, ir_ref def, ir_insn *insn)
static void ir_emit_switch(ir_ctx *ctx, FILE *f, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_block *bb;
uint32_t n, *p, use_block;
@ -667,7 +667,7 @@ static int ir_emit_func(ir_ctx *ctx, FILE *f)
uint8_t ret_type;
bool has_params = 0;
ir_bitset vars;
int b, target, prev = 0;
uint32_t b, target, prev = 0;
ir_block *bb;
ret_type = ir_get_return_type(ctx);

View File

@ -117,7 +117,7 @@ int ir_gcm(ir_ctx *ctx)
ir_bitset visited;
ir_block *bb;
ir_list queue;
uint32_t *_blocks;
uint32_t *_blocks, b;
ir_insn *insn, *use_insn;
ir_use_list *use_list;
uint32_t flags;
@ -127,7 +127,7 @@ int ir_gcm(ir_ctx *ctx)
visited = ir_bitset_malloc(ctx->insns_count);
/* pin and collect control and control depended (PARAM, VAR, PHI, PI) instructions */
for (i = 1, bb = ctx->cfg_blocks + 1; i <= ctx->cfg_blocks_count; i++, bb++) {
for (b = 1, bb = ctx->cfg_blocks + 1; b <= ctx->cfg_blocks_count; b++, bb++) {
if (bb->flags & IR_BB_UNREACHABLE) {
continue;
}
@ -135,7 +135,7 @@ int ir_gcm(ir_ctx *ctx)
while (1) {
insn = &ctx->ir_base[j];
ir_bitset_incl(visited, j);
_blocks[j] = i; /* pin to block */
_blocks[j] = b; /* pin to block */
flags = ir_op_flags[insn->op];
if (IR_OPND_KIND(flags, 2) == IR_OPND_DATA
|| IR_OPND_KIND(flags, 3) == IR_OPND_DATA
@ -149,7 +149,7 @@ int ir_gcm(ir_ctx *ctx)
ref = *p;
use_insn = &ctx->ir_base[ref];
if (use_insn->op == IR_PARAM || use_insn->op == IR_VAR) {
_blocks[ref] = i; /* pin to block */
_blocks[ref] = b; /* pin to block */
ir_bitset_incl(visited, ref);
} else
if (use_insn->op == IR_PHI || use_insn->op == IR_PI) {
@ -157,7 +157,7 @@ int ir_gcm(ir_ctx *ctx)
if (UNEXPECTED(ctx->use_lists[ref].count == 0)) {
// TODO: Unused PHI ???
} else {
_blocks[ref] = i; /* pin to block */
_blocks[ref] = b; /* pin to block */
ir_list_push(&queue, ref);
}
}
@ -235,7 +235,8 @@ int ir_gcm(ir_ctx *ctx)
static void ir_xlat_binding(ir_ctx *ctx, ir_ref *_xlat)
{
uint32_t n1, n2, pos, key;
uint32_t n1, n2, pos;
ir_ref key;
ir_hashtab_bucket *b1, *b2;
ir_hashtab *binding = ctx->binding;
uint32_t hash_size = (uint32_t)(-(int32_t)binding->mask);
@ -259,8 +260,8 @@ static void ir_xlat_binding(ir_ctx *ctx, ir_ref *_xlat)
b2->val = b1->val;
}
key |= binding->mask;
b2->next = ((uint32_t*)binding->data)[(int32_t)key];
((uint32_t*)binding->data)[(int32_t)key] = pos;
b2->next = ((uint32_t*)binding->data)[key];
((uint32_t*)binding->data)[key] = pos;
pos += sizeof(ir_hashtab_bucket);
b2++;
n2++;
@ -281,7 +282,7 @@ int ir_schedule(ir_ctx *ctx)
ir_use_list *lists;
ir_ref *edges;
ir_bitset used;
ir_ref b;
uint32_t b;
uint32_t *_blocks = ctx->cfg_map;
ir_ref *_next = ir_mem_calloc(ctx->insns_count, sizeof(ir_ref));
ir_ref *_prev = ir_mem_calloc(ctx->insns_count, sizeof(ir_ref));

View File

@ -690,7 +690,7 @@ static int parse_ir_insn(int sym, ir_parser_ctx *p) {
ir_val val;
ir_val count;
ir_val flags;
uint32_t n;
int32_t n;
save_pos = yy_pos;
save_text = yy_text;
save_line = yy_line;
@ -893,7 +893,7 @@ static int parse_val(int sym, ir_parser_ctx *p, uint8_t op, uint32_t n, ir_ref *
} else if (sym == YY_DECNUMBER) {
sym = parse_DECNUMBER(sym, IR_I32, &val);
if (kind != IR_OPND_NUM && kind != IR_OPND_PROB) yy_error("unexpected number");
if (val.u64 < 0 && val.u64 >= 0x7ffffff) yy_error("number out of range");
if (val.i64 < 0 && val.i64 >= 0x7ffffff) yy_error("number out of range");
*ref = val.u64;
} else if (sym == YY_NULL) {
sym = get_sym();

View File

@ -31,6 +31,9 @@
# if __has_attribute(__aligned__)
# define IR_SET_ALIGNED(alignment, decl) decl __attribute__ ((__aligned__ (alignment)))
# endif
# if __has_attribute(__fallthrough__)
# define IR_FALLTHROUGH __attribute__((__fallthrough__))
# endif
#elif defined(_WIN32)
# define IR_SET_ALIGNED(alignment, decl) __declspec(align(alignment)) decl
#endif
@ -47,6 +50,9 @@
#ifndef IR_SET_ALIGNED
# define IR_SET_ALIGNED(alignment, decl) decl
#endif
#ifndef IR_FALLTHROUGH
# define IR_FALLTHROUGH ((void)0)
#endif
/*** Helper routines ***/
@ -595,7 +601,7 @@ IR_ALWAYS_INLINE void ir_worklist_clear(ir_worklist *w)
IR_ALWAYS_INLINE bool ir_worklist_push(ir_worklist *w, ir_ref val)
{
IR_ASSERT(val >= 0 && val < ir_worklist_capasity(w));
IR_ASSERT(val >= 0 && (uint32_t)val < ir_worklist_capasity(w));
if (ir_bitset_in(w->visited, val)) {
return 0;
}
@ -736,7 +742,7 @@ IR_ALWAYS_INLINE ir_ref ir_input_edges_count(ir_ctx *ctx, ir_insn *insn)
IR_ALWAYS_INLINE ir_ref ir_binding_find(ir_ctx *ctx, ir_ref ref)
{
ir_ref var = ir_hashtab_find(ctx->binding, ref);
return (var != IR_INVALID_VAL) ? var : 0;
return (var != (ir_ref)IR_INVALID_VAL) ? var : 0;
}
/*** IR Use Lists ***/
@ -772,22 +778,22 @@ struct _ir_block {
uint32_t predecessors; /* index in ir_ctx->cfg_edges[] array */
uint32_t predecessors_count;
union {
int dom_parent; /* immediate dominator block */
int idom; /* immediate dominator block */
uint32_t dom_parent; /* immediate dominator block */
uint32_t idom; /* immediate dominator block */
};
union {
int dom_depth; /* depth from the root of the dominators tree */
int postnum; /* used temporary during tree constructon */
uint32_t dom_depth; /* depth from the root of the dominators tree */
uint32_t postnum; /* used temporarily during tree construction */
};
int dom_child; /* first dominated blocks */
int dom_next_child; /* next dominated block (linked list) */
int loop_header;
int loop_depth;
uint32_t dom_child; /* first dominated block */
uint32_t dom_next_child; /* next dominated block (linked list) */
uint32_t loop_header;
uint32_t loop_depth;
};
int ir_skip_empty_target_blocks(ir_ctx *ctx, int b);
int ir_skip_empty_next_blocks(ir_ctx *ctx, int b);
void ir_get_true_false_blocks(ir_ctx *ctx, int b, int *true_block, int *false_block, int *next_block);
uint32_t ir_skip_empty_target_blocks(ir_ctx *ctx, uint32_t b);
uint32_t ir_skip_empty_next_blocks(ir_ctx *ctx, uint32_t b);
void ir_get_true_false_blocks(ir_ctx *ctx, uint32_t b, uint32_t *true_block, uint32_t *false_block, uint32_t *next_block);
/*** Folding Engine (see ir.c and ir_fold.h) ***/
typedef enum _ir_fold_action {
@ -877,7 +883,7 @@ struct _ir_live_interval {
typedef int (*emit_copy_t)(ir_ctx *ctx, uint8_t type, ir_ref from, ir_ref to);
int ir_gen_dessa_moves(ir_ctx *ctx, int b, emit_copy_t emit_copy);
int ir_gen_dessa_moves(ir_ctx *ctx, uint32_t b, emit_copy_t emit_copy);
void ir_free_live_ranges(ir_live_range *live_range);
void ir_free_live_intervals(ir_live_interval **live_intervals, int count);

149
ir_ra.c
View File

@ -46,7 +46,8 @@ int ir_assign_virtual_registers(ir_ctx *ctx)
{
uint32_t *vregs;
uint32_t vregs_count = 0;
int b, i, n;
uint32_t b;
ir_ref i, n;
ir_block *bb;
ir_insn *insn;
uint32_t flags;
@ -340,8 +341,8 @@ static void ir_add_phi_use(ir_ctx *ctx, int v, int op_num, ir_live_pos pos, ir_r
int ir_compute_live_ranges(ir_ctx *ctx)
{
int i, j, k, n;
int b, succ;
uint32_t b, i, j, k, n, succ;
ir_ref ref;
uint32_t flags, len;
ir_insn *insn;
ir_block *bb, *succ_bb;
@ -393,8 +394,8 @@ int ir_compute_live_ranges(ir_ctx *ctx)
}
}
IR_ASSERT(k != 0);
for (j = 0; j < use_list->count; j++) {
ir_ref use = ctx->use_edges[use_list->refs + j];
for (ref = 0; ref < use_list->count; ref++) {
ir_ref use = ctx->use_edges[use_list->refs + ref];
insn = &ctx->ir_base[use];
if (insn->op == IR_PHI) {
if (ir_insn_op(insn, k) > 0) {
@ -422,84 +423,84 @@ int ir_compute_live_ranges(ir_ctx *ctx)
} IR_BITSET_FOREACH_END();
/* for each operation op of b in reverse order */
for (i = bb->end; i > bb->start; i -= ctx->prev_insn_len[i]) {
for (ref = bb->end; ref > bb->start; ref -= ctx->prev_insn_len[ref]) {
uint8_t def_flags = 0;
insn = &ctx->ir_base[i];
insn = &ctx->ir_base[ref];
flags = ir_op_flags[insn->op];
if (ctx->rules) {
ir_tmp_reg tmp_regs[4];
int n = ir_get_temporary_regs(ctx, i, tmp_regs);
int n = ir_get_temporary_regs(ctx, ref, tmp_regs);
while (n > 0) {
n--;
ir_add_tmp(ctx, i, tmp_regs[n]);
ir_add_tmp(ctx, ref, tmp_regs[n]);
}
}
if ((flags & IR_OP_FLAG_DATA) || ((flags & IR_OP_FLAG_MEM) && insn->type != IR_VOID)) {
if (ctx->vregs[i]) {
if (ir_bitset_in(live, ctx->vregs[i])) {
if (ctx->vregs[ref]) {
if (ir_bitset_in(live, ctx->vregs[ref])) {
if (insn->op == IR_RLOAD) {
ir_fix_live_range(ctx, ctx->vregs[i],
IR_START_LIVE_POS_FROM_REF(bb->start), IR_DEF_LIVE_POS_FROM_REF(i));
ctx->live_intervals[ctx->vregs[i]]->flags = IR_LIVE_INTERVAL_REG_LOAD;
ctx->live_intervals[ctx->vregs[i]]->reg = insn->op2;
ir_fix_live_range(ctx, ctx->vregs[ref],
IR_START_LIVE_POS_FROM_REF(bb->start), IR_DEF_LIVE_POS_FROM_REF(ref));
ctx->live_intervals[ctx->vregs[ref]]->flags = IR_LIVE_INTERVAL_REG_LOAD;
ctx->live_intervals[ctx->vregs[ref]]->reg = insn->op2;
} else if (insn->op != IR_PHI) {
ir_live_pos def_pos;
ir_ref hint_ref = 0;
if (ctx->rules) {
def_flags = ir_get_def_flags(ctx, i, &reg);
def_flags = ir_get_def_flags(ctx, ref, &reg);
} else {
reg = IR_REG_NONE;
}
if (reg != IR_REG_NONE) {
def_pos = IR_SAVE_LIVE_POS_FROM_REF(i);
def_pos = IR_SAVE_LIVE_POS_FROM_REF(ref);
if (insn->op == IR_PARAM) {
/* parameter register must be kept before it's copied */
ir_add_fixed_live_range(ctx, &unused, reg,
IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
} else {
ir_add_fixed_live_range(ctx, &unused, reg,
IR_DEF_LIVE_POS_FROM_REF(i), def_pos);
IR_DEF_LIVE_POS_FROM_REF(ref), def_pos);
}
} else if (def_flags & IR_DEF_REUSES_OP1_REG) {
/* We add two uses to emulate move from op1 to res */
ir_add_use(ctx, ctx->vregs[i], 0, IR_DEF_LIVE_POS_FROM_REF(i), reg, def_flags, 0);
def_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
ir_add_use(ctx, ctx->vregs[ref], 0, IR_DEF_LIVE_POS_FROM_REF(ref), reg, def_flags, 0);
def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
if (!IR_IS_CONST_REF(insn->op1)) {
IR_ASSERT(ctx->vregs[insn->op1]);
hint_ref = insn->op1;
}
} else if (def_flags & IR_DEF_CONFLICTS_WITH_INPUT_REGS) {
def_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
} else {
if (insn->op == IR_PARAM) {
/* We may reuse parameter stack slot for spilling */
ctx->live_intervals[ctx->vregs[i]]->flags |= IR_LIVE_INTERVAL_MEM_PARAM;
ctx->live_intervals[ctx->vregs[ref]]->flags |= IR_LIVE_INTERVAL_MEM_PARAM;
} else if (insn->op == IR_VLOAD) {
/* Load may be fused into the usage instruction */
ctx->live_intervals[ctx->vregs[i]]->flags |= IR_LIVE_INTERVAL_MEM_LOAD;
ctx->live_intervals[ctx->vregs[ref]]->flags |= IR_LIVE_INTERVAL_MEM_LOAD;
}
def_pos = IR_DEF_LIVE_POS_FROM_REF(i);
def_pos = IR_DEF_LIVE_POS_FROM_REF(ref);
}
/* intervals[opd].setFrom(op.id) */
ir_fix_live_range(ctx, ctx->vregs[i],
ir_fix_live_range(ctx, ctx->vregs[ref],
IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
ir_add_use(ctx, ctx->vregs[i], 0, def_pos, reg, def_flags, hint_ref);
ir_add_use(ctx, ctx->vregs[ref], 0, def_pos, reg, def_flags, hint_ref);
} else {
ir_add_use(ctx, ctx->vregs[i], 0, IR_DEF_LIVE_POS_FROM_REF(i), IR_REG_NONE, IR_USE_SHOULD_BE_IN_REG, 0);
ir_add_use(ctx, ctx->vregs[ref], 0, IR_DEF_LIVE_POS_FROM_REF(ref), IR_REG_NONE, IR_USE_SHOULD_BE_IN_REG, 0);
}
/* live.remove(opd) */
ir_bitset_excl(live, ctx->vregs[i]);
ir_bitset_excl(live, ctx->vregs[ref]);
} else if (insn->op == IR_VAR) {
if (ctx->use_lists[i].count > 0) {
ir_add_local_var(ctx, ctx->vregs[i], insn->type);
if (ctx->use_lists[ref].count > 0) {
ir_add_local_var(ctx, ctx->vregs[ref], insn->type);
}
}
}
}
if (insn->op != IR_PHI && (!ctx->rules || ctx->rules[i] != IR_SKIP_MEM)) {
if (insn->op != IR_PHI && (!ctx->rules || ctx->rules[ref] != IR_SKIP_MEM)) {
n = ir_input_edges_count(ctx, insn);
for (j = 1; j <= n; j++) {
if (IR_OPND_KIND(flags, j) == IR_OPND_DATA) {
@ -507,7 +508,7 @@ int ir_compute_live_ranges(ir_ctx *ctx)
uint8_t use_flags;
if (ctx->rules) {
use_flags = ir_get_use_flags(ctx, i, j, &reg);
use_flags = ir_get_use_flags(ctx, ref, j, &reg);
} else {
use_flags = 0;
reg = IR_REG_NONE;
@ -537,23 +538,23 @@ int ir_compute_live_ranges(ir_ctx *ctx)
ir_ref hint_ref = 0;
if ((def_flags & IR_DEF_REUSES_OP1_REG) && j == 1) {
use_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
IR_ASSERT(ctx->vregs[i]);
hint_ref = i;
use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
IR_ASSERT(ctx->vregs[ref]);
hint_ref = ref;
if (reg != IR_REG_NONE) {
ir_add_fixed_live_range(ctx, &unused, reg,
use_pos, IR_USE_LIVE_POS_FROM_REF(i));
use_pos, IR_USE_LIVE_POS_FROM_REF(ref));
}
} else {
if (reg != IR_REG_NONE) {
use_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
ir_add_fixed_live_range(ctx, &unused, reg,
use_pos, IR_USE_LIVE_POS_FROM_REF(i));
use_pos, IR_USE_LIVE_POS_FROM_REF(ref));
} else if ((def_flags & IR_DEF_REUSES_OP1_REG) && input == insn->op1) {
/* Input is the same as "op1" */
use_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
} else {
use_pos = IR_USE_LIVE_POS_FROM_REF(i);
use_pos = IR_USE_LIVE_POS_FROM_REF(ref);
}
}
/* intervals[opd].addRange(b.from, op.id) */
@ -565,7 +566,7 @@ int ir_compute_live_ranges(ir_ctx *ctx)
} else {
if (reg != IR_REG_NONE) {
ir_add_fixed_live_range(ctx, &unused, reg,
IR_LOAD_LIVE_POS_FROM_REF(i), IR_USE_LIVE_POS_FROM_REF(i));
IR_LOAD_LIVE_POS_FROM_REF(ref), IR_USE_LIVE_POS_FROM_REF(ref));
}
}
}
@ -573,17 +574,17 @@ int ir_compute_live_ranges(ir_ctx *ctx)
/* CPU specific constraints */
if (insn->op == IR_CALL) {
ir_add_fixed_live_range(ctx, &unused, IR_REG_NUM,
IR_START_LIVE_POS_FROM_REF(i) + IR_USE_SUB_REF,
IR_START_LIVE_POS_FROM_REF(i) + IR_DEF_SUB_REF);
IR_START_LIVE_POS_FROM_REF(ref) + IR_USE_SUB_REF,
IR_START_LIVE_POS_FROM_REF(ref) + IR_DEF_SUB_REF);
} else if (ctx->rules) {
ir_live_pos start, end;
ir_regset regset = ir_get_scratch_regset(ctx, i, &start, &end);
ir_regset regset = ir_get_scratch_regset(ctx, ref, &start, &end);
if (regset != IR_REGSET_EMPTY) {
IR_REGSET_FOREACH(regset, reg) {
ir_add_fixed_live_range(ctx, &unused, reg,
IR_START_LIVE_POS_FROM_REF(i) + start,
IR_START_LIVE_POS_FROM_REF(i) + end);
IR_START_LIVE_POS_FROM_REF(ref) + start,
IR_START_LIVE_POS_FROM_REF(ref) + end);
} IR_REGSET_FOREACH_END();
}
}
@ -662,7 +663,7 @@ void ir_free_live_ranges(ir_live_range *live_range)
void ir_free_live_intervals(ir_live_interval **live_intervals, int count)
{
uint32_t i;
int i;
ir_live_interval *ival, *next;
ir_use_pos *use_pos;
@ -763,8 +764,8 @@ static void ir_vregs_join(ir_ctx *ctx, ir_live_range **unused, uint32_t r1, uint
static bool ir_try_coalesce(ir_ctx *ctx, ir_live_range **unused, ir_ref from, ir_ref to)
{
ir_ref i;
int v1 = ctx->vregs[from];
int v2 = ctx->vregs[to];
uint32_t v1 = ctx->vregs[from];
uint32_t v2 = ctx->vregs[to];
if (v1 != v2 && !ir_vregs_overlap(ctx, v1, v2)) {
uint8_t f1 = ctx->live_intervals[v1]->flags;
@ -804,7 +805,7 @@ static bool ir_try_coalesce(ir_ctx *ctx, ir_live_range **unused, ir_ref from, ir
return 0;
}
static void ir_add_phi_move(ir_ctx *ctx, int b, ir_ref from, ir_ref to)
static void ir_add_phi_move(ir_ctx *ctx, uint32_t b, ir_ref from, ir_ref to)
{
if (IR_IS_CONST_REF(from) || ctx->vregs[from] != ctx->vregs[to]) {
ctx->cfg_blocks[b].flags |= IR_BB_DESSA_MOVES;
@ -968,7 +969,7 @@ static int ir_try_swap_operands(ir_ctx *ctx, ir_ref i, ir_insn *insn)
int ir_coalesce(ir_ctx *ctx)
{
int b, i, n, succ;
uint32_t b, n, succ;
ir_ref *p, use, input, k, j;
ir_block *bb, *succ_bb;
ir_use_list *use_list;
@ -986,6 +987,8 @@ int ir_coalesce(ir_ctx *ctx)
continue;
}
if (bb->predecessors_count > 1) {
uint32_t i;
use_list = &ctx->use_lists[bb->start];
n = use_list->count;
for (i = 0, p = &ctx->use_edges[use_list->refs]; i < n; i++, p++) {
@ -1004,6 +1007,8 @@ int ir_coalesce(ir_ctx *ctx)
qsort_r(blocks.l.a.refs, ir_worklist_len(&blocks), sizeof(ir_ref), ir_block_cmp, ctx);
while (ir_worklist_len(&blocks)) {
uint32_t i;
b = ir_worklist_pop(&blocks);
bb = &ctx->cfg_blocks[b];
IR_ASSERT(bb->successors_count == 1);
@ -1011,9 +1016,9 @@ int ir_coalesce(ir_ctx *ctx)
succ_bb = &ctx->cfg_blocks[succ];
IR_ASSERT(succ_bb->predecessors_count > 1);
k = 0;
for (j = 0; j < succ_bb->predecessors_count; j++) {
if (ctx->cfg_edges[succ_bb->predecessors + j] == b) {
k = j + 2;
for (i = 0; i < succ_bb->predecessors_count; i++) {
if (ctx->cfg_edges[succ_bb->predecessors + i] == b) {
k = i + 2;
break;
}
}
@ -1047,6 +1052,8 @@ int ir_coalesce(ir_ctx *ctx)
if (ctx->rules) {
/* try to swap operands of commutative instructions for better register allocation */
for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
ir_ref i;
for (i = bb->start, insn = ctx->ir_base + i; i <= bb->end;) {
if (ir_get_def_flags(ctx, i, &reg) & IR_DEF_REUSES_OP1_REG) {
if (insn->op2 > 0 && insn->op1 != insn->op2
@ -1072,6 +1079,8 @@ int ir_coalesce(ir_ctx *ctx)
if (compact) {
#if 1
ir_ref i, n;
offsets = ir_mem_calloc(ctx->vregs_count + 1, sizeof(uint32_t));
for (i = 1, n = 1; i <= ctx->vregs_count; i++) {
if (ctx->live_intervals[i]) {
@ -1110,7 +1119,7 @@ int ir_coalesce(ir_ctx *ctx)
int ir_compute_dessa_moves(ir_ctx *ctx)
{
int b, i, n;
uint32_t b, i, n;
ir_ref j, k, *p, use;
ir_block *bb;
ir_use_list *use_list;
@ -1141,12 +1150,12 @@ int ir_compute_dessa_moves(ir_ctx *ctx)
return 1;
}
int ir_gen_dessa_moves(ir_ctx *ctx, int b, emit_copy_t emit_copy)
int ir_gen_dessa_moves(ir_ctx *ctx, uint32_t b, emit_copy_t emit_copy)
{
int succ, j, k = 0, n = 0;
uint32_t succ, j, k = 0, n = 0;
ir_block *bb, *succ_bb;
ir_use_list *use_list;
ir_ref *loc, *pred;
ir_ref *loc, *pred, i;
uint32_t len;
ir_bitset todo, ready;
@ -1174,8 +1183,8 @@ int ir_gen_dessa_moves(ir_ctx *ctx, int b, emit_copy_t emit_copy)
todo = ir_bitset_malloc(ctx->insns_count);
ready = ir_bitset_malloc(ctx->insns_count);
for (j = 0; j < use_list->count; j++) {
ir_ref ref = ctx->use_edges[use_list->refs + j];
for (i = 0; i < use_list->count; i++) {
ir_ref ref = ctx->use_edges[use_list->refs + i];
ir_insn *insn = &ctx->ir_base[ref];
if (insn->op == IR_PHI) {
ir_ref input = ir_insn_op(insn, k);
@ -1190,16 +1199,16 @@ int ir_gen_dessa_moves(ir_ctx *ctx, int b, emit_copy_t emit_copy)
}
}
IR_BITSET_FOREACH(todo, len, j) {
if (!loc[j]) {
ir_bitset_incl(ready, j);
IR_BITSET_FOREACH(todo, len, i) {
if (!loc[i]) {
ir_bitset_incl(ready, i);
}
} IR_BITSET_FOREACH_END();
while ((j = ir_bitset_pop_first(todo, len)) >= 0) {
uint32_t a, b, c;
while ((i = ir_bitset_pop_first(todo, len)) >= 0) {
ir_ref a, b, c;
while ((b = ir_bitset_pop_first(ready, len)) != (uint32_t)-1) {
while ((b = ir_bitset_pop_first(ready, len)) >= 0) {
a = pred[b];
c = loc[a];
emit_copy(ctx, ctx->ir_base[b].type, c, b);
@ -1208,7 +1217,7 @@ int ir_gen_dessa_moves(ir_ctx *ctx, int b, emit_copy_t emit_copy)
ir_bitset_incl(ready, a);
}
}
b = j;
b = i;
if (b != loc[pred[b]]) {
emit_copy(ctx, ctx->ir_base[b].type, b, 0);
loc[b] = 0;
@ -1360,7 +1369,7 @@ static ir_live_pos ir_first_use_pos_after(ir_live_interval *ival, ir_live_pos po
static ir_block *ir_block_from_live_pos(ir_ctx *ctx, ir_live_pos pos)
{
int b;
uint32_t b;
ir_block *bb;
ir_ref ref = IR_LIVE_POS_TO_REF(pos);
@ -2223,7 +2232,7 @@ static void ir_assign_bound_spill_slots(ir_ctx *ctx)
static int ir_linear_scan(ir_ctx *ctx)
{
int b;
uint32_t b;
ir_block *bb;
ir_live_interval *unhandled = NULL;
ir_live_interval *active = NULL;
@ -2492,7 +2501,7 @@ static int ir_linear_scan(ir_ctx *ctx)
static void assign_regs(ir_ctx *ctx)
{
uint32_t i;
ir_ref i;
ir_live_interval *ival;
ir_use_pos *use_pos;
int8_t reg;

View File

@ -92,6 +92,7 @@ void ir_save(ir_ctx *ctx, FILE *f)
if (ref == 0) {
break;
}
IR_FALLTHROUGH;
case IR_OPND_NUM:
fprintf(f, "%s%d", first ? "(" : ", ", ref);
first = 0;

View File

@ -199,7 +199,7 @@ ir_ref ir_strtab_update(ir_strtab *strtab, const char *str, uint32_t len, ir_ref
const char *ir_strtab_str(ir_strtab *strtab, ir_ref idx)
{
IR_ASSERT(idx >= 0 && idx < strtab->count);
IR_ASSERT(idx >= 0 && (uint32_t)idx < strtab->count);
return ((ir_strtab_bucket*)strtab->data)[idx].str;
}
@ -217,7 +217,8 @@ void ir_strtab_free(ir_strtab *strtab)
void ir_strtab_apply(ir_strtab *strtab, ir_strtab_apply_t func)
{
ir_ref i;
uint32_t i;
for (i = 0; i < strtab->count; i++) {
ir_strtab_bucket *b = &((ir_strtab_bucket*)strtab->data)[i];
func(b->str, b->len, b->val);

View File

@ -1596,6 +1596,7 @@ binop_fp:
case IR_CALL:
ctx->flags |= IR_HAS_CALLS;
IR_FALLTHROUGH;
case IR_TAILCALL:
if (ir_input_edges_count(ctx, insn) == 2 /* no arguments */
&& insn->op2 > bb->start
@ -2570,13 +2571,13 @@ static void ir_emit_overflow(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_overflow_and_branch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_overflow_and_branch(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
ir_insn *overflow_insn = &ctx->ir_base[insn->op2];
ir_type type = ctx->ir_base[overflow_insn->op1].type;
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
bool reverse = 0;
ir_get_true_false_blocks(ctx, b, &true_block, &false_block, &next_block);
@ -3869,7 +3870,7 @@ static void ir_emit_test_int_common(ir_ctx *ctx, ir_ref ref, ir_op op)
}
} else if ((op == IR_EQ || op == IR_NE) && val == 0xffff) {
| test Rw(op1_reg), Rw(op1_reg)
} else if ((op == IR_EQ || op == IR_NE) && val == 0xffffffff) {
} else if ((op == IR_EQ || op == IR_NE) && val == -1) {
| test Rd(op1_reg), Rd(op1_reg)
} else {
| ASM_REG_IMM_OP test, type, op1_reg, val
@ -4048,9 +4049,9 @@ static void ir_emit_cmp_fp(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_jmp_true(ir_ctx *ctx, int b, ir_ref def)
static void ir_emit_jmp_true(ir_ctx *ctx, uint32_t b, ir_ref def)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -4060,9 +4061,9 @@ static void ir_emit_jmp_true(ir_ctx *ctx, int b, ir_ref def)
}
}
static void ir_emit_jmp_false(ir_ctx *ctx, int b, ir_ref def)
static void ir_emit_jmp_false(ir_ctx *ctx, uint32_t b, ir_ref def)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -4072,9 +4073,9 @@ static void ir_emit_jmp_false(ir_ctx *ctx, int b, ir_ref def)
}
}
static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, int b, ir_ref def, ir_insn *insn, bool int_cmp)
static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, uint32_t b, ir_ref def, ir_insn *insn, bool int_cmp)
{
int true_block, false_block, next_block;
uint32_t true_block, false_block, next_block;
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
bool swap = 0;
@ -4192,7 +4193,7 @@ static void ir_emit_jcc(ir_ctx *ctx, uint8_t op, int b, ir_ref def, ir_insn *ins
}
}
static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_insn *cmp_insn = &ctx->ir_base[insn->op2];
ir_op op = cmp_insn->op;
@ -4245,19 +4246,19 @@ static void ir_emit_cmp_and_branch_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *
ir_emit_jcc(ctx, op, b, def, insn, 1);
}
static void ir_emit_test_and_branch_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_test_and_branch_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_emit_test_int_common(ctx, insn->op2, IR_NE);
ir_emit_jcc(ctx, IR_NE, b, def, insn, 1);
}
static void ir_emit_cmp_and_branch_fp(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_cmp_and_branch_fp(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_op op = ir_emit_cmp_fp_common(ctx, insn->op2, &ctx->ir_base[insn->op2]);
ir_emit_jcc(ctx, op, b, def, insn, 0);
}
static void ir_emit_if_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_if_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_type type = ctx->ir_base[insn->op2].type;
ir_reg op2_reg = ctx->regs[def][2];
@ -5458,7 +5459,7 @@ static void ir_emit_alloca(ir_ctx *ctx, ir_ref def, ir_insn *insn)
}
}
static void ir_emit_switch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_switch(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -5471,7 +5472,10 @@ static void ir_emit_switch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
int count = 0;
ir_val min, max;
int64_t offset;
ir_reg op2_reg, tmp_reg;
ir_reg op2_reg = ctx->regs[def][2];
|.if X64
|| ir_reg tmp_reg = ctx->regs[def][3];
|.endif
type = ctx->ir_base[insn->op2].type;
if (IR_IS_TYPE_SIGNED(type)) {
@ -5506,11 +5510,10 @@ static void ir_emit_switch(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
}
}
op2_reg = ctx->regs[def][2];
tmp_reg = ctx->regs[def][3];
IR_ASSERT(op2_reg != IR_REG_NONE);
IR_ASSERT(tmp_reg != IR_REG_NONE || sizeof(void*) != 8);
|.if X64
|| IR_ASSERT(tmp_reg != IR_REG_NONE || sizeof(void*) != 8);
|.endif
if (op2_reg & IR_REG_SPILL_LOAD) {
op2_reg &= ~IR_REG_SPILL_LOAD;
ir_emit_load(ctx, type, op2_reg, insn->op2);
@ -6376,7 +6379,7 @@ static void ir_emit_guard_jcc(ir_ctx *ctx, uint8_t op, void *addr, bool int_cmp)
}
}
static void ir_emit_guard_cmp_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_guard_cmp_int(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_backend_data *data = ctx->data;
dasm_State **Dst = &data->dasm_state;
@ -6435,7 +6438,7 @@ static void ir_emit_guard_cmp_int(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
ir_emit_guard_jcc(ctx, op, addr, 1);
}
static void ir_emit_guard_cmp_fp(ir_ctx *ctx, int b, ir_ref def, ir_insn *insn)
static void ir_emit_guard_cmp_fp(ir_ctx *ctx, uint32_t b, ir_ref def, ir_insn *insn)
{
ir_op op = ir_emit_cmp_fp_common(ctx, insn->op2, &ctx->ir_base[insn->op2]);
void *addr = ir_jmp_addr(ctx, insn, &ctx->ir_base[insn->op3]);
@ -6770,14 +6773,15 @@ static int ir_emit_dessa_move(ir_ctx *ctx, uint8_t type, ir_ref from, ir_ref to)
int8_t to_reg, from_reg;
ir_block *to_bb;
int j, k = 0;
uint32_t b;
ir_ref phi = 0;
bool spill_store = 0;
IR_ASSERT(from_bb->successors_count == 1);
to_bb = &ctx->cfg_blocks[ctx->cfg_edges[from_bb->successors]];
for (j = 0; j < to_bb->predecessors_count; j++) {
if (ctx->cfg_edges[to_bb->predecessors + j] == from_block) {
k = j + 2;
for (b = 0; b < to_bb->predecessors_count; b++) {
if (ctx->cfg_edges[to_bb->predecessors + b] == from_block) {
k = b + 2;
break;
}
}
@ -7139,7 +7143,7 @@ static uint8_t ir_get_return_type(ir_ctx *ctx)
static void ir_allocate_unique_spill_slots(ir_ctx *ctx)
{
int b;
uint32_t b;
ir_block *bb;
ir_insn *insn;
ir_ref i, n, j, *p;
@ -7474,7 +7478,7 @@ static void* dasm_labels[ir_lb_MAX];
void *ir_emit_code(ir_ctx *ctx, size_t *size_ptr)
{
int b, n, target;
uint32_t b, n, target;
ir_block *bb;
ir_ref i;
ir_insn *insn;