/*
- * Copyright (C) 2012
+ * Copyright (C) 2012, 2013
* Wolfgang Bumiller
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
"variant",
"struct",
"union",
- "array"
+ "array",
+
+ "nil",
+ "<no-expression>"
};
size_t type_sizeof_[TYPE_COUNT] = {
0, /* TYPE_STRUCT */
0, /* TYPE_UNION */
0, /* TYPE_ARRAY */
+ 0, /* TYPE_NIL */
+    0, /* TYPE_NOEXPR */
};
uint16_t type_store_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
uint16_t field_store_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
uint16_t type_storep_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
uint16_t type_eq_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
uint16_t type_ne_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
uint16_t type_not_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
+ AINSTR_END, /* noexpr */
};
/* protos */
self->extparams = NULL;
self->extparam_protos = NULL;
- self->max_locals = 0;
+ self->first_common_globaltemp = 0;
+ self->max_globaltemps = 0;
+ self->first_common_local = 0;
+ self->max_locals = 0;
self->str_immediate = 0;
self->name = NULL;
return NULL;
}
+ self->nil = ir_value_var("nil", store_value, TYPE_NIL);
+ self->nil->cvq = CV_CONST;
+
return self;
}
for (i = 0; i != vec_size(self->fields); ++i) {
ir_value_delete(self->fields[i]);
}
+ ir_value_delete(self->nil);
vec_free(self->fields);
vec_free(self->filenames);
vec_free(self->filestrings);
self->code_function_def = -1;
self->allocated_locals = 0;
+ self->globaltemps = 0;
self->run_id = 0;
return self;
}
}
- if (!ir_function_naive_phi(self))
+ if (!ir_function_naive_phi(self)) {
+ irerror(self->context, "internal error: ir_function_naive_phi failed");
return false;
+ }
for (i = 0; i < vec_size(self->locals); ++i) {
ir_value *v = self->locals[i];
}
ve = ir_value_var(name, (param ? store_param : store_local), vtype);
+ if (param)
+ ve->locked = true;
vec_push(self->locals, ve);
return ve;
}
return true;
}
-#if 0
-static bool ir_naive_phi_emit_store(ir_block *block, size_t iid, ir_value *old, ir_value *what)
-{
- ir_instr *instr;
- size_t i;
-
- /* create a store */
- if (!ir_block_create_store(block, old, what))
- return false;
-
- /* we now move it up */
- instr = vec_last(block->instr);
- for (i = vec_size(block->instr)-1; i > iid; --i)
- block->instr[i] = block->instr[i-1];
- block->instr[i] = instr;
-
- return true;
-}
-#endif
-
static bool ir_block_naive_phi(ir_block *self)
{
size_t i, p; /*, w;*/
vec_push(b->instr, prevjump);
b->final = true;
}
-
-#if 0
- ir_value *v = instr->phi[p].value;
- for (w = 0; w < vec_size(v->writes); ++w) {
- ir_value *old;
-
- if (!v->writes[w]->_ops[0])
- continue;
-
- /* When the write was to a global, we have to emit a mov */
- old = v->writes[w]->_ops[0];
-
- /* The original instruction now writes to the PHI target local */
- if (v->writes[w]->_ops[0] == v)
- v->writes[w]->_ops[0] = instr->_ops[0];
-
- if (old->store != store_value && old->store != store_local && old->store != store_param)
- {
- /* If it originally wrote to a global we need to store the value
- * there as welli
- */
- if (!ir_naive_phi_emit_store(self, i+1, old, v))
- return false;
- if (i+1 < vec_size(self->instr))
- instr = self->instr[i+1];
- else
- instr = NULL;
- /* In case I forget and access instr later, it'll be NULL
- * when it's a problem, to make sure we crash, rather than accessing
- * invalid data.
- */
- }
- else
- {
- /* If it didn't, we can replace all reads by the phi target now. */
- size_t r;
- for (r = 0; r < vec_size(old->reads); ++r)
- {
- size_t op;
- ir_instr *ri = old->reads[r];
- for (op = 0; op < vec_size(ri->phi); ++op) {
- if (ri->phi[op].value == old)
- ri->phi[op].value = v;
- }
- for (op = 0; op < 3; ++op) {
- if (ri->_ops[op] == old)
- ri->_ops[op] = v;
- }
- }
- }
- }
-#endif
}
ir_instr_delete(instr);
}
void ir_function_enumerate(ir_function *self)
{
size_t i;
- size_t instruction_id = 1;
+ size_t instruction_id = 0;
for (i = 0; i < vec_size(self->blocks); ++i)
{
+ /* each block now gets an additional "entry" instruction id
+ * we can use to avoid point-life issues
+ */
+ self->blocks[i]->entry_id = instruction_id;
+ ++instruction_id;
+
self->blocks[i]->eid = i;
self->blocks[i]->run_id = 0;
ir_block_enumerate(self->blocks[i], &instruction_id);
bool *unique;
} function_allocator;
-static bool function_allocator_alloc(function_allocator *alloc, const ir_value *var)
+static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
{
ir_value *slot;
size_t vsize = ir_value_sizeof(var);
+ var->code.local = vec_size(alloc->locals);
+
slot = ir_value_var("reg", store_global, var->vtype);
if (!slot)
return false;
return false;
}
+/* Assign IR value 'v' a slot in 'alloc', reusing an existing slot when the
+ * liferanges do not overlap; falls back to allocating a fresh slot.
+ * Returns false only when a vec/merge operation fails.
+ * NOTE(review): factored out of ir_function_allocate_locals so both the
+ * locked and the global-temp allocator paths share one implementation.
+ */
+static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
+{
+    size_t a;
+    ir_value *slot;
+
+    /* values with a unique liferange never share a slot */
+    if (v->unique_life)
+        return function_allocator_alloc(alloc, v);
+
+    for (a = 0; a < vec_size(alloc->locals); ++a)
+    {
+        /* if it's reserved for a unique liferange: skip */
+        if (alloc->unique[a])
+            continue;
+
+        slot = alloc->locals[a];
+
+        /* never resize parameters
+         * will be required later when overlapping temps + locals
+         */
+        if (a < vec_size(self->params) &&
+            alloc->sizes[a] < ir_value_sizeof(v))
+        {
+            continue;
+        }
+
+        /* slots with an overlapping liferange cannot be shared */
+        if (ir_values_overlap(v, slot))
+            continue;
+
+        /* fold v's liferange into the slot's combined liferange */
+        if (!ir_value_life_merge_into(slot, v))
+            return false;
+
+        /* adjust size for this slot */
+        if (alloc->sizes[a] < ir_value_sizeof(v))
+            alloc->sizes[a] = ir_value_sizeof(v);
+
+        /* remember which slot index this value landed in */
+        v->code.local = a;
+        return true;
+    }
+    /* no reusable slot found: allocate a new one
+     * (the condition is always true after a full loop; kept for clarity)
+     */
+    if (a >= vec_size(alloc->locals)) {
+        if (!function_allocator_alloc(alloc, v))
+            return false;
+    }
+    return true;
+}
+
bool ir_function_allocate_locals(ir_function *self)
{
- size_t i, a;
+ size_t i;
bool retval = true;
size_t pos;
+ bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
- ir_value *slot;
ir_value *v;
- function_allocator alloc;
+ function_allocator lockalloc, globalloc;
if (!vec_size(self->locals) && !vec_size(self->values))
return true;
- alloc.locals = NULL;
- alloc.sizes = NULL;
- alloc.positions = NULL;
- alloc.unique = NULL;
+ globalloc.locals = NULL;
+ globalloc.sizes = NULL;
+ globalloc.positions = NULL;
+ globalloc.unique = NULL;
+ lockalloc.locals = NULL;
+ lockalloc.sizes = NULL;
+ lockalloc.positions = NULL;
+ lockalloc.unique = NULL;
for (i = 0; i < vec_size(self->locals); ++i)
{
- if (!OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS))
- self->locals[i]->unique_life = true;
- if (!function_allocator_alloc(&alloc, self->locals[i]))
+ v = self->locals[i];
+ if (!OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
+ v->locked = true;
+ v->unique_life = true;
+ }
+ else if (i >= vec_size(self->params))
+ break;
+ else
+            v->locked = true; /* lock parameter locals */
+ if (!function_allocator_alloc((v->locked || !opt_gt ? &lockalloc : &globalloc), v))
+ goto error;
+ }
+ for (; i < vec_size(self->locals); ++i)
+ {
+ v = self->locals[i];
+ if (!vec_size(v->life))
+ continue;
+ if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? &lockalloc : &globalloc), v))
goto error;
}
* If the value is a parameter-temp: 1 write, 1 read from a CALL
* and it's not "locked", write it to the OFS_PARM directly.
*/
- if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->locked) {
+ if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->locked && !v->unique_life) {
if (vec_size(v->reads) == 1 && vec_size(v->writes) == 1 &&
(v->reads[0]->opcode == VINSTR_NRCALL ||
(v->reads[0]->opcode >= INSTR_CALL0 && v->reads[0]->opcode <= INSTR_CALL8)
if (vec_size(v->writes) == 1 && v->writes[0]->opcode == INSTR_CALL0)
{
v->store = store_return;
+ if (v->members[0]) v->members[0]->store = store_return;
+ if (v->members[1]) v->members[1]->store = store_return;
+ if (v->members[2]) v->members[2]->store = store_return;
++opts_optimizationcount[OPTIM_CALL_STORES];
continue;
}
}
- for (a = 0; a < vec_size(alloc.locals); ++a)
- {
- /* if it's reserved for a unique liferange: skip */
- if (alloc.unique[a])
- continue;
-
- slot = alloc.locals[a];
-
- /* never resize parameters
- * will be required later when overlapping temps + locals
- */
- if (a < vec_size(self->params) &&
- alloc.sizes[a] < ir_value_sizeof(v))
- {
- continue;
- }
-
- if (ir_values_overlap(v, slot))
- continue;
-
- if (!ir_value_life_merge_into(slot, v))
- goto error;
-
- /* adjust size for this slot */
- if (alloc.sizes[a] < ir_value_sizeof(v))
- alloc.sizes[a] = ir_value_sizeof(v);
-
- self->values[i]->code.local = a;
- break;
- }
- if (a >= vec_size(alloc.locals)) {
- self->values[i]->code.local = vec_size(alloc.locals);
- if (!function_allocator_alloc(&alloc, v))
- goto error;
- }
+ if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? &lockalloc : &globalloc), v))
+ goto error;
}
- if (!alloc.sizes) {
+ if (!lockalloc.sizes && !globalloc.sizes) {
goto cleanup;
}
+ vec_push(lockalloc.positions, 0);
+ vec_push(globalloc.positions, 0);
/* Adjust slot positions based on sizes */
- vec_push(alloc.positions, 0);
-
- if (vec_size(alloc.sizes))
- pos = alloc.positions[0] + alloc.sizes[0];
- else
- pos = 0;
- for (i = 1; i < vec_size(alloc.sizes); ++i)
- {
- pos = alloc.positions[i-1] + alloc.sizes[i-1];
- vec_push(alloc.positions, pos);
+ if (lockalloc.sizes) {
+ pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
+ for (i = 1; i < vec_size(lockalloc.sizes); ++i)
+ {
+ pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
+ vec_push(lockalloc.positions, pos);
+ }
+ self->allocated_locals = pos + vec_last(lockalloc.sizes);
+ }
+ if (globalloc.sizes) {
+ pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
+ for (i = 1; i < vec_size(globalloc.sizes); ++i)
+ {
+ pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
+ vec_push(globalloc.positions, pos);
+ }
+ self->globaltemps = pos + vec_last(globalloc.sizes);
}
-
- self->allocated_locals = pos + vec_last(alloc.sizes);
/* Locals need to know their new position */
for (i = 0; i < vec_size(self->locals); ++i) {
- self->locals[i]->code.local = alloc.positions[i];
+ v = self->locals[i];
+ if (i >= vec_size(self->params) && !vec_size(v->life))
+ continue;
+ if (v->locked || !opt_gt)
+ v->code.local = lockalloc.positions[v->code.local];
+ else
+ v->code.local = globalloc.positions[v->code.local];
}
/* Take over the actual slot positions on values */
for (i = 0; i < vec_size(self->values); ++i) {
- self->values[i]->code.local = alloc.positions[self->values[i]->code.local];
+ v = self->values[i];
+ if (!vec_size(v->life))
+ continue;
+ if (v->locked || !opt_gt)
+ v->code.local = lockalloc.positions[v->code.local];
+ else
+ v->code.local = globalloc.positions[v->code.local];
}
goto cleanup;
error:
retval = false;
cleanup:
- for (i = 0; i < vec_size(alloc.locals); ++i)
- ir_value_delete(alloc.locals[i]);
- vec_free(alloc.unique);
- vec_free(alloc.locals);
- vec_free(alloc.sizes);
- vec_free(alloc.positions);
+ for (i = 0; i < vec_size(lockalloc.locals); ++i)
+ ir_value_delete(lockalloc.locals[i]);
+ for (i = 0; i < vec_size(globalloc.locals); ++i)
+ ir_value_delete(globalloc.locals[i]);
+ vec_free(globalloc.unique);
+ vec_free(globalloc.locals);
+ vec_free(globalloc.sizes);
+ vec_free(globalloc.positions);
+ vec_free(lockalloc.unique);
+ vec_free(lockalloc.locals);
+ vec_free(lockalloc.sizes);
+ vec_free(lockalloc.positions);
return retval;
}
bool changed = false;
for (i = 0; i != vec_size(self->living); ++i)
{
- if (!self->living[i]->locked)
+ if (!self->living[i]->locked) {
+ self->living[i]->locked = true;
changed = true;
- self->living[i]->locked = true;
+ }
}
return changed;
}
size_t i, o, p, mem;
/* bitmasks which operands are read from or written to */
size_t read, write;
- char dbg_ind[16] = { '#', '0' };
+ char dbg_ind[16];
+ dbg_ind[0] = '#';
+ dbg_ind[1] = '0';
(void)dbg_ind;
if (prev)
/* See which operands are read and write operands */
ir_op_read_write(instr->opcode, &read, &write);
- if (instr->opcode == INSTR_MUL_VF)
- {
- /* the float source will get an additional lifetime */
- tempbool = ir_value_life_merge(instr->_ops[2], instr->eid+1);
- *changed = *changed || tempbool;
- }
- else if (instr->opcode == INSTR_MUL_FV)
- {
- /* the float source will get an additional lifetime */
- tempbool = ir_value_life_merge(instr->_ops[1], instr->eid+1);
- *changed = *changed || tempbool;
- }
-
/* Go through the 3 main operands
* writes first, then reads
*/
}
}
+ if (instr->opcode == INSTR_MUL_VF)
+ {
+ value = instr->_ops[2];
+ /* the float source will get an additional lifetime */
+ if (ir_value_life_merge(value, instr->eid+1))
+ *changed = true;
+ if (value->memberof && ir_value_life_merge(value->memberof, instr->eid+1))
+ *changed = true;
+ }
+ else if (instr->opcode == INSTR_MUL_FV)
+ {
+ value = instr->_ops[1];
+ /* the float source will get an additional lifetime */
+ if (ir_value_life_merge(value, instr->eid+1))
+ *changed = true;
+ if (value->memberof && ir_value_life_merge(value->memberof, instr->eid+1))
+ *changed = true;
+ }
+
for (o = 0; o < 3; ++o)
{
if (!instr->_ops[o]) /* no such operand */
tempbool = ir_block_living_add_instr(self, instr->eid);
/*con_err( "living added values\n");*/
*changed = *changed || tempbool;
-
}
+ /* the "entry" instruction ID */
+ tempbool = ir_block_living_add_instr(self, self->entry_id);
+ *changed = *changed || tempbool;
if (self->run_id == self->owner->run_id)
return true;
size_t stidx;
size_t i;
-tailcall:
block->generated = true;
block->code_start = vec_size(code_statements);
for (i = 0; i < vec_size(block->instr); ++i)
/* for uncoditional jumps, if the target hasn't been generated
* yet, we generate them right here.
*/
- if (!target->generated) {
- block = target;
- goto tailcall;
- }
+ if (!target->generated)
+ return gen_blocks_recursive(func, target);
/* otherwise we generate a jump instruction */
stmt.opcode = INSTR_GOTO;
code_push_statement(&stmt, instr->context.line);
}
if (!ontrue->generated) {
- if (onfalse->generated) {
- block = ontrue;
- goto tailcall;
- }
+ if (onfalse->generated)
+ return gen_blocks_recursive(func, ontrue);
}
if (!onfalse->generated) {
- if (ontrue->generated) {
- block = onfalse;
- goto tailcall;
- }
+ if (ontrue->generated)
+ return gen_blocks_recursive(func, onfalse);
}
/* neither ontrue nor onfalse exist */
stmt.opcode = INSTR_IFNOT;
if (onfalse->generated) {
/* fixup the jump address */
code_statements[stidx].o2.s1 = (onfalse->code_start) - (stidx);
- if (code_statements[stidx].o2.s1 == 1) {
+ if (stidx+2 == vec_size(code_statements) && code_statements[stidx].o2.s1 == 1) {
code_statements[stidx] = code_statements[stidx+1];
if (code_statements[stidx].o1.s1 < 0)
code_statements[stidx].o1.s1++;
code_push_statement(&stmt, instr->context.line);
return true;
}
- else if (code_statements[stidx].o2.s1 == 1) {
+ else if (stidx+2 == vec_size(code_statements) && code_statements[stidx].o2.s1 == 1) {
code_statements[stidx] = code_statements[stidx+1];
if (code_statements[stidx].o1.s1 < 0)
code_statements[stidx].o1.s1++;
code_pop_statement();
}
/* if not, generate now */
- block = onfalse;
- goto tailcall;
+ return gen_blocks_recursive(func, onfalse);
}
if ( (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8)
if (param->vtype == TYPE_FIELD)
stmt.opcode = field_store_instr[param->fieldtype];
+ else if (param->vtype == TYPE_NIL)
+ stmt.opcode = INSTR_STORE_V;
else
stmt.opcode = type_store_instr[param->vtype];
stmt.o1.u1 = ir_value_code_addr(param);
if (param->vtype == TYPE_FIELD)
stmt.opcode = field_store_instr[param->fieldtype];
+ else if (param->vtype == TYPE_NIL)
+ stmt.opcode = INSTR_STORE_V;
else
stmt.opcode = type_store_instr[param->vtype];
stmt.o1.u1 = ir_value_code_addr(param);
prog_section_function *def;
ir_function *irfun;
size_t i;
- uint32_t firstlocal;
+ uint32_t firstlocal, firstglobal;
irfun = global->constval.vfunc;
def = code_functions + irfun->code_function_def;
++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
}
+ firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? ir->first_common_globaltemp : firstlocal);
+
for (i = vec_size(code_globals); i < firstlocal + irfun->allocated_locals; ++i)
vec_push(code_globals, 0);
for (i = 0; i < vec_size(irfun->locals); ++i) {
- ir_value_code_setaddr(irfun->locals[i], firstlocal + irfun->locals[i]->code.local);
- if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
- irerror(irfun->locals[i]->context, "failed to generate local %s", irfun->locals[i]->name);
- return false;
+ ir_value *v = irfun->locals[i];
+ if (v->locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
+ ir_value_code_setaddr(v, firstlocal + v->code.local);
+ if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
+ irerror(irfun->locals[i]->context, "failed to generate local %s", irfun->locals[i]->name);
+ return false;
+ }
}
+ else
+ ir_value_code_setaddr(v, firstglobal + v->code.local);
}
for (i = 0; i < vec_size(irfun->values); ++i)
{
ir_value *v = irfun->values[i];
if (v->callparam)
continue;
- ir_value_code_setaddr(v, firstlocal + v->code.local);
+ if (v->locked)
+ ir_value_code_setaddr(v, firstlocal + v->code.local);
+ else
+ ir_value_code_setaddr(v, firstglobal + v->code.local);
}
return true;
}
{
self->max_locals = func->allocated_locals;
}
+ if (func && self->max_globaltemps < func->globaltemps)
+ self->max_globaltemps = func->globaltemps;
}
}
}
}
+ /* generate nil */
+ ir_value_code_setaddr(self->nil, vec_size(code_globals));
+ vec_push(code_globals, 0);
+ vec_push(code_globals, 0);
+ vec_push(code_globals, 0);
+
+ /* generate global temps */
+ self->first_common_globaltemp = vec_size(code_globals);
+ for (i = 0; i < self->max_globaltemps; ++i) {
+ vec_push(code_globals, 0);
+ }
/* generate common locals */
self->first_common_local = vec_size(code_globals);
for (i = 0; i < self->max_locals; ++i) {
oprintf("endmodule %s\n", b->name);
}
+/* human-readable labels for the store_* enum, indexed by ir_value->store;
+ * used only by the ir_*_dump debug printers below */
+static const char *storenames[] = {
+    "[global]", "[local]", "[param]", "[value]", "[return]"
+};
+
void ir_function_dump(ir_function *f, char *ind,
int (*oprintf)(const char*, ...))
{
}
oprintf("%sliferanges:\n", ind);
for (i = 0; i < vec_size(f->locals); ++i) {
+ const char *attr = "";
size_t l, m;
ir_value *v = f->locals[i];
- oprintf("%s\t%s: %s@%i ", ind, v->name, (v->unique_life ? "unique " : ""), (int)v->code.local);
+ if (v->unique_life && v->locked)
+ attr = "unique,locked ";
+ else if (v->unique_life)
+ attr = "unique ";
+ else if (v->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->name, type_name[v->vtype],
+ storenames[v->store],
+ attr, (v->callparam ? "callparam " : ""),
+ (int)v->code.local);
+ if (!v->life)
+ oprintf("[null]");
for (l = 0; l < vec_size(v->life); ++l) {
oprintf("[%i,%i] ", v->life[l].start, v->life[l].end);
}
ir_value *vm = v->members[m];
if (!vm)
continue;
- oprintf("%s\t%s: %s@%i ", ind, vm->name, (vm->unique_life ? "unique " : ""), (int)vm->code.local);
+ oprintf("%s\t%s: @%i ", ind, vm->name, (int)vm->code.local);
for (l = 0; l < vec_size(vm->life); ++l) {
oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end);
}
}
}
for (i = 0; i < vec_size(f->values); ++i) {
- size_t l;
+ const char *attr = "";
+ size_t l, m;
ir_value *v = f->values[i];
- oprintf("%s\t%s: @%i ", ind, v->name, (int)v->code.local);
+ if (v->unique_life && v->locked)
+ attr = "unique,locked ";
+ else if (v->unique_life)
+ attr = "unique ";
+ else if (v->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->name, type_name[v->vtype],
+ storenames[v->store],
+ attr, (v->callparam ? "callparam " : ""),
+ (int)v->code.local);
+ if (!v->life)
+ oprintf("[null]");
for (l = 0; l < vec_size(v->life); ++l) {
oprintf("[%i,%i] ", v->life[l].start, v->life[l].end);
}
oprintf("\n");
+ for (m = 0; m < 3; ++m) {
+ ir_value *vm = v->members[m];
+ if (!vm)
+ continue;
+ if (vm->unique_life && vm->locked)
+ attr = "unique,locked ";
+ else if (vm->unique_life)
+ attr = "unique ";
+ else if (vm->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s@%i ", ind, vm->name, attr, (int)vm->code.local);
+ for (l = 0; l < vec_size(vm->life); ++l) {
+ oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end);
+ }
+ oprintf("\n");
+ }
}
if (vec_size(f->blocks))
{
oprintf("%s:%s\n", ind, b->label);
strncat(ind, "\t", IND_BUFSZ);
+ if (b->instr && b->instr[0])
+ oprintf("%s (%i) [entry]\n", ind, (int)(b->instr[0]->eid-1));
for (i = 0; i < vec_size(b->instr); ++i)
ir_instr_dump(b->instr[i], ind, oprintf);
ind[strlen(ind)-1] = 0;