"variant",
"struct",
"union",
- "array"
+ "array",
+
+ "nil"
};
-size_t type_sizeof[TYPE_COUNT] = {
+size_t type_sizeof_[TYPE_COUNT] = {
1, /* TYPE_VOID */
1, /* TYPE_STRING */
1, /* TYPE_FLOAT */
0, /* TYPE_STRUCT */
0, /* TYPE_UNION */
0, /* TYPE_ARRAY */
+ 0, /* TYPE_NIL */
};
uint16_t type_store_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
uint16_t field_store_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
uint16_t type_storep_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
uint16_t type_eq_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
uint16_t type_ne_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
uint16_t type_not_instr[TYPE_COUNT] = {
AINSTR_END, /* struct */
AINSTR_END, /* union */
AINSTR_END, /* array */
+ AINSTR_END, /* nil */
};
+/* protos */
+static ir_value* ir_gen_extparam_proto(ir_builder *ir);
+static void ir_gen_extparam (ir_builder *ir);
+
+/* error functions */
+
static void irerror(lex_ctx ctx, const char *msg, ...)
{
va_list ap;
static bool irwarning(lex_ctx ctx, int warntype, const char *fmt, ...)
{
-    va_list ap;
-    int lvl = LVL_WARNING;
-
-    if (warntype && !OPTS_WARN(warntype))
-        return false;
-
-    if (opts_werror)
-        lvl = LVL_ERROR;
-
-    va_start(ap, fmt);
-    con_vprintmsg(lvl, ctx.file, ctx.line, "warning", fmt, ap);
-    va_end(ap);
-
-    return opts_werror;
+    /* Forward the varargs to the central warning machinery; the old
+     * hand-rolled warn-type filtering and -Werror level selection now
+     * live in vcompile_warning.  Returns its result (presumably true
+     * when the warning is promoted to an error — confirm against
+     * vcompile_warning's contract).
+     */
+    bool r;
+    va_list ap;
+    va_start(ap, fmt);
+    r = vcompile_warning(ctx, warntype, fmt, ap);
+    va_end(ap);
+    return r;
}
/***********************************************************************
* Vector utility functions
*/
-bool GMQCC_WARN vec_ir_value_find(ir_value **vec, ir_value *what, size_t *idx)
+bool GMQCC_WARN vec_ir_value_find(ir_value **vec, const ir_value *what, size_t *idx)
{
size_t i;
size_t len = vec_size(vec);
self->functions = NULL;
self->globals = NULL;
self->fields = NULL;
- self->extparams = NULL;
self->filenames = NULL;
self->filestrings = NULL;
+ self->htglobals = util_htnew(IR_HT_SIZE);
+ self->htfields = util_htnew(IR_HT_SIZE);
+ self->htfunctions = util_htnew(IR_HT_SIZE);
+
+ self->extparams = NULL;
+ self->extparam_protos = NULL;
+
+ self->first_common_globaltemp = 0;
+ self->max_globaltemps = 0;
+ self->first_common_local = 0;
+ self->max_locals = 0;
self->str_immediate = 0;
self->name = NULL;
return NULL;
}
+ self->nil = ir_value_var("nil", store_value, TYPE_NIL);
+ self->nil->cvq = CV_CONST;
+
return self;
}
void ir_builder_delete(ir_builder* self)
{
size_t i;
+ util_htdel(self->htglobals);
+ util_htdel(self->htfields);
+ util_htdel(self->htfunctions);
mem_d((void*)self->name);
for (i = 0; i != vec_size(self->functions); ++i) {
ir_function_delete_quick(self->functions[i]);
for (i = 0; i != vec_size(self->fields); ++i) {
ir_value_delete(self->fields[i]);
}
+ ir_value_delete(self->nil);
vec_free(self->fields);
vec_free(self->filenames);
vec_free(self->filestrings);
ir_function* ir_builder_get_function(ir_builder *self, const char *name)
{
-    size_t i;
-    for (i = 0; i < vec_size(self->functions); ++i) {
-        if (!strcmp(name, self->functions[i]->name))
-            return self->functions[i];
-    }
-    return NULL;
+    /* Look the function up in the builder's hash table (replaces the
+     * old linear scan over self->functions); NULL when absent,
+     * per util_htget.
+     */
+    return (ir_function*)util_htget(self->htfunctions, name);
}
ir_function* ir_builder_create_function(ir_builder *self, const char *name, int outtype)
return NULL;
}
vec_push(self->functions, fn);
+ util_htset(self->htfunctions, name, fn);
fn->value = ir_builder_create_global(self, fn->name, TYPE_FUNCTION);
if (!fn->value) {
return NULL;
}
- fn->value->isconst = true;
+ fn->value->hasvalue = true;
fn->value->outtype = outtype;
fn->value->constval.vfunc = fn;
fn->value->context = fn->context;
ir_value* ir_builder_get_global(ir_builder *self, const char *name)
{
-    size_t i;
-    for (i = 0; i < vec_size(self->globals); ++i) {
-        if (!strcmp(self->globals[i]->name, name))
-            return self->globals[i];
-    }
-    return NULL;
+    /* Hash-table lookup of a named global (replaces the old linear
+     * scan over self->globals); NULL when absent, per util_htget.
+     */
+    return (ir_value*)util_htget(self->htglobals, name);
}
ir_value* ir_builder_create_global(ir_builder *self, const char *name, int vtype)
ve = ir_value_var(name, store_global, vtype);
vec_push(self->globals, ve);
+ util_htset(self->htglobals, name, ve);
return ve;
}
ir_value* ir_builder_get_field(ir_builder *self, const char *name)
{
-    size_t i;
-    for (i = 0; i < vec_size(self->fields); ++i) {
-        if (!strcmp(self->fields[i]->name, name))
-            return self->fields[i];
-    }
-    return NULL;
+    /* Hash-table lookup of a named entity field (replaces the old
+     * linear scan over self->fields); NULL when absent, per util_htget.
+     */
+    return (ir_value*)util_htget(self->htfields, name);
}
ve = ir_value_var(name, store_global, TYPE_FIELD);
ve->fieldtype = vtype;
vec_push(self->fields, ve);
+ util_htset(self->htfields, name, ve);
return ve;
}
mem_d(self);
return NULL;
}
+ self->flags = 0;
+
self->owner = owner;
self->context.file = "<@no context>";
self->context.line = 0;
self->code_function_def = -1;
self->allocated_locals = 0;
+ self->globaltemps = 0;
self->run_id = 0;
return self;
vec_push(self->values, v);
}
-ir_block* ir_function_create_block(ir_function *self, const char *label)
+/* Append a new empty block labelled `label` to `self`.  The block now
+ * takes the caller-supplied lexical context `ctx` for diagnostics
+ * instead of inheriting the function's own context.
+ * NOTE(review): bn is pushed without a NULL check — assumes ir_block_new
+ * cannot fail here; confirm.
+ */
+ir_block* ir_function_create_block(lex_ctx ctx, ir_function *self, const char *label)
{
    ir_block* bn = ir_block_new(self, label);
-    memcpy(&bn->context, &self->context, sizeof(self->context));
+    bn->context = ctx;
    vec_push(self->blocks, bn);
    return bn;
}
+/* True for opcodes whose _ops[0] is a produced result: arithmetic and
+ * comparison ops, loads, ADDRESS, the NOT family, AND/OR/bit ops and
+ * CALL0..CALL8.  Used by the peephole pass to pair an operation with a
+ * following STORE.
+ */
+static bool instr_is_operation(uint16_t op)
+{
+    return ( (op >= INSTR_MUL_F && op <= INSTR_GT) ||
+             (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
+             (op == INSTR_ADDRESS) ||
+             (op >= INSTR_NOT_F && op <= INSTR_NOT_FNC) ||
+             (op >= INSTR_AND && op <= INSTR_BITOR) ||
+             (op >= INSTR_CALL0 && op <= INSTR_CALL8) );
+}
+
+/* Peephole pass over every block of `self`.  Two local rewrites:
+ *  1. An operation whose SSA result temp is read exactly once, by the
+ *     STORE immediately following it, is retargeted to write the store's
+ *     destination directly and the STORE is deleted.
+ *  2. A COND reading the single-use result of a scalar NOT (not _V/_S)
+ *     drops the NOT and swaps its ontrue/onfalse targets; repeated while
+ *     the pattern keeps matching.
+ * Returns false only on the internal sanity-check failure below.
+ */
+bool ir_function_pass_peephole(ir_function *self)
+{
+    size_t b;
+
+    for (b = 0; b < vec_size(self->blocks); ++b) {
+        size_t i;
+        ir_block *block = self->blocks[b];
+
+        for (i = 0; i < vec_size(block->instr); ++i) {
+            ir_instr *inst;
+            inst = block->instr[i];
+
+            if (i >= 1 &&
+                (inst->opcode >= INSTR_STORE_F &&
+                 inst->opcode <= INSTR_STORE_FNC))
+            {
+                ir_instr *store;
+                ir_instr *oper;
+                ir_value *value;
+
+                store = inst;
+
+                oper = block->instr[i-1];
+                if (!instr_is_operation(oper->opcode))
+                    continue;
+
+                value = oper->_ops[0];
+
+                /* only do it for SSA values */
+                if (value->store != store_value)
+                    continue;
+
+                /* don't optimize out the temp if it's used later again */
+                if (vec_size(value->reads) != 1)
+                    continue;
+
+                /* The very next store must use this value */
+                if (value->reads[0] != store)
+                    continue;
+
+                /* And of course the store must _read_ from it, so it's in
+                 * OP 1 */
+                if (store->_ops[1] != value)
+                    continue;
+
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                /* retarget the operation's output; (void)! silences the
+                 * warn-unused-result attribute */
+                (void)!ir_instr_op(oper, 0, store->_ops[0], true);
+
+                vec_remove(block->instr, i, 1);
+                ir_instr_delete(store);
+            }
+            else if (inst->opcode == VINSTR_COND)
+            {
+                /* COND on a value resulting from a NOT could
+                 * remove the NOT and swap its operands
+                 */
+                while (true) {
+                    ir_block *tmp;
+                    size_t inotid;
+                    ir_instr *inot;
+                    ir_value *value;
+                    value = inst->_ops[0];
+
+                    /* the condition temp must be SSA and read only by
+                     * this COND */
+                    if (value->store != store_value ||
+                        vec_size(value->reads) != 1 ||
+                        value->reads[0] != inst)
+                    {
+                        break;
+                    }
+
+                    /* NOTE(review): assumes writes[0] is the defining
+                     * instruction of the temp — confirm */
+                    inot = value->writes[0];
+                    if (inot->_ops[0] != value ||
+                        inot->opcode < INSTR_NOT_F ||
+                        inot->opcode > INSTR_NOT_FNC ||
+                        inot->opcode == INSTR_NOT_V || /* can't do these */
+                        inot->opcode == INSTR_NOT_S)
+                    {
+                        break;
+                    }
+
+                    /* count */
+                    ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                    /* change operand */
+                    (void)!ir_instr_op(inst, 0, inot->_ops[1], false);
+                    /* remove NOT */
+                    tmp = inot->owner;
+                    for (inotid = 0; inotid < vec_size(tmp->instr); ++inotid) {
+                        if (tmp->instr[inotid] == inot)
+                            break;
+                    }
+                    if (inotid >= vec_size(tmp->instr)) {
+                        compile_error(inst->context, "sanity-check failed: failed to find instruction to optimize out");
+                        return false;
+                    }
+                    vec_remove(tmp->instr, inotid, 1);
+                    ir_instr_delete(inot);
+                    /* swap ontrue/onfalse */
+                    tmp = inst->bops[0];
+                    inst->bops[0] = inst->bops[1];
+                    inst->bops[1] = tmp;
+                }
+                continue;
+            }
+        }
+    }
+
+    return true;
+}
+
+/* Tail-recursion pass: a final block ending in
+ *     CALLn <self> ; RETURN %r      (optionally with an intervening
+ *     STORE %return,%tmp — see below)
+ * where the called function is `self` itself is rewritten into stores of
+ * the call arguments into the parameter locals followed by a JUMP back
+ * to the entry block; the CALL and RETURN are deleted.
+ * Returns false when emitting the replacement instructions fails.
+ */
+bool ir_function_pass_tailrecursion(ir_function *self)
+{
+    size_t b, p;
+
+    for (b = 0; b < vec_size(self->blocks); ++b) {
+        ir_value *funcval;
+        ir_instr *ret, *call, *store = NULL;
+        ir_block *block = self->blocks[b];
+
+        /* only finalized blocks with at least CALL+RETURN qualify */
+        if (!block->final || vec_size(block->instr) < 2)
+            continue;
+
+        ret = block->instr[vec_size(block->instr)-1];
+        if (ret->opcode != INSTR_DONE && ret->opcode != INSTR_RETURN)
+            continue;
+
+        call = block->instr[vec_size(block->instr)-2];
+        if (call->opcode >= INSTR_STORE_F && call->opcode <= INSTR_STORE_FNC) {
+            /* account for the unoptimized
+             * CALL
+             * STORE %return, %tmp
+             * RETURN %tmp
+             * version
+             */
+            if (vec_size(block->instr) < 3)
+                continue;
+
+            store = call;
+            call = block->instr[vec_size(block->instr)-3];
+        }
+
+        if (call->opcode < INSTR_CALL0 || call->opcode > INSTR_CALL8)
+            continue;
+
+        if (store) {
+            /* optimize out the STORE */
+            if (ret->_ops[0] &&
+                ret->_ops[0] == store->_ops[0] &&
+                store->_ops[1] == call->_ops[0])
+            {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                call->_ops[0] = store->_ops[0];
+                vec_remove(block->instr, vec_size(block->instr) - 2, 1);
+                ir_instr_delete(store);
+            }
+            else
+                continue;
+        }
+
+        if (!call->_ops[0])
+            continue;
+
+        /* the call target must be a constant reference to this very
+         * function for it to be tail-*recursion* */
+        funcval = call->_ops[1];
+        if (!funcval)
+            continue;
+        if (funcval->vtype != TYPE_FUNCTION || funcval->constval.vfunc != self)
+            continue;
+
+        /* now we have a CALL and a RET, check if it's a tailcall */
+        if (ret->_ops[0] && call->_ops[0] != ret->_ops[0])
+            continue;
+
+        ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
+        /* drop the trailing CALL and RETURN */
+        vec_shrinkby(block->instr, 2);
+
+        block->final = false; /* open it back up */
+
+        /* emit parameter-stores */
+        for (p = 0; p < vec_size(call->params); ++p) {
+            /* assert(call->params_count <= self->locals_count); */
+            if (!ir_block_create_store(block, call->context, self->locals[p], call->params[p])) {
+                irerror(call->context, "failed to create tailcall store instruction for parameter %i", (int)p);
+                return false;
+            }
+        }
+        if (!ir_block_create_jump(block, call->context, self->blocks[0])) {
+            irerror(call->context, "failed to create tailcall jump");
+            return false;
+        }
+
+        ir_instr_delete(call);
+        ir_instr_delete(ret);
+    }
+
+    return true;
+}
+
bool ir_function_finalize(ir_function *self)
{
+ size_t i;
+
if (self->builtin)
return true;
+ if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
+ if (!ir_function_pass_peephole(self)) {
+ irerror(self->context, "generic optimization pass broke something in `%s`", self->name);
+ return false;
+ }
+ }
+
+ if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
+ if (!ir_function_pass_tailrecursion(self)) {
+ irerror(self->context, "tail-recursion optimization pass broke something in `%s`", self->name);
+ return false;
+ }
+ }
+
if (!ir_function_naive_phi(self))
return false;
+ for (i = 0; i < vec_size(self->locals); ++i) {
+ ir_value *v = self->locals[i];
+ if (v->vtype == TYPE_VECTOR ||
+ (v->vtype == TYPE_FIELD && v->outtype == TYPE_VECTOR))
+ {
+ ir_value_vector_member(v, 0);
+ ir_value_vector_member(v, 1);
+ ir_value_vector_member(v, 2);
+ }
+ }
+ for (i = 0; i < vec_size(self->values); ++i) {
+ ir_value *v = self->values[i];
+ if (v->vtype == TYPE_VECTOR ||
+ (v->vtype == TYPE_FIELD && v->outtype == TYPE_VECTOR))
+ {
+ ir_value_vector_member(v, 0);
+ ir_value_vector_member(v, 1);
+ ir_value_vector_member(v, 2);
+ }
+ }
+
ir_function_enumerate(self);
if (!ir_function_calculate_liferanges(self))
return true;
}
-ir_value* ir_function_get_local(ir_function *self, const char *name)
-{
- size_t i;
- for (i = 0; i < vec_size(self->locals); ++i) {
- if (!strcmp(self->locals[i]->name, name))
- return self->locals[i];
- }
- return NULL;
-}
-
ir_value* ir_function_create_local(ir_function *self, const char *name, int vtype, bool param)
{
ir_value *ve;
- /*
- if (ir_function_get_local(self, name))
- return NULL;
- */
-
if (param &&
vec_size(self->locals) &&
self->locals[vec_size(self->locals)-1]->store != store_param) {
}
ve = ir_value_var(name, (param ? store_param : store_local), vtype);
+ if (param)
+ ve->locked = true;
vec_push(self->locals, ve);
return ve;
}
*IR Instructions
*/
-ir_instr* ir_instr_new(ir_block* owner, int op)
+ir_instr* ir_instr_new(lex_ctx ctx, ir_block* owner, int op)
{
ir_instr *self;
self = (ir_instr*)mem_a(sizeof(*self));
return NULL;
self->owner = owner;
- self->context.file = "<@no context>";
- self->context.line = 0;
+ self->context = ctx;
self->opcode = op;
self->_ops[0] = NULL;
self->_ops[1] = NULL;
self->reads = NULL;
self->writes = NULL;
- self->isconst = false;
+ self->cvq = CV_NONE;
+ self->hasvalue = false;
self->context.file = "<@no context>";
self->context.line = 0;
self->name = NULL;
self->members[2] = NULL;
self->memberof = NULL;
+ self->unique_life = false;
+ self->locked = false;
+ self->callparam = false;
+
self->life = NULL;
return self;
}
ir_value* ir_value_vector_member(ir_value *self, unsigned int member)
{
+ char *name;
+ size_t len;
ir_value *m;
if (member >= 3)
return NULL;
if (self->members[member])
return self->members[member];
+ if (self->name) {
+ len = strlen(self->name);
+ name = (char*)mem_a(len + 3);
+ memcpy(name, self->name, len);
+ name[len+0] = '_';
+ name[len+1] = 'x' + member;
+ name[len+2] = '\0';
+ }
+ else
+ name = NULL;
+
if (self->vtype == TYPE_VECTOR)
{
- m = ir_value_var(self->name, self->store, TYPE_FLOAT);
+ m = ir_value_var(name, self->store, TYPE_FLOAT);
+ if (name)
+ mem_d(name);
if (!m)
return NULL;
m->context = self->context;
{
if (self->fieldtype != TYPE_VECTOR)
return NULL;
- m = ir_value_var(self->name, self->store, TYPE_FIELD);
+ m = ir_value_var(name, self->store, TYPE_FIELD);
+ if (name)
+ mem_d(name);
if (!m)
return NULL;
m->fieldtype = TYPE_FLOAT;
return m;
}
+/* Storage size of a value in type_sizeof_ units; a field whose referenced
+ * type is a vector is sized as a vector rather than as a plain field.
+ */
+static GMQCC_INLINE size_t ir_value_sizeof(const ir_value *self)
+{
+    if (self->vtype == TYPE_FIELD && self->fieldtype == TYPE_VECTOR)
+        return type_sizeof_[TYPE_VECTOR];
+    return type_sizeof_[self->vtype];
+}
+
ir_value* ir_value_out(ir_function *owner, const char *name, int storetype, int vtype)
{
ir_value *v = ir_value_var(name, storetype, vtype);
size_t i;
if (self->name)
mem_d((void*)self->name);
- if (self->isconst)
+ if (self->hasvalue)
{
if (self->vtype == TYPE_STRING)
mem_d((void*)self->constval.vstring);
if (self->vtype != TYPE_FLOAT)
return false;
self->constval.vfloat = f;
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
if (self->vtype != TYPE_FUNCTION)
return false;
self->constval.vint = f;
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
if (self->vtype != TYPE_VECTOR)
return false;
self->constval.vvec = v;
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
if (self->vtype != TYPE_FIELD)
return false;
self->constval.vpointer = fld;
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
{
if (str && !*str) {
/* actually dup empty strings */
- char *out = mem_a(1);
+ char *out = (char*)mem_a(1);
*out = 0;
return out;
}
if (self->vtype != TYPE_STRING)
return false;
self->constval.vstring = ir_strdup(str);
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
if (self->vtype != TYPE_INTEGER)
return false;
self->constval.vint = i;
- self->isconst = true;
+ self->hasvalue = true;
return true;
}
#endif
*IR main operations
*/
-bool ir_block_create_store_op(ir_block *self, int op, ir_value *target, ir_value *what)
+/* Shared guard for all instruction-appending helpers: a block already
+ * marked final may not receive further instructions.  Returns true when
+ * appending to `self` is still legal; otherwise emits the
+ * "unreachable statement" error and returns false.
+ */
+static bool ir_check_unreachable(ir_block *self)
+{
+    /* The IR should never have to deal with unreachable code */
+    if (!self->final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
+        return true;
+    irerror(self->context, "unreachable statement (%s)", self->label);
+    return false;
+}
+
+bool ir_block_create_store_op(ir_block *self, lex_ctx ctx, int op, ir_value *target, ir_value *what)
{
ir_instr *in;
- if (self->final) {
- irerror(self->context, "unreachable statement (%s)", self->label);
- return false;
- }
- in = ir_instr_new(self, op);
- if (!in)
+ if (!ir_check_unreachable(self))
return false;
if (target->store == store_value &&
return false;
}
- if (!ir_instr_op(in, 0, target, true) ||
+ in = ir_instr_new(ctx, self, op);
+ if (!in)
+ return false;
+
+ if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
!ir_instr_op(in, 1, what, false))
{
+ ir_instr_delete(in);
return false;
}
vec_push(self->instr, in);
return true;
}
-bool ir_block_create_store(ir_block *self, ir_value *target, ir_value *what)
+bool ir_block_create_store(ir_block *self, lex_ctx ctx, ir_value *target, ir_value *what)
{
int op = 0;
int vtype;
op = INSTR_STORE_V;
}
- return ir_block_create_store_op(self, op, target, what);
+ return ir_block_create_store_op(self, ctx, op, target, what);
}
-bool ir_block_create_storep(ir_block *self, ir_value *target, ir_value *what)
+bool ir_block_create_storep(ir_block *self, lex_ctx ctx, ir_value *target, ir_value *what)
{
int op = 0;
int vtype;
op = INSTR_STOREP_V;
}
- return ir_block_create_store_op(self, op, target, what);
+ return ir_block_create_store_op(self, ctx, op, target, what);
}
-bool ir_block_create_return(ir_block *self, ir_value *v)
+bool ir_block_create_return(ir_block *self, lex_ctx ctx, ir_value *v)
{
ir_instr *in;
- if (self->final) {
- irerror(self->context, "unreachable statement (%s)", self->label);
+ if (!ir_check_unreachable(self))
return false;
- }
self->final = true;
self->is_return = true;
- in = ir_instr_new(self, INSTR_RETURN);
+ in = ir_instr_new(ctx, self, INSTR_RETURN);
if (!in)
return false;
- if (v && !ir_instr_op(in, 0, v, false))
+ if (v && !ir_instr_op(in, 0, v, false)) {
+ ir_instr_delete(in);
return false;
+ }
vec_push(self->instr, in);
return true;
}
-bool ir_block_create_if(ir_block *self, ir_value *v,
+bool ir_block_create_if(ir_block *self, lex_ctx ctx, ir_value *v,
ir_block *ontrue, ir_block *onfalse)
{
ir_instr *in;
- if (self->final) {
- irerror(self->context, "unreachable statement (%s)", self->label);
+ if (!ir_check_unreachable(self))
return false;
- }
self->final = true;
- /*in = ir_instr_new(self, (v->vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
- in = ir_instr_new(self, VINSTR_COND);
+ /*in = ir_instr_new(ctx, self, (v->vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
+ in = ir_instr_new(ctx, self, VINSTR_COND);
if (!in)
return false;
return true;
}
-bool ir_block_create_jump(ir_block *self, ir_block *to)
+bool ir_block_create_jump(ir_block *self, lex_ctx ctx, ir_block *to)
{
ir_instr *in;
- if (self->final) {
- irerror(self->context, "unreachable statement (%s)", self->label);
+ if (!ir_check_unreachable(self))
return false;
- }
self->final = true;
- in = ir_instr_new(self, VINSTR_JUMP);
+ in = ir_instr_new(ctx, self, VINSTR_JUMP);
if (!in)
return false;
return true;
}
-bool ir_block_create_goto(ir_block *self, ir_block *to)
+/* GOTO is now lowered to a plain JUMP (the hand-built INSTR_GOTO emission
+ * below is gone); we additionally flag the owning function as containing
+ * a goto so later stages can act on it.
+ */
+bool ir_block_create_goto(ir_block *self, lex_ctx ctx, ir_block *to)
{
-    ir_instr *in;
-    if (self->final) {
-        irerror(self->context, "unreachable statement (%s)", self->label);
-        return false;
-    }
-    self->final = true;
-    in = ir_instr_new(self, INSTR_GOTO);
-    if (!in)
-        return false;
-
-    in->bops[0] = to;
-    vec_push(self->instr, in);
-
-    vec_push(self->exits, to);
-    vec_push(to->entries, self);
-    return true;
+    self->owner->flags |= IR_FLAG_HAS_GOTO;
+    return ir_block_create_jump(self, ctx, to);
}
-ir_instr* ir_block_create_phi(ir_block *self, const char *label, int ot)
+ir_instr* ir_block_create_phi(ir_block *self, lex_ctx ctx, const char *label, int ot)
{
ir_value *out;
ir_instr *in;
- in = ir_instr_new(self, VINSTR_PHI);
+ if (!ir_check_unreachable(self))
+ return NULL;
+ in = ir_instr_new(ctx, self, VINSTR_PHI);
if (!in)
return NULL;
out = ir_value_out(self->owner, label, store_value, ot);
}
/* call related code */
-ir_instr* ir_block_create_call(ir_block *self, const char *label, ir_value *func)
+ir_instr* ir_block_create_call(ir_block *self, lex_ctx ctx, const char *label, ir_value *func, bool noreturn)
{
ir_value *out;
ir_instr *in;
- in = ir_instr_new(self, INSTR_CALL0);
+ if (!ir_check_unreachable(self))
+ return NULL;
+ in = ir_instr_new(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
if (!in)
return NULL;
+ if (noreturn) {
+ self->final = true;
+ self->is_return = true;
+ }
out = ir_value_out(self->owner, label, (func->outtype == TYPE_VOID) ? store_return : store_value, func->outtype);
if (!out) {
ir_instr_delete(in);
return NULL;
}
vec_push(self->instr, in);
+ /*
+ if (noreturn) {
+ if (!ir_block_create_return(self, ctx, NULL)) {
+ compile_error(ctx, "internal error: failed to generate dummy-return instruction");
+ ir_instr_delete(in);
+ return NULL;
+ }
+ }
+ */
return in;
}
/* binary op related code */
-ir_value* ir_block_create_binop(ir_block *self,
+ir_value* ir_block_create_binop(ir_block *self, lex_ctx ctx,
const char *label, int opcode,
ir_value *left, ir_value *right)
{
return NULL;
}
- return ir_block_create_general_instr(self, label, opcode, left, right, ot);
+ return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
}
-ir_value* ir_block_create_unary(ir_block *self,
+ir_value* ir_block_create_unary(ir_block *self, lex_ctx ctx,
const char *label, int opcode,
ir_value *operand)
{
}
/* let's use the general instruction creator and pass NULL for OPB */
- return ir_block_create_general_instr(self, label, opcode, operand, NULL, ot);
+ return ir_block_create_general_instr(self, ctx, label, opcode, operand, NULL, ot);
}
-ir_value* ir_block_create_general_instr(ir_block *self, const char *label,
+ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx ctx, const char *label,
int op, ir_value *a, ir_value *b, int outype)
{
ir_instr *instr;
if (!out)
return NULL;
- instr = ir_instr_new(self, op);
+ instr = ir_instr_new(ctx, self, op);
if (!instr) {
ir_value_delete(out);
return NULL;
return NULL;
}
-ir_value* ir_block_create_fieldaddress(ir_block *self, const char *label, ir_value *ent, ir_value *field)
+ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx ctx, const char *label, ir_value *ent, ir_value *field)
{
ir_value *v;
if (field->vtype != TYPE_FIELD)
return NULL;
- v = ir_block_create_general_instr(self, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
+ v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
v->fieldtype = field->fieldtype;
return v;
}
-ir_value* ir_block_create_load_from_ent(ir_block *self, const char *label, ir_value *ent, ir_value *field, int outype)
+ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx ctx, const char *label, ir_value *ent, ir_value *field, int outype)
{
int op;
if (ent->vtype != TYPE_ENTITY)
return NULL;
}
- return ir_block_create_general_instr(self, label, op, ent, field, outype);
+ return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
}
-ir_value* ir_block_create_add(ir_block *self,
- const char *label,
- ir_value *left, ir_value *right)
-{
- int op = 0;
- int l = left->vtype;
- int r = right->vtype;
- if (l == r) {
- switch (l) {
- default:
- irerror(self->context, "invalid type for ir_block_create_add: %s", type_name[l]);
- return NULL;
- case TYPE_FLOAT:
- op = INSTR_ADD_F;
- break;
-#if 0
- case TYPE_INTEGER:
- op = INSTR_ADD_I;
- break;
-#endif
- case TYPE_VECTOR:
- op = INSTR_ADD_V;
- break;
- }
- } else {
-#if 0
- if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) )
- op = INSTR_ADD_FI;
- else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) )
- op = INSTR_ADD_IF;
- else
-#endif
- {
- irerror(self->context, "invalid type for ir_block_create_add: %s", type_name[l]);
- return NULL;
- }
- }
- return ir_block_create_binop(self, label, op, left, right);
-}
+/* PHI resolving breaks the SSA, and must thus be the last
+ * step before life-range calculation.
+ */
-ir_value* ir_block_create_sub(ir_block *self,
- const char *label,
- ir_value *left, ir_value *right)
+static bool ir_block_naive_phi(ir_block *self);
+bool ir_function_naive_phi(ir_function *self)
{
- int op = 0;
- int l = left->vtype;
- int r = right->vtype;
- if (l == r) {
+ size_t i;
- switch (l) {
- default:
- irerror(self->context, "invalid type for ir_block_create_sub: %s", type_name[l]);
- return NULL;
- case TYPE_FLOAT:
- op = INSTR_SUB_F;
- break;
-#if 0
- case TYPE_INTEGER:
- op = INSTR_SUB_I;
- break;
-#endif
- case TYPE_VECTOR:
- op = INSTR_SUB_V;
- break;
- }
- } else {
-#if 0
- if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) )
- op = INSTR_SUB_FI;
- else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) )
- op = INSTR_SUB_IF;
- else
-#endif
- {
- irerror(self->context, "invalid type for ir_block_create_sub: %s", type_name[l]);
- return NULL;
- }
+ for (i = 0; i < vec_size(self->blocks); ++i)
+ {
+ if (!ir_block_naive_phi(self->blocks[i]))
+ return false;
}
- return ir_block_create_binop(self, label, op, left, right);
+ return true;
}
-ir_value* ir_block_create_mul(ir_block *self,
- const char *label,
- ir_value *left, ir_value *right)
-{
- int op = 0;
- int l = left->vtype;
- int r = right->vtype;
- if (l == r) {
-
- switch (l) {
- default:
- irerror(self->context, "invalid type for ir_block_create_mul: %s", type_name[l]);
- return NULL;
- case TYPE_FLOAT:
- op = INSTR_MUL_F;
- break;
-#if 0
- case TYPE_INTEGER:
- op = INSTR_MUL_I;
- break;
-#endif
- case TYPE_VECTOR:
- op = INSTR_MUL_V;
- break;
- }
- } else {
- if ( (l == TYPE_VECTOR && r == TYPE_FLOAT) )
- op = INSTR_MUL_VF;
- else if ( (l == TYPE_FLOAT && r == TYPE_VECTOR) )
- op = INSTR_MUL_FV;
#if 0
- else if ( (l == TYPE_VECTOR && r == TYPE_INTEGER) )
- op = INSTR_MUL_VI;
- else if ( (l == TYPE_INTEGER && r == TYPE_VECTOR) )
- op = INSTR_MUL_IV;
- else if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) )
- op = INSTR_MUL_FI;
- else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) )
- op = INSTR_MUL_IF;
-#endif
- else {
- irerror(self->context, "invalid type for ir_block_create_mul: %s", type_name[l]);
- return NULL;
- }
- }
- return ir_block_create_binop(self, label, op, left, right);
-}
-
-ir_value* ir_block_create_div(ir_block *self,
- const char *label,
- ir_value *left, ir_value *right)
+static bool ir_naive_phi_emit_store(ir_block *block, size_t iid, ir_value *old, ir_value *what)
{
- int op = 0;
- int l = left->vtype;
- int r = right->vtype;
- if (l == r) {
-
- switch (l) {
- default:
- irerror(self->context, "invalid type for ir_block_create_div: %s", type_name[l]);
- return NULL;
- case TYPE_FLOAT:
- op = INSTR_DIV_F;
- break;
-#if 0
- case TYPE_INTEGER:
- op = INSTR_DIV_I;
- break;
-#endif
- }
- } else {
-#if 0
- if ( (l == TYPE_VECTOR && r == TYPE_FLOAT) )
- op = INSTR_DIV_VF;
- else if ( (l == TYPE_FLOAT && r == TYPE_INTEGER) )
- op = INSTR_DIV_FI;
- else if ( (l == TYPE_INTEGER && r == TYPE_FLOAT) )
- op = INSTR_DIV_IF;
- else
-#endif
- {
- irerror(self->context, "invalid type for ir_block_create_div: %s", type_name[l]);
- return NULL;
- }
- }
- return ir_block_create_binop(self, label, op, left, right);
-}
-
-/* PHI resolving breaks the SSA, and must thus be the last
- * step before life-range calculation.
- */
-
-static bool ir_block_naive_phi(ir_block *self);
-bool ir_function_naive_phi(ir_function *self)
-{
- size_t i;
-
- for (i = 0; i < vec_size(self->blocks); ++i)
- {
- if (!ir_block_naive_phi(self->blocks[i]))
- return false;
- }
- return true;
-}
-
-#if 0
-static bool ir_naive_phi_emit_store(ir_block *block, size_t iid, ir_value *old, ir_value *what)
-{
- ir_instr *instr;
- size_t i;
+ ir_instr *instr;
+ size_t i;
/* create a store */
if (!ir_block_create_store(block, old, what))
vec_pop(b->instr);
b->final = false;
instr->_ops[0]->store = store_global;
- if (!ir_block_create_store(b, instr->_ops[0], v))
+ if (!ir_block_create_store(b, instr->context, instr->_ops[0], v))
return false;
instr->_ops[0]->store = store_value;
vec_push(b->instr, prevjump);
void ir_function_enumerate(ir_function *self)
{
size_t i;
- size_t instruction_id = 0;
+ size_t instruction_id = 1;
for (i = 0; i < vec_size(self->blocks); ++i)
{
self->blocks[i]->eid = i;
static bool ir_block_life_propagate(ir_block *b, ir_block *prev, bool *changed);
bool ir_function_calculate_liferanges(ir_function *self)
{
- size_t i;
+ size_t i, s;
bool changed;
+ /* parameters live at 0 */
+ for (i = 0; i < vec_size(self->params); ++i)
+ ir_value_life_merge(self->locals[i], 0);
+
do {
self->run_id++;
changed = false;
ir_block *block = self->blocks[0];
for (i = 0; i < vec_size(block->living); ++i) {
ir_value *v = block->living[i];
- if (v->memberof || v->store != store_local)
+ if (v->store != store_local)
+ continue;
+ if (v->vtype == TYPE_VECTOR)
continue;
+ self->flags |= IR_FLAG_HAS_UNINITIALIZED;
+ /* find the instruction reading from it */
+ for (s = 0; s < vec_size(v->reads); ++s) {
+ if (v->reads[s]->eid == v->life[0].end)
+ break;
+ }
+ if (s < vec_size(v->reads)) {
+ if (irwarning(v->context, WARN_USED_UNINITIALIZED,
+ "variable `%s` may be used uninitialized in this function\n"
+ " -> %s:%i",
+ v->name,
+ v->reads[s]->context.file, v->reads[s]->context.line)
+ )
+ {
+ return false;
+ }
+ continue;
+ }
+ if (v->memberof) {
+ ir_value *vec = v->memberof;
+ for (s = 0; s < vec_size(vec->reads); ++s) {
+ if (vec->reads[s]->eid == v->life[0].end)
+ break;
+ }
+ if (s < vec_size(vec->reads)) {
+ if (irwarning(v->context, WARN_USED_UNINITIALIZED,
+ "variable `%s` may be used uninitialized in this function\n"
+ " -> %s:%i",
+ v->name,
+ vec->reads[s]->context.file, vec->reads[s]->context.line)
+ )
+ {
+ return false;
+ }
+ continue;
+ }
+ }
if (irwarning(v->context, WARN_USED_UNINITIALIZED,
"variable `%s` may be used uninitialized in this function", v->name))
{
ir_value **locals;
size_t *sizes;
size_t *positions;
+ bool *unique;
} function_allocator;
-static bool function_allocator_alloc(function_allocator *alloc, const ir_value *var)
+static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
{
ir_value *slot;
- size_t vsize = type_sizeof[var->vtype];
+ size_t vsize = ir_value_sizeof(var);
+
+ var->code.local = vec_size(alloc->locals);
slot = ir_value_var("reg", store_global, var->vtype);
if (!slot)
vec_push(alloc->locals, slot);
vec_push(alloc->sizes, vsize);
+ vec_push(alloc->unique, var->unique_life);
return true;
return false;
}
+/* Try to place value `v` into an existing slot of `alloc` whose life
+ * ranges do not overlap v's; otherwise append a fresh slot via
+ * function_allocator_alloc.  On success v->code.local holds the chosen
+ * slot index.  Returns false when merging life ranges (or allocating a
+ * new slot) fails.
+ */
+static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
+{
+    size_t a;
+    ir_value *slot;
+
+    for (a = 0; a < vec_size(alloc->locals); ++a)
+    {
+        /* if it's reserved for a unique liferange: skip */
+        if (alloc->unique[a])
+            continue;
+
+        slot = alloc->locals[a];
+
+        /* never resize parameters
+         * will be required later when overlapping temps + locals
+         */
+        if (a < vec_size(self->params) &&
+            alloc->sizes[a] < ir_value_sizeof(v))
+        {
+            continue;
+        }
+
+        if (ir_values_overlap(v, slot))
+            continue;
+
+        if (!ir_value_life_merge_into(slot, v))
+            return false;
+
+        /* adjust size for this slot */
+        if (alloc->sizes[a] < ir_value_sizeof(v))
+            alloc->sizes[a] = ir_value_sizeof(v);
+
+        v->code.local = a;
+        return true;
+    }
+    /* no reusable slot found: open a new one */
+    if (a >= vec_size(alloc->locals)) {
+        if (!function_allocator_alloc(alloc, v))
+            return false;
+    }
+    return true;
+}
+
bool ir_function_allocate_locals(ir_function *self)
{
- size_t i, a;
+ size_t i;
bool retval = true;
size_t pos;
+ bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
- ir_value *slot;
- const ir_value *v;
+ ir_value *v;
- function_allocator alloc;
+ function_allocator lockalloc, globalloc;
if (!vec_size(self->locals) && !vec_size(self->values))
return true;
- alloc.locals = NULL;
- alloc.sizes = NULL;
- alloc.positions = NULL;
+ globalloc.locals = NULL;
+ globalloc.sizes = NULL;
+ globalloc.positions = NULL;
+ globalloc.unique = NULL;
+ lockalloc.locals = NULL;
+ lockalloc.sizes = NULL;
+ lockalloc.positions = NULL;
+ lockalloc.unique = NULL;
for (i = 0; i < vec_size(self->locals); ++i)
{
- if (!function_allocator_alloc(&alloc, self->locals[i]))
+ v = self->locals[i];
+ if (!OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
+ v->locked = true;
+ v->unique_life = true;
+ }
+ else if (i >= vec_size(self->params))
+ break;
+ else
+ v->locked = true; /* lock parameters locals */
+ if (!function_allocator_alloc((v->locked || !opt_gt ? &lockalloc : &globalloc), self->locals[i]))
+ goto error;
+ }
+ for (; i < vec_size(self->locals); ++i)
+ {
+ v = self->locals[i];
+ if (!vec_size(v->life))
+ continue;
+ if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? &lockalloc : &globalloc), v))
goto error;
}
if (!vec_size(v->life))
continue;
- for (a = 0; a < vec_size(alloc.locals); ++a)
- {
- slot = alloc.locals[a];
+ /* CALL optimization:
+ * If the value is a parameter-temp: 1 write, 1 read from a CALL
+ * and it's not "locked", write it to the OFS_PARM directly.
+ */
+ if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->locked && !v->unique_life) {
+ if (vec_size(v->reads) == 1 && vec_size(v->writes) == 1 &&
+ (v->reads[0]->opcode == VINSTR_NRCALL ||
+ (v->reads[0]->opcode >= INSTR_CALL0 && v->reads[0]->opcode <= INSTR_CALL8)
+ )
+ )
+ {
+ size_t param;
+ ir_instr *call = v->reads[0];
+ if (!vec_ir_value_find(call->params, v, ¶m)) {
+ irerror(call->context, "internal error: unlocked parameter %s not found", v->name);
+ goto error;
+ }
- if (ir_values_overlap(v, slot))
+ ++opts_optimizationcount[OPTIM_CALL_STORES];
+ v->callparam = true;
+ if (param < 8)
+ ir_value_code_setaddr(v, OFS_PARM0 + 3*param);
+ else {
+ ir_value *ep;
+ param -= 8;
+ if (vec_size(self->owner->extparam_protos) <= param)
+ ep = ir_gen_extparam_proto(self->owner);
+ else
+ ep = self->owner->extparam_protos[param];
+ ir_instr_op(v->writes[0], 0, ep, true);
+ call->params[param+8] = ep;
+ }
continue;
-
- if (!ir_value_life_merge_into(slot, v))
- goto error;
-
- /* adjust size for this slot */
- if (alloc.sizes[a] < type_sizeof[v->vtype])
- alloc.sizes[a] = type_sizeof[v->vtype];
-
- self->values[i]->code.local = a;
- break;
- }
- if (a >= vec_size(alloc.locals)) {
- self->values[i]->code.local = vec_size(alloc.locals);
- if (!function_allocator_alloc(&alloc, v))
- goto error;
+ }
+ if (vec_size(v->writes) == 1 && v->writes[0]->opcode == INSTR_CALL0)
+ {
+ v->store = store_return;
+ if (v->members[0]) v->members[0]->store = store_return;
+ if (v->members[1]) v->members[1]->store = store_return;
+ if (v->members[2]) v->members[2]->store = store_return;
+ ++opts_optimizationcount[OPTIM_CALL_STORES];
+ continue;
+ }
}
+
+ if (!ir_function_allocator_assign(self, (v->locked || !opt_gt ? &lockalloc : &globalloc), v))
+ goto error;
}
- if (!alloc.sizes) {
+ if (!lockalloc.sizes && !globalloc.sizes) {
goto cleanup;
}
+ vec_push(lockalloc.positions, 0);
+ vec_push(globalloc.positions, 0);
/* Adjust slot positions based on sizes */
- vec_push(alloc.positions, 0);
-
- if (vec_size(alloc.sizes))
- pos = alloc.positions[0] + alloc.sizes[0];
- else
- pos = 0;
- for (i = 1; i < vec_size(alloc.sizes); ++i)
- {
- pos = alloc.positions[i-1] + alloc.sizes[i-1];
- vec_push(alloc.positions, pos);
+ if (lockalloc.sizes) {
+ pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
+ for (i = 1; i < vec_size(lockalloc.sizes); ++i)
+ {
+ pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
+ vec_push(lockalloc.positions, pos);
+ }
+ self->allocated_locals = pos + vec_last(lockalloc.sizes);
+ }
+ if (globalloc.sizes) {
+ pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
+ for (i = 1; i < vec_size(globalloc.sizes); ++i)
+ {
+ pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
+ vec_push(globalloc.positions, pos);
+ }
+ self->globaltemps = pos + vec_last(globalloc.sizes);
}
- self->allocated_locals = pos + vec_last(alloc.sizes);
-
- /* Take over the actual slot positions */
+ /* Locals need to know their new position */
+ for (i = 0; i < vec_size(self->locals); ++i) {
+ v = self->locals[i];
+ if (i >= vec_size(self->params) && !vec_size(v->life))
+ continue;
+ if (v->locked || !opt_gt)
+ v->code.local = lockalloc.positions[v->code.local];
+ else
+ v->code.local = globalloc.positions[v->code.local];
+ }
+ /* Take over the actual slot positions on values */
for (i = 0; i < vec_size(self->values); ++i) {
- self->values[i]->code.local = alloc.positions[self->values[i]->code.local];
+ v = self->values[i];
+ if (!vec_size(v->life))
+ continue;
+ if (v->locked || !opt_gt)
+ v->code.local = lockalloc.positions[v->code.local];
+ else
+ v->code.local = globalloc.positions[v->code.local];
}
goto cleanup;
error:
retval = false;
cleanup:
- for (i = 0; i < vec_size(alloc.locals); ++i)
- ir_value_delete(alloc.locals[i]);
- vec_free(alloc.locals);
- vec_free(alloc.sizes);
- vec_free(alloc.positions);
+ for (i = 0; i < vec_size(lockalloc.locals); ++i)
+ ir_value_delete(lockalloc.locals[i]);
+ for (i = 0; i < vec_size(globalloc.locals); ++i)
+ ir_value_delete(globalloc.locals[i]);
+ vec_free(globalloc.unique);
+ vec_free(globalloc.locals);
+ vec_free(globalloc.sizes);
+ vec_free(globalloc.positions);
+ vec_free(lockalloc.unique);
+ vec_free(lockalloc.locals);
+ vec_free(lockalloc.sizes);
+ vec_free(lockalloc.positions);
return retval;
}
for (i = 0; i != vec_size(self->living); ++i)
{
tempbool = ir_value_life_merge(self->living[i], eid);
- /* debug
- if (tempbool)
- irerror(self->context, "block_living_add_instr() value instruction added %s: %i", self->living[i]->_name, (int)eid);
- */
changed = changed || tempbool;
}
return changed;
}
+/* Mark every value currently in this block's living set as locked.
+ * Returns true if at least one value was not locked before (i.e. the
+ * liferange computation changed state and another pass is needed).
+ */
+static bool ir_block_living_lock(ir_block *self)
+{
+    size_t idx;
+    bool newly_locked = false;
+    for (idx = 0; idx != vec_size(self->living); ++idx) {
+        ir_value *val = self->living[idx];
+        if (!val->locked) {
+            val->locked  = true;
+            newly_locked = true;
+        }
+    }
+    return newly_locked;
+}
+
static bool ir_block_life_prop_previous(ir_block* self, ir_block *prev, bool *changed)
{
size_t i;
* So we have to remove whatever does not exist in the previous block.
* They will be re-added on-read, but the liferange merge won't cause
* a change.
- */
for (i = 0; i < vec_size(self->living); ++i)
{
if (!vec_ir_value_find(prev->living, self->living[i], NULL)) {
--i;
}
}
+ */
/* Whatever the previous block still has in its living set
* must now be added to ours as well.
ir_instr *instr;
ir_value *value;
bool tempbool;
- size_t i, o, p;
+ size_t i, o, p, mem;
/* bitmasks which operands are read from or written to */
size_t read, write;
- char dbg_ind[16] = { '#', '0' };
+ char dbg_ind[16];
+ dbg_ind[0] = '#';
+ dbg_ind[1] = '0';
(void)dbg_ind;
if (prev)
{ --i;
instr = self->instr[i];
- /* PHI operands are always read operands */
- for (p = 0; p < vec_size(instr->phi); ++p)
- {
- value = instr->phi[p].value;
- if (value->memberof)
- value = value->memberof;
- if (!vec_ir_value_find(self->living, value, NULL))
- vec_push(self->living, value);
- }
-
- /* call params are read operands too */
- for (p = 0; p < vec_size(instr->params); ++p)
- {
- value = instr->params[p];
- if (value->memberof)
- value = value->memberof;
- if (!vec_ir_value_find(self->living, value, NULL))
- vec_push(self->living, value);
- }
-
/* See which operands are read and write operands */
ir_op_read_write(instr->opcode, &read, &write);
*changed = *changed || tempbool;
}
- /* Go through the 3 main operands */
+ /* Go through the 3 main operands
+ * writes first, then reads
+ */
for (o = 0; o < 3; ++o)
{
if (!instr->_ops[o]) /* no such operand */
continue;
value = instr->_ops[o];
- if (value->memberof)
- value = value->memberof;
/* We only care about locals */
/* we also calculate parameter liferanges so that locals
value->store != store_param)
continue;
- /* read operands */
- if (read & (1<<o))
- {
- if (!vec_ir_value_find(self->living, value, NULL))
- vec_push(self->living, value);
- }
-
/* write operands */
/* When we write to a local, we consider it "dead" for the
* remaining upper part of the function, since in SSA a value
* and make sure it's only printed once
* since this function is run multiple times.
*/
- /* For now: debug info: */
/* con_err( "Value only written %s\n", value->name); */
tempbool = ir_value_life_merge(value, instr->eid);
*changed = *changed || tempbool;
- /*
- ir_instr_dump(instr, dbg_ind, printf);
- abort();
- */
} else {
/* since 'living' won't contain it
* anymore, merge the value, since
* (A) doesn't.
*/
tempbool = ir_value_life_merge(value, instr->eid);
- /*
- if (tempbool)
- con_err( "value added id %s %i\n", value->name, (int)instr->eid);
- */
*changed = *changed || tempbool;
/* Then remove */
vec_remove(self->living, idx, 1);
}
+ /* Removing a vector removes all members */
+ for (mem = 0; mem < 3; ++mem) {
+ if (value->members[mem] && vec_ir_value_find(self->living, value->members[mem], &idx)) {
+ tempbool = ir_value_life_merge(value->members[mem], instr->eid);
+ *changed = *changed || tempbool;
+ vec_remove(self->living, idx, 1);
+ }
+ }
+ /* Removing the last member removes the vector */
+ if (value->memberof) {
+ value = value->memberof;
+ for (mem = 0; mem < 3; ++mem) {
+ if (value->members[mem] && vec_ir_value_find(self->living, value->members[mem], NULL))
+ break;
+ }
+ if (mem == 3 && vec_ir_value_find(self->living, value, &idx)) {
+ tempbool = ir_value_life_merge(value, instr->eid);
+ *changed = *changed || tempbool;
+ vec_remove(self->living, idx, 1);
+ }
+ }
+ }
+ }
+
+ for (o = 0; o < 3; ++o)
+ {
+ if (!instr->_ops[o]) /* no such operand */
+ continue;
+
+ value = instr->_ops[o];
+
+ /* We only care about locals */
+ /* we also calculate parameter liferanges so that locals
+ * can take up parameter slots */
+ if (value->store != store_value &&
+ value->store != store_local &&
+ value->store != store_param)
+ continue;
+
+ /* read operands */
+ if (read & (1<<o))
+ {
+ if (!vec_ir_value_find(self->living, value, NULL))
+ vec_push(self->living, value);
+ /* reading adds the full vector */
+ if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL))
+ vec_push(self->living, value->memberof);
+ for (mem = 0; mem < 3; ++mem) {
+ if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL))
+ vec_push(self->living, value->members[mem]);
+ }
+ }
+ }
+ /* PHI operands are always read operands */
+ for (p = 0; p < vec_size(instr->phi); ++p)
+ {
+ value = instr->phi[p].value;
+ if (!vec_ir_value_find(self->living, value, NULL))
+ vec_push(self->living, value);
+ /* reading adds the full vector */
+ if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL))
+ vec_push(self->living, value->memberof);
+ for (mem = 0; mem < 3; ++mem) {
+ if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL))
+ vec_push(self->living, value->members[mem]);
}
}
+
+ /* on a call, all these values must be "locked" */
+ if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) {
+ if (ir_block_living_lock(self))
+ *changed = true;
+ }
+ /* call params are read operands too */
+ for (p = 0; p < vec_size(instr->params); ++p)
+ {
+ value = instr->params[p];
+ if (!vec_ir_value_find(self->living, value, NULL))
+ vec_push(self->living, value);
+ /* reading adds the full vector */
+ if (value->memberof && !vec_ir_value_find(self->living, value->memberof, NULL))
+ vec_push(self->living, value->memberof);
+ for (mem = 0; mem < 3; ++mem) {
+ if (value->members[mem] && !vec_ir_value_find(self->living, value->members[mem], NULL))
+ vec_push(self->living, value->members[mem]);
+ }
+ }
+
/* (A) */
tempbool = ir_block_living_add_instr(self, instr->eid);
/*con_err( "living added values\n");*/
static bool gen_global_field(ir_value *global)
{
- if (global->isconst)
+ if (global->hasvalue)
{
ir_value *fld = global->constval.vpointer;
if (!fld) {
return false;
}
- /* Now, in this case, a relocation would be impossible to code
- * since it looks like this:
- * .vector v = origin; <- parse error, wtf is 'origin'?
- * .vector origin;
- *
- * But we will need a general relocation support later anyway
- * for functions... might as well support that here.
- */
- if (!fld->code.globaladdr) {
- irerror(global->context, "FIXME: Relocation support");
- return false;
- }
-
/* copy the field's value */
ir_value_code_setaddr(global, vec_size(code_globals));
- vec_push(code_globals, code_globals[fld->code.globaladdr]);
+ vec_push(code_globals, fld->code.fieldaddr);
if (global->fieldtype == TYPE_VECTOR) {
- vec_push(code_globals, code_globals[fld->code.globaladdr]+1);
- vec_push(code_globals, code_globals[fld->code.globaladdr]+2);
+ vec_push(code_globals, fld->code.fieldaddr+1);
+ vec_push(code_globals, fld->code.fieldaddr+2);
}
}
else
static bool gen_global_pointer(ir_value *global)
{
- if (global->isconst)
+ if (global->hasvalue)
{
ir_value *target = global->constval.vpointer;
if (!target) {
stmt.o1.s1 = (target->code_start) - vec_size(code_statements);
stmt.o2.s1 = 0;
stmt.o3.s1 = 0;
- vec_push(code_statements, stmt);
+ if (stmt.o1.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
/* no further instructions can be in this block */
return true;
if (ontrue->generated) {
stmt.opcode = INSTR_IF;
stmt.o2.s1 = (ontrue->code_start) - vec_size(code_statements);
- vec_push(code_statements, stmt);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
}
if (onfalse->generated) {
stmt.opcode = INSTR_IFNOT;
stmt.o2.s1 = (onfalse->code_start) - vec_size(code_statements);
- vec_push(code_statements, stmt);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
}
if (!ontrue->generated) {
if (onfalse->generated) {
ontrue = tmp;
}
stidx = vec_size(code_statements);
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
/* on false we jump, so add ontrue-path */
if (!gen_blocks_recursive(func, ontrue))
return false;
if (onfalse->generated) {
/* fixup the jump address */
code_statements[stidx].o2.s1 = (onfalse->code_start) - (stidx);
+ if (code_statements[stidx].o2.s1 == 1) {
+ code_statements[stidx] = code_statements[stidx+1];
+ if (code_statements[stidx].o1.s1 < 0)
+ code_statements[stidx].o1.s1++;
+ code_pop_statement();
+ }
stmt.opcode = vec_last(code_statements).opcode;
if (stmt.opcode == INSTR_GOTO ||
stmt.opcode == INSTR_IF ||
stmt.o1.s1 = (onfalse->code_start) - vec_size(code_statements);
stmt.o2.s1 = 0;
stmt.o3.s1 = 0;
- vec_push(code_statements, stmt);
+ if (stmt.o1.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
return true;
}
+ else if (code_statements[stidx].o2.s1 == 1) {
+ code_statements[stidx] = code_statements[stidx+1];
+ if (code_statements[stidx].o1.s1 < 0)
+ code_statements[stidx].o1.s1++;
+ code_pop_statement();
+ }
/* if not, generate now */
block = onfalse;
goto tailcall;
}
- if (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8) {
- /* Trivial call translation:
- * copy all params to OFS_PARM*
- * if the output's storetype is not store_return,
- * add append a STORE instruction!
- *
- * NOTES on how to do it better without much trouble:
- * -) The liferanges!
- * Simply check the liferange of all parameters for
- * other CALLs. For each param with no CALL in its
- * liferange, we can store it in an OFS_PARM at
- * generation already. This would even include later
- * reuse.... probably... :)
- */
+ if ( (instr->opcode >= INSTR_CALL0 && instr->opcode <= INSTR_CALL8)
+ || instr->opcode == VINSTR_NRCALL)
+ {
size_t p, first;
ir_value *retvalue;
for (p = 0; p < first; ++p)
{
ir_value *param = instr->params[p];
+ if (param->callparam)
+ continue;
stmt.opcode = INSTR_STORE_F;
stmt.o3.u1 = 0;
stmt.opcode = type_store_instr[param->vtype];
stmt.o1.u1 = ir_value_code_addr(param);
stmt.o2.u1 = OFS_PARM0 + 3 * p;
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
}
/* Now handle extparams */
first = vec_size(instr->params);
ir_value *param = instr->params[p];
ir_value *targetparam;
- if (p-8 >= vec_size(ir->extparams)) {
- irerror(instr->context, "Not enough extparam-globals have been created");
- return false;
- }
+ if (param->callparam)
+ continue;
+
+ if (p-8 >= vec_size(ir->extparams))
+ ir_gen_extparam(ir);
targetparam = ir->extparams[p-8];
stmt.opcode = type_store_instr[param->vtype];
stmt.o1.u1 = ir_value_code_addr(param);
stmt.o2.u1 = ir_value_code_addr(targetparam);
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
}
stmt.opcode = INSTR_CALL0 + vec_size(instr->params);
stmt.o1.u1 = ir_value_code_addr(instr->_ops[1]);
stmt.o2.u1 = 0;
stmt.o3.u1 = 0;
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
retvalue = instr->_ops[0];
- if (retvalue && retvalue->store != store_return && vec_size(retvalue->life))
+ if (retvalue && retvalue->store != store_return &&
+ (retvalue->store == store_global || vec_size(retvalue->life)))
{
/* not to be kept in OFS_RETURN */
- if (retvalue->vtype == TYPE_FIELD)
- stmt.opcode = field_store_instr[retvalue->vtype];
+ if (retvalue->vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
+ stmt.opcode = field_store_instr[retvalue->fieldtype];
else
stmt.opcode = type_store_instr[retvalue->vtype];
stmt.o1.u1 = OFS_RETURN;
stmt.o2.u1 = ir_value_code_addr(retvalue);
stmt.o3.u1 = 0;
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
}
continue;
}
/* 2-operand instructions with A -> B */
stmt.o2.u1 = stmt.o3.u1;
stmt.o3.u1 = 0;
+
+ /* tiny optimization, don't output
+ * STORE a, a
+ */
+ if (stmt.o2.u1 == stmt.o1.u1 &&
+ OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
+ {
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ continue;
+ }
}
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, instr->context.line);
}
return true;
}
static bool gen_function_code(ir_function *self)
{
ir_block *block;
- prog_section_statement stmt;
+ prog_section_statement stmt, *retst;
/* Starting from entry point, we generate blocks "as they come"
* for now. Dead blocks will not be translated obviously.
return false;
}
- /* otherwise code_write crashes since it debug-prints functions until AINSTR_END */
- stmt.opcode = AINSTR_END;
- stmt.o1.u1 = 0;
- stmt.o2.u1 = 0;
- stmt.o3.u1 = 0;
- vec_push(code_statements, stmt);
+ /* code_write and qcvm -disasm need to know that the function ends here */
+ retst = &vec_last(code_statements);
+ if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
+ self->outtype == TYPE_VOID &&
+ retst->opcode == INSTR_RETURN &&
+ !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
+ {
+ retst->opcode = INSTR_DONE;
+ ++opts_optimizationcount[OPTIM_VOID_RETURN];
+ } else {
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, vec_last(code_linenums));
+ }
return true;
}
ir_function *irfun;
size_t i;
- size_t local_var_end;
- if (!global->isconst || (!global->constval.vfunc))
+ if (!global->hasvalue || (!global->constval.vfunc))
{
irerror(global->context, "Invalid state of function-global: not constant: %s", global->name);
return false;
fun.nargs = 8;
for (i = 0;i < 8; ++i) {
- if (i >= fun.nargs)
+ if ((int32_t)i >= fun.nargs)
fun.argsize[i] = 0;
else
- fun.argsize[i] = type_sizeof[irfun->params[i]];
- }
-
- fun.firstlocal = vec_size(code_globals);
-
- local_var_end = fun.firstlocal;
- for (i = 0; i < vec_size(irfun->locals); ++i) {
- if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
- irerror(irfun->locals[i]->context, "Failed to generate local %s", irfun->locals[i]->name);
- return false;
- }
- }
- if (vec_size(irfun->locals)) {
- ir_value *last = vec_last(irfun->locals);
- local_var_end = last->code.globaladdr;
- local_var_end += type_sizeof[last->vtype];
- }
- for (i = 0; i < vec_size(irfun->values); ++i)
- {
- /* generate code.globaladdr for ssa values */
- ir_value *v = irfun->values[i];
- ir_value_code_setaddr(v, local_var_end + v->code.local);
- }
- for (i = 0; i < irfun->allocated_locals; ++i) {
- /* fill the locals with zeros */
- vec_push(code_globals, 0);
+ fun.argsize[i] = type_sizeof_[irfun->params[i]];
}
- fun.locals = vec_size(code_globals) - fun.firstlocal;
+ fun.firstlocal = 0;
+ fun.locals = irfun->allocated_locals;
if (irfun->builtin)
- fun.entry = irfun->builtin;
+ fun.entry = irfun->builtin+1;
else {
irfun->code_function_def = vec_size(code_functions);
fun.entry = vec_size(code_statements);
return true;
}
+/* Create a prototype ir_value for the next extra parameter (parameters
+ * beyond the 8 native OFS_PARM slots), named "EXTPARM#<n>" with n counted
+ * from 9 upward. Only the proto is created here; the backing global and
+ * its def entry are emitted later by ir_gen_extparam().
+ */
+static ir_value* ir_gen_extparam_proto(ir_builder *ir)
+{
+    ir_value *global;
+    char name[128];
+
+    snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparam_protos)+8));
+    /* NOTE(review): ir_value_var's result is pushed without a NULL check,
+     * unlike other call sites — confirm allocation failure is handled
+     * (or deliberately fatal) at the callers. */
+    global = ir_value_var(name, store_global, TYPE_VECTOR);
+
+    vec_push(ir->extparam_protos, global);
+    return global;
+}
+
static void ir_gen_extparam(ir_builder *ir)
{
prog_section_def def;
ir_value *global;
- char name[128];
- snprintf(name, sizeof(name), "EXTPARM#%i", (int)(vec_size(ir->extparams)+8));
- global = ir_value_var(name, store_global, TYPE_VECTOR);
+ if (vec_size(ir->extparam_protos) < vec_size(ir->extparams)+1)
+ global = ir_gen_extparam_proto(ir);
+ else
+ global = ir->extparam_protos[vec_size(ir->extparams)];
- def.name = code_genstring(name);
+ def.name = code_genstring(global->name);
def.type = TYPE_VECTOR;
def.offset = vec_size(code_globals);
}
stmt.o1.u1 = ir_value_code_addr(ep);
stmt.o2.u1 = ir_value_code_addr(self->locals[i]);
- vec_push(code_statements, stmt);
+ code_push_statement(&stmt, self->context.line);
}
return true;
}
+/* Assign final global-data addresses to a function's locals and SSA
+ * temporaries, and reserve the backing space in code_globals.
+ *
+ * With -g, without -Ooverlap-locals, or when the function is flagged
+ * no-overlap, locals get a private region appended to code_globals;
+ * otherwise they share the builder-wide common-local region
+ * (ir->first_common_local). Unlocked temporaries go to the shared
+ * global-temp region when -Oglobal-temps is on.
+ * Returns false if emitting a local's def/global entry fails.
+ */
+static bool gen_function_locals(ir_builder *ir, ir_value *global)
+{
+    prog_section_function *def;
+    ir_function *irfun;
+    size_t i;
+    uint32_t firstlocal, firstglobal;
+
+    irfun = global->constval.vfunc;
+    def = code_functions + irfun->code_function_def;
+
+    if (opts.g || !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS) || (irfun->flags & IR_FLAG_MASK_NO_OVERLAP))
+        firstlocal = def->firstlocal = vec_size(code_globals);
+    else {
+        firstlocal = def->firstlocal = ir->first_common_local;
+        ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
+    }
+
+    /* without -Oglobal-temps, temps live alongside the locals */
+    firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? ir->first_common_globaltemp : firstlocal);
+
+    /* grow code_globals so the local region is zero-filled and in bounds */
+    for (i = vec_size(code_globals); i < firstlocal + irfun->allocated_locals; ++i)
+        vec_push(code_globals, 0);
+    for (i = 0; i < vec_size(irfun->locals); ++i) {
+        ir_value *v = irfun->locals[i];
+        if (v->locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
+            /* locked locals keep a stable address in the local region
+             * and get a proper def entry generated for them */
+            ir_value_code_setaddr(v, firstlocal + v->code.local);
+            if (!ir_builder_gen_global(ir, irfun->locals[i], true)) {
+                irerror(irfun->locals[i]->context, "failed to generate local %s", irfun->locals[i]->name);
+                return false;
+            }
+        }
+        else
+            ir_value_code_setaddr(v, firstglobal + v->code.local);
+    }
+    for (i = 0; i < vec_size(irfun->values); ++i)
+    {
+        ir_value *v = irfun->values[i];
+        /* call parameters were written straight to OFS_PARM* already */
+        if (v->callparam)
+            continue;
+        if (v->locked)
+            ir_value_code_setaddr(v, firstlocal + v->code.local);
+        else
+            ir_value_code_setaddr(v, firstglobal + v->code.local);
+    }
+    return true;
+}
+
static bool gen_global_function_code(ir_builder *ir, ir_value *global)
{
prog_section_function *fundef;
irfun = global->constval.vfunc;
if (!irfun) {
- irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER,
- "function `%s` has no body and in QC implicitly becomes a function-pointer", global->name);
+ if (global->cvq == CV_NONE) {
+ irwarning(global->context, WARN_IMPLICIT_FUNCTION_POINTER,
+ "function `%s` has no body and in QC implicitly becomes a function-pointer", global->name);
+ }
/* this was a function pointer, don't generate code for those */
return true;
}
fundef = &code_functions[irfun->code_function_def];
fundef->entry = vec_size(code_statements);
+ if (!gen_function_locals(ir, global)) {
+ irerror(irfun->context, "Failed to generate locals for function %s", irfun->name);
+ return false;
+ }
if (!gen_function_extparam_copy(irfun)) {
irerror(irfun->context, "Failed to generate extparam-copy code for function %s", irfun->name);
return false;
return true;
}
+/* Emit per-component float defs ("<name>_x", "_y", "_z") following a
+ * vector def, so engines and tools that look up components by name can
+ * find them. Skipped for unnamed globals, '#'-prefixed immediates, and
+ * when -fsingle-vector-defs is set. <def> is taken by value: type/offset
+ * are modified locally without touching the caller's def.
+ */
+static void gen_vector_defs(prog_section_def def, const char *name)
+{
+    char *component;
+    size_t len, i;
+
+    if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+        return;
+
+    def.type = TYPE_FLOAT;
+
+    len = strlen(name);
+
+    /* build "<name>_x" in a scratch buffer; the last character is
+     * cycled through 'x','y','z' in the loop below */
+    component = (char*)mem_a(len+3);
+    memcpy(component, name, len);
+    len += 2;
+    component[len]   = 0;
+    component[len-2] = '_';
+    component[len-1] = 'x';
+
+    for (i = 0; i < 3; ++i) {
+        def.name = code_genstring(component);
+        vec_push(code_defs, def);
+        def.offset++;
+        component[len-1]++;
+    }
+
+    /* fix: scratch name buffer was previously leaked */
+    mem_d(component);
+}
+
+/* Emit per-component float field entries ("<name>_x", "_y", "_z")
+ * following a vector field, mirroring gen_vector_defs for the fields
+ * section. Skipped for unnamed fields and with -fsingle-vector-defs.
+ * <fld> is taken by value: type/offset are modified locally only.
+ */
+static void gen_vector_fields(prog_section_field fld, const char *name)
+{
+    char *component;
+    size_t len, i;
+
+    if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+        return;
+
+    fld.type = TYPE_FLOAT;
+
+    len = strlen(name);
+
+    /* build "<name>_x"; the suffix letter is incremented per component */
+    component = (char*)mem_a(len+3);
+    memcpy(component, name, len);
+    len += 2;
+    component[len]   = 0;
+    component[len-2] = '_';
+    component[len-1] = 'x';
+
+    for (i = 0; i < 3; ++i) {
+        fld.name = code_genstring(component);
+        vec_push(code_fields, fld);
+        fld.offset++;
+        component[len-1]++;
+    }
+
+    /* fix: scratch name buffer was previously leaked */
+    mem_d(component);
+}
+
static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
{
size_t i;
int32_t *iptr;
prog_section_def def;
+ bool pushdef = false;
def.type = global->vtype;
def.offset = vec_size(code_globals);
+ def.name = 0;
+ if (opts.g || !islocal)
+ {
+ pushdef = true;
+
+ if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
+ (global->name[0] == '#' || global->cvq == CV_CONST))
+ {
+ pushdef = false;
+ }
- if (global->name) {
- if (global->name[0] == '#') {
- if (!self->str_immediate)
- self->str_immediate = code_genstring("IMMEDIATE");
- def.name = global->code.name = self->str_immediate;
+ if (pushdef && global->name) {
+ if (global->name[0] == '#') {
+ if (!self->str_immediate)
+ self->str_immediate = code_genstring("IMMEDIATE");
+ def.name = global->code.name = self->str_immediate;
+ }
+ else
+ def.name = global->code.name = code_genstring(global->name);
}
else
- def.name = global->code.name = code_genstring(global->name);
+ def.name = 0;
+ if (islocal) {
+ def.offset = ir_value_code_addr(global);
+ vec_push(code_defs, def);
+ if (global->vtype == TYPE_VECTOR)
+ gen_vector_defs(def, global->name);
+ else if (global->vtype == TYPE_FIELD && global->fieldtype == TYPE_VECTOR)
+ gen_vector_defs(def, global->name);
+ return true;
+ }
}
- else
- def.name = 0;
+ if (islocal)
+ return true;
switch (global->vtype)
{
ir_value_code_setaddr(global, vec_size(code_globals));
vec_push(code_globals, 0);
/* Add the def */
- vec_push(code_defs, def);
+ if (pushdef) vec_push(code_defs, def);
return true;
case TYPE_POINTER:
- vec_push(code_defs, def);
+ if (pushdef) vec_push(code_defs, def);
return gen_global_pointer(global);
case TYPE_FIELD:
- vec_push(code_defs, def);
+ if (pushdef) {
+ vec_push(code_defs, def);
+ if (global->fieldtype == TYPE_VECTOR)
+ gen_vector_defs(def, global->name);
+ }
return gen_global_field(global);
case TYPE_ENTITY:
/* fall through */
case TYPE_FLOAT:
{
ir_value_code_setaddr(global, vec_size(code_globals));
- if (global->isconst) {
+ if (global->hasvalue) {
iptr = (int32_t*)&global->constval.ivec[0];
vec_push(code_globals, *iptr);
} else {
vec_push(code_globals, 0);
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
- vec_push(code_defs, def);
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(code_defs, def);
return global->code.globaladdr >= 0;
}
case TYPE_STRING:
{
ir_value_code_setaddr(global, vec_size(code_globals));
- if (global->isconst) {
+ if (global->hasvalue) {
vec_push(code_globals, code_genstring(global->constval.vstring));
} else {
vec_push(code_globals, 0);
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
- vec_push(code_defs, def);
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(code_defs, def);
return global->code.globaladdr >= 0;
}
case TYPE_VECTOR:
{
size_t d;
ir_value_code_setaddr(global, vec_size(code_globals));
- if (global->isconst) {
+ if (global->hasvalue) {
iptr = (int32_t*)&global->constval.ivec[0];
vec_push(code_globals, iptr[0]);
if (global->code.globaladdr < 0)
return false;
- for (d = 1; d < type_sizeof[global->vtype]; ++d)
- {
+ for (d = 1; d < type_sizeof_[global->vtype]; ++d) {
vec_push(code_globals, iptr[d]);
}
} else {
vec_push(code_globals, 0);
if (global->code.globaladdr < 0)
return false;
- for (d = 1; d < type_sizeof[global->vtype]; ++d)
- {
+ for (d = 1; d < type_sizeof_[global->vtype]; ++d) {
vec_push(code_globals, 0);
}
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
- vec_push(code_defs, def);
+ if (pushdef) {
+ vec_push(code_defs, def);
+ def.type &= ~DEF_SAVEGLOBAL;
+ gen_vector_defs(def, global->name);
+ }
return global->code.globaladdr >= 0;
}
case TYPE_FUNCTION:
ir_value_code_setaddr(global, vec_size(code_globals));
- if (!global->isconst) {
+ if (!global->hasvalue) {
vec_push(code_globals, 0);
if (global->code.globaladdr < 0)
return false;
vec_push(code_globals, vec_size(code_functions));
if (!gen_global_function(self, global))
return false;
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
- vec_push(code_defs, def);
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
+ if (pushdef) vec_push(code_defs, def);
return true;
case TYPE_VARIANT:
/* assume biggest type */
ir_value_code_setaddr(global, vec_size(code_globals));
vec_push(code_globals, 0);
- for (i = 1; i < type_sizeof[TYPE_VARIANT]; ++i)
+ for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
vec_push(code_globals, 0);
return true;
default:
}
}
+/* Pre-allocate entity-field storage for <field> so its field address
+ * (code.fieldaddr) is known before globals referencing it are generated
+ * (gen_global_field copies fieldaddr directly into code_globals).
+ */
+static void ir_builder_prepare_field(ir_value *field)
+{
+    field->code.fieldaddr = code_alloc_field(type_sizeof_[field->fieldtype]);
+}
+
static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
{
prog_section_def def;
def.offset = (uint16_t)vec_size(code_globals);
/* create a global named the same as the field */
- if (opts_standard == COMPILER_GMQCC) {
+ if (opts.standard == COMPILER_GMQCC) {
/* in our standard, the global gets a dot prefix */
size_t len = strlen(field->name);
char name[1024];
return false;
}
- fld.offset = code_alloc_field(type_sizeof[field->fieldtype]);
+ fld.offset = field->code.fieldaddr;
vec_push(code_fields, fld);
vec_push(code_globals, fld.offset+2);
}
+ if (field->fieldtype == TYPE_VECTOR) {
+ gen_vector_defs(def, field->name);
+ gen_vector_fields(fld, field->name);
+ }
+
return field->code.globaladdr >= 0;
}
{
prog_section_statement stmt;
size_t i;
+ char *lnofile = NULL;
code_init();
+ for (i = 0; i < vec_size(self->fields); ++i)
+ {
+ ir_builder_prepare_field(self->fields[i]);
+ }
+
for (i = 0; i < vec_size(self->globals); ++i)
{
if (!ir_builder_gen_global(self, self->globals[i], false)) {
return false;
}
+ if (self->globals[i]->vtype == TYPE_FUNCTION) {
+ ir_function *func = self->globals[i]->constval.vfunc;
+ if (func && self->max_locals < func->allocated_locals &&
+ !(func->flags & IR_FLAG_MASK_NO_OVERLAP))
+ {
+ self->max_locals = func->allocated_locals;
+ }
+ if (func && self->max_globaltemps < func->globaltemps)
+ self->max_globaltemps = func->globaltemps;
+ }
}
for (i = 0; i < vec_size(self->fields); ++i)
}
}
+ /* generate global temps */
+ self->first_common_globaltemp = vec_size(code_globals);
+ for (i = 0; i < self->max_globaltemps; ++i) {
+ vec_push(code_globals, 0);
+ }
+ /* generate common locals */
+ self->first_common_local = vec_size(code_globals);
+ for (i = 0; i < self->max_locals; ++i) {
+ vec_push(code_globals, 0);
+ }
+
/* generate function code */
for (i = 0; i < vec_size(self->globals); ++i)
{
return false;
}
- /* DP errors if the last instruction is not an INSTR_DONE
- * and for debugging purposes we add an additional AINSTR_END
- * to the end of functions, so here it goes:
- */
- stmt.opcode = INSTR_DONE;
- stmt.o1.u1 = 0;
- stmt.o2.u1 = 0;
- stmt.o3.u1 = 0;
- vec_push(code_statements, stmt);
+ /* DP errors if the last instruction is not an INSTR_DONE. */
+ if (vec_last(code_statements).opcode != INSTR_DONE)
+ {
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, vec_last(code_linenums));
+ }
+
+ if (opts.pp_only)
+ return true;
- if (!opts_pp_only)
- con_out("writing '%s'...\n", filename);
- return code_write(filename);
+ if (vec_size(code_statements) != vec_size(code_linenums)) {
+ con_err("Linecounter wrong: %lu != %lu\n",
+ (unsigned long)vec_size(code_statements),
+ (unsigned long)vec_size(code_linenums));
+ } else if (OPTS_FLAG(LNO)) {
+ char *dot;
+ size_t filelen = strlen(filename);
+
+ memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
+ dot = strrchr(lnofile, '.');
+ if (!dot) {
+ vec_pop(lnofile);
+ } else {
+ vec_shrinkto(lnofile, dot - lnofile);
+ }
+ memcpy(vec_add(lnofile, 5), ".lno", 5);
+ }
+
+ if (!opts.quiet) {
+ if (lnofile)
+ con_out("writing '%s' and '%s'...\n", filename, lnofile);
+ else
+ con_out("writing '%s'\n", filename);
+ }
+ if (!code_write(filename, lnofile)) {
+ vec_free(lnofile);
+ return false;
+ }
+ vec_free(lnofile);
+ return true;
}
/***********************************************************************
#define IND_BUFSZ 1024
-#ifdef WIN32
-# define strncat(dst, src, sz) strncat_s(dst, sz, src, _TRUNCATE)
+#ifdef _MSC_VER
+# define strncat(dst, src, sz) strncat_s(dst, sz, src, _TRUNCATE)
#endif
const char *qc_opname(int op)
for (i = 0; i < vec_size(b->globals); ++i)
{
oprintf("global ");
- if (b->globals[i]->isconst)
+ if (b->globals[i]->hasvalue)
oprintf("%s = ", b->globals[i]->name);
ir_value_dump(b->globals[i], oprintf);
oprintf("\n");
}
oprintf("%sliferanges:\n", ind);
for (i = 0; i < vec_size(f->locals); ++i) {
- size_t l;
+ const char *attr = "";
+ size_t l, m;
ir_value *v = f->locals[i];
- oprintf("%s\t%s: unique ", ind, v->name);
+ if (v->unique_life && v->locked)
+ attr = "unique,locked ";
+ else if (v->unique_life)
+ attr = "unique ";
+ else if (v->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s %s@%i ", ind, v->name, type_name[v->vtype], attr, (int)v->code.local);
for (l = 0; l < vec_size(v->life); ++l) {
oprintf("[%i,%i] ", v->life[l].start, v->life[l].end);
}
oprintf("\n");
+ for (m = 0; m < 3; ++m) {
+ ir_value *vm = v->members[m];
+ if (!vm)
+ continue;
+ if (vm->unique_life && vm->locked)
+ attr = "unique,locked ";
+ else if (vm->unique_life)
+ attr = "unique ";
+ else if (vm->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s@%i ", ind, vm->name, attr, (int)vm->code.local);
+ for (l = 0; l < vec_size(vm->life); ++l) {
+ oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end);
+ }
+ oprintf("\n");
+ }
}
for (i = 0; i < vec_size(f->values); ++i) {
- size_t l;
+ const char *attr = "";
+ size_t l, m;
ir_value *v = f->values[i];
- oprintf("%s\t%s: @%i ", ind, v->name, (int)v->code.local);
+ if (v->unique_life && v->locked)
+ attr = "unique,locked ";
+ else if (v->unique_life)
+ attr = "unique ";
+ else if (v->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s %s@%i ", ind, v->name, type_name[v->vtype], attr, (int)v->code.local);
for (l = 0; l < vec_size(v->life); ++l) {
oprintf("[%i,%i] ", v->life[l].start, v->life[l].end);
}
oprintf("\n");
+ for (m = 0; m < 3; ++m) {
+ ir_value *vm = v->members[m];
+ if (!vm)
+ continue;
+ if (vm->unique_life && vm->locked)
+ attr = "unique,locked ";
+ else if (vm->unique_life)
+ attr = "unique ";
+ else if (vm->locked)
+ attr = "locked ";
+ oprintf("%s\t%s: %s@%i ", ind, vm->name, attr, (int)vm->code.local);
+ for (l = 0; l < vec_size(vm->life); ++l) {
+ oprintf("[%i,%i] ", vm->life[l].start, vm->life[l].end);
+ }
+ oprintf("\n");
+ }
}
if (vec_size(f->blocks))
{
if (in->_ops[1] || in->_ops[2])
oprintf(" <- ");
}
- if (in->opcode == INSTR_CALL0) {
+ if (in->opcode == INSTR_CALL0 || in->opcode == VINSTR_NRCALL) {
oprintf("CALL%i\t", vec_size(in->params));
} else
oprintf("%s\t", qc_opname(in->opcode));
void ir_value_dump(ir_value* v, int (*oprintf)(const char*, ...))
{
- if (v->isconst) {
+ if (v->hasvalue) {
switch (v->vtype) {
default:
case TYPE_VOID: