static bool irwarning(lex_ctx ctx, int warntype, const char *fmt, ...)
{
- va_list ap;
- int lvl = LVL_WARNING;
-
- if (warntype && !OPTS_WARN(warntype))
- return false;
-
- if (opts.werror)
- lvl = LVL_ERROR;
-
- va_start(ap, fmt);
- con_vprintmsg(lvl, ctx.file, ctx.line, (opts.werror ? "error" : "warning"), fmt, ap);
- va_end(ap);
-
- return opts.werror;
+ /* Delegate to the shared warning machinery instead of duplicating
+  * the -W / -Werror handling here.  NOTE(review): assumes
+  * vcompile_warning returns true when the warning is promoted to an
+  * error (matching the old `return opts.werror`) -- confirm against
+  * its definition.
+  */
+ bool r;
+ va_list ap;
+ va_start(ap, fmt);
+ r = vcompile_warning(ctx, warntype, fmt, ap);
+ va_end(ap);
+ return r;
}
/***********************************************************************
(op >= INSTR_AND && op <= INSTR_BITOR) );
}
-bool ir_function_pass_minor(ir_function *self)
+bool ir_function_pass_peephole(ir_function *self)
{
size_t b;
size_t i;
ir_block *block = self->blocks[b];
- if (vec_size(block->instr) < 2)
- continue;
-
- for (i = 1; i < vec_size(block->instr); ++i) {
- ir_instr *store;
- ir_instr *oper;
- ir_value *value;
+ for (i = 0; i < vec_size(block->instr); ++i) {
+ ir_instr *inst;
+ inst = block->instr[i];
- store = block->instr[i];
- if (store->opcode < INSTR_STORE_F ||
- store->opcode > INSTR_STORE_FNC)
+ if (i >= 1 &&
+ (inst->opcode >= INSTR_STORE_F &&
+ inst->opcode <= INSTR_STORE_FNC))
{
- continue;
- }
+ ir_instr *store;
+ ir_instr *oper;
+ ir_value *value;
- oper = block->instr[i-1];
- if (!instr_is_operation(oper->opcode))
- continue;
+ store = inst;
- value = oper->_ops[0];
+ oper = block->instr[i-1];
+ if (!instr_is_operation(oper->opcode))
+ continue;
- /* only do it for SSA values */
- if (value->store != store_value)
- continue;
+ value = oper->_ops[0];
- /* don't optimize out the temp if it's used later again */
- if (vec_size(value->reads) != 1)
- continue;
+ /* only do it for SSA values */
+ if (value->store != store_value)
+ continue;
- /* The very next store must use this value */
- if (value->reads[0] != store)
- continue;
+ /* don't optimize out the temp if it's used later again */
+ if (vec_size(value->reads) != 1)
+ continue;
- /* And of course the store must _read_ from it, so it's in
- * OP 1 */
- if (store->_ops[1] != value)
- continue;
+ /* The very next store must use this value */
+ if (value->reads[0] != store)
+ continue;
+
+ /* And of course the store must _read_ from it, so it's in
+ * OP 1 */
+ if (store->_ops[1] != value)
+ continue;
- ++optimization_count[OPTIM_PEEPHOLE];
- oper->_ops[0] = store->_ops[0];
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ (void)!ir_instr_op(oper, 0, store->_ops[0], true);
- vec_remove(block->instr, i, 1);
- ir_instr_delete(store);
+ vec_remove(block->instr, i, 1);
+ ir_instr_delete(store);
+ }
+ else if (inst->opcode == VINSTR_COND)
+ {
+ /* A COND testing a value produced directly by a NOT: the NOT
+  * can be removed and the COND's ontrue/onfalse targets swapped
+  * instead.
+  */
+ while (true) {
+ ir_block *tmp;
+ size_t inotid;
+ ir_instr *inot;
+ ir_value *value;
+ value = inst->_ops[0];
+
+ if (value->store != store_value ||
+ vec_size(value->reads) != 1 ||
+ value->reads[0] != inst)
+ {
+ break;
+ }
+
+ inot = value->writes[0];
+ if (inot->_ops[0] != value ||
+ inot->opcode < INSTR_NOT_F ||
+ inot->opcode > INSTR_NOT_FNC ||
+ inot->opcode == INSTR_NOT_V) /* can't do this one */
+ {
+ break;
+ }
+
+ /* count */
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
+ /* change operand */
+ (void)!ir_instr_op(inst, 0, inot->_ops[1], false);
+ /* remove NOT */
+ tmp = inot->owner;
+ for (inotid = 0; inotid < vec_size(tmp->instr); ++inotid) {
+ if (tmp->instr[inotid] == inot)
+ break;
+ }
+ if (inotid >= vec_size(tmp->instr)) {
+ compile_error(inst->context, "sanity-check failed: failed to find instruction to optimize out");
+ return false;
+ }
+ vec_remove(tmp->instr, inotid, 1);
+ ir_instr_delete(inot);
+ /* swap ontrue/onfalse */
+ tmp = inst->bops[0];
+ inst->bops[0] = inst->bops[1];
+ inst->bops[1] = tmp;
+ }
+ continue;
+ }
}
}
ret->_ops[0] == store->_ops[0] &&
store->_ops[1] == call->_ops[0])
{
- ++optimization_count[OPTIM_PEEPHOLE];
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
call->_ops[0] = store->_ops[0];
vec_remove(block->instr, vec_size(block->instr) - 2, 1);
ir_instr_delete(store);
if (ret->_ops[0] && call->_ops[0] != ret->_ops[0])
continue;
- ++optimization_count[OPTIM_TAIL_RECURSION];
+ ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
vec_shrinkby(block->instr, 2);
block->final = false; /* open it back up */
return true;
if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
- if (!ir_function_pass_minor(self)) {
+ if (!ir_function_pass_peephole(self)) {
irerror(self->context, "generic optimization pass broke something in `%s`", self->name);
return false;
}
stmt.o1.s1 = (target->code_start) - vec_size(code_statements);
stmt.o2.s1 = 0;
stmt.o3.s1 = 0;
- code_push_statement(&stmt, instr->context.line);
+ if (stmt.o1.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
/* no further instructions can be in this block */
return true;
if (ontrue->generated) {
stmt.opcode = INSTR_IF;
stmt.o2.s1 = (ontrue->code_start) - vec_size(code_statements);
- code_push_statement(&stmt, instr->context.line);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
}
if (onfalse->generated) {
stmt.opcode = INSTR_IFNOT;
stmt.o2.s1 = (onfalse->code_start) - vec_size(code_statements);
- code_push_statement(&stmt, instr->context.line);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
}
if (!ontrue->generated) {
if (onfalse->generated) {
ontrue = tmp;
}
stidx = vec_size(code_statements);
- code_push_statement(&stmt, instr->context.line);
+ if (stmt.o2.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
/* on false we jump, so add ontrue-path */
if (!gen_blocks_recursive(func, ontrue))
return false;
stmt.o1.s1 = (onfalse->code_start) - vec_size(code_statements);
stmt.o2.s1 = 0;
stmt.o3.s1 = 0;
- code_push_statement(&stmt, instr->context.line);
+ if (stmt.o1.s1 != 1)
+ code_push_statement(&stmt, instr->context.line);
return true;
}
/* if not, generate now */
if (stmt.o2.u1 == stmt.o1.u1 &&
OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
{
- ++optimization_count[OPTIM_PEEPHOLE];
+ ++opts_optimizationcount[OPTIM_PEEPHOLE];
continue;
}
}
return false;
}
- /* otherwise code_write crashes since it debug-prints functions until AINSTR_END */
- stmt.opcode = AINSTR_END;
+ /* code_write and qcvm -disasm need to know that the function ends here */
+ stmt.opcode = INSTR_DONE;
stmt.o1.u1 = 0;
stmt.o2.u1 = 0;
stmt.o3.u1 = 0;
return true;
}
+/* Emit per-component float defs ("name_x", "name_y", "name_z") for a
+ * vector global so engines can address the components individually.
+ * `def` is taken by value: type/name/offset are rewritten locally.
+ * Skipped when there is no name or -fsingle-vector-defs is set.
+ */
+static void gen_vector_defs(prog_section_def def, const char *name)
+{
+    char *component;
+    size_t len, i;
+
+    if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+        return;
+
+    def.type = TYPE_FLOAT;
+
+    len = strlen(name);
+
+    /* room for "<name>_x" plus the NUL terminator */
+    component = (char*)mem_a(len+3);
+    memcpy(component, name, len);
+    len += 2;
+    component[len]   = 0;
+    component[len-2] = '_';
+    component[len-1] = 'x';
+
+    for (i = 0; i < 3; ++i) {
+        def.name = code_genstring(component);
+        vec_push(code_defs, def);
+        def.offset++;
+        component[len-1]++; /* 'x' -> 'y' -> 'z' */
+    }
+
+    /* was leaked before: mem_a'd buffer must be released */
+    mem_d(component);
+}
+
+/* Emit per-component float field entries ("name_x", "name_y",
+ * "name_z") for a vector entity field, mirroring gen_vector_defs.
+ * `fld` is taken by value: type/name/offset are rewritten locally.
+ * Skipped when there is no name or -fsingle-vector-defs is set.
+ */
+static void gen_vector_fields(prog_section_field fld, const char *name)
+{
+    char *component;
+    size_t len, i;
+
+    if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
+        return;
+
+    fld.type = TYPE_FLOAT;
+
+    len = strlen(name);
+
+    /* room for "<name>_x" plus the NUL terminator */
+    component = (char*)mem_a(len+3);
+    memcpy(component, name, len);
+    len += 2;
+    component[len]   = 0;
+    component[len-2] = '_';
+    component[len-1] = 'x';
+
+    for (i = 0; i < 3; ++i) {
+        fld.name = code_genstring(component);
+        vec_push(code_fields, fld);
+        fld.offset++;
+        component[len-1]++; /* 'x' -> 'y' -> 'z' */
+    }
+
+    /* was leaked before: mem_a'd buffer must be released */
+    mem_d(component);
+}
+
static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
{
size_t i;
return gen_global_pointer(global);
case TYPE_FIELD:
vec_push(code_defs, def);
+ gen_vector_defs(def, global->name);
return gen_global_field(global);
case TYPE_ENTITY:
/* fall through */
vec_push(code_globals, *iptr);
} else {
vec_push(code_globals, 0);
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
vec_push(code_defs, def);
return global->code.globaladdr >= 0;
vec_push(code_globals, code_genstring(global->constval.vstring));
} else {
vec_push(code_globals, 0);
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
vec_push(code_defs, def);
return global->code.globaladdr >= 0;
}
vec_push(code_globals, iptr[0]);
if (global->code.globaladdr < 0)
return false;
- for (d = 1; d < type_sizeof[global->vtype]; ++d)
- {
+ for (d = 1; d < type_sizeof[global->vtype]; ++d) {
vec_push(code_globals, iptr[d]);
}
} else {
vec_push(code_globals, 0);
if (global->code.globaladdr < 0)
return false;
- for (d = 1; d < type_sizeof[global->vtype]; ++d)
- {
+ for (d = 1; d < type_sizeof[global->vtype]; ++d) {
vec_push(code_globals, 0);
}
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
vec_push(code_defs, def);
+ def.type &= ~DEF_SAVEGLOBAL;
+ gen_vector_defs(def, global->name);
return global->code.globaladdr >= 0;
}
case TYPE_FUNCTION:
vec_push(code_globals, vec_size(code_functions));
if (!gen_global_function(self, global))
return false;
- if (!islocal)
- def.type |= DEF_SAVEGLOBAL;
}
+ if (!islocal && global->cvq != CV_CONST)
+ def.type |= DEF_SAVEGLOBAL;
vec_push(code_defs, def);
return true;
case TYPE_VARIANT:
vec_push(code_globals, fld.offset+2);
}
+ if (field->fieldtype == TYPE_VECTOR) {
+ gen_vector_defs(def, field->name);
+ gen_vector_fields(fld, field->name);
+ }
+
return field->code.globaladdr >= 0;
}
return false;
}
- /* DP errors if the last instruction is not an INSTR_DONE
- * and for debugging purposes we add an additional AINSTR_END
- * to the end of functions, so here it goes:
- */
- stmt.opcode = INSTR_DONE;
- stmt.o1.u1 = 0;
- stmt.o2.u1 = 0;
- stmt.o3.u1 = 0;
- code_push_statement(&stmt, vec_last(code_linenums));
+ /* DP errors if the last instruction is not an INSTR_DONE. */
+ if (vec_last(code_statements).opcode != INSTR_DONE)
+ {
+ stmt.opcode = INSTR_DONE;
+ stmt.o1.u1 = 0;
+ stmt.o2.u1 = 0;
+ stmt.o3.u1 = 0;
+ code_push_statement(&stmt, vec_last(code_linenums));
+ }
if (opts.pp_only)
return true;