7 /***********************************************************************
8 * Type names, sizes and instruction tables used at multiple points in the IR codegen
11 const char *type_name[TYPE_COUNT] = {
30 static size_t type_sizeof_[TYPE_COUNT] = {
37 1, /* TYPE_FUNCTION */
48 const uint16_t type_store_instr[TYPE_COUNT] = {
49 INSTR_STORE_F, /* should use I when having integer support */
56 INSTR_STORE_ENT, /* should use I */
58 INSTR_STORE_I, /* integer type */
63 INSTR_STORE_V, /* variant, should never be accessed */
65 VINSTR_END, /* struct */
66 VINSTR_END, /* union */
67 VINSTR_END, /* array */
69 VINSTR_END, /* noexpr */
72 const uint16_t field_store_instr[TYPE_COUNT] = {
82 INSTR_STORE_FLD, /* integer type */
87 INSTR_STORE_V, /* variant, should never be accessed */
89 VINSTR_END, /* struct */
90 VINSTR_END, /* union */
91 VINSTR_END, /* array */
93 VINSTR_END, /* noexpr */
96 const uint16_t type_storep_instr[TYPE_COUNT] = {
97 INSTR_STOREP_F, /* should use I when having integer support */
104 INSTR_STOREP_ENT, /* should use I */
106 INSTR_STOREP_ENT, /* integer type */
111 INSTR_STOREP_V, /* variant, should never be accessed */
113 VINSTR_END, /* struct */
114 VINSTR_END, /* union */
115 VINSTR_END, /* array */
116 VINSTR_END, /* nil */
117 VINSTR_END, /* noexpr */
120 const uint16_t type_eq_instr[TYPE_COUNT] = {
121 INSTR_EQ_F, /* should use I when having integer support */
126 INSTR_EQ_E, /* FLD has no comparison */
128 INSTR_EQ_E, /* should use I */
135 INSTR_EQ_V, /* variant, should never be accessed */
137 VINSTR_END, /* struct */
138 VINSTR_END, /* union */
139 VINSTR_END, /* array */
140 VINSTR_END, /* nil */
141 VINSTR_END, /* noexpr */
144 const uint16_t type_ne_instr[TYPE_COUNT] = {
145 INSTR_NE_F, /* should use I when having integer support */
150 INSTR_NE_E, /* FLD has no comparison */
152 INSTR_NE_E, /* should use I */
159 INSTR_NE_V, /* variant, should never be accessed */
161 VINSTR_END, /* struct */
162 VINSTR_END, /* union */
163 VINSTR_END, /* array */
164 VINSTR_END, /* nil */
165 VINSTR_END, /* noexpr */
168 const uint16_t type_not_instr[TYPE_COUNT] = {
169 INSTR_NOT_F, /* should use I when having integer support */
170 VINSTR_END, /* not to be used, depends on string related -f flags */
176 INSTR_NOT_ENT, /* should use I */
178 INSTR_NOT_I, /* integer type */
183 INSTR_NOT_V, /* variant, should never be accessed */
185 VINSTR_END, /* struct */
186 VINSTR_END, /* union */
187 VINSTR_END, /* array */
188 VINSTR_END, /* nil */
189 VINSTR_END, /* noexpr */
193 static void ir_value_dump(ir_value*, int (*oprintf)(const char*,...));
195 static ir_value* ir_gen_extparam_proto(ir_builder *ir);
196 static void ir_gen_extparam (ir_builder *ir);
198 static void ir_function_dump(ir_function*, char *ind, int (*oprintf)(const char*,...));
200 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t, const char *label,
201 int op, ir_value *a, ir_value *b, qc_type outype);
202 static bool GMQCC_WARN ir_block_create_store(ir_block*, lex_ctx_t, ir_value *target, ir_value *what);
203 static void ir_block_dump(ir_block*, char *ind, int (*oprintf)(const char*,...));
205 static bool ir_instr_op(ir_instr*, int op, ir_value *value, bool writing);
206 static void ir_instr_dump(ir_instr* in, char *ind, int (*oprintf)(const char*,...));
207 /* error functions */
209 static void irerror(lex_ctx_t ctx, const char *msg, ...)
213 con_cvprintmsg(ctx, LVL_ERROR, "internal error", msg, ap);
217 static bool GMQCC_WARN irwarning(lex_ctx_t ctx, int warntype, const char *fmt, ...)
222 r = vcompile_warning(ctx, warntype, fmt, ap);
227 /***********************************************************************
228 * Vector utility functions
231 static bool GMQCC_WARN vec_ir_value_find(std::vector<ir_value *> &vec, const ir_value *what, size_t *idx)
233 for (auto &it : vec) {
237 *idx = &it - &vec[0];
243 static bool GMQCC_WARN vec_ir_block_find(ir_block **vec, ir_block *what, size_t *idx)
246 size_t len = vec_size(vec);
247 for (i = 0; i < len; ++i) {
248 if (vec[i] == what) {
256 static bool GMQCC_WARN vec_ir_instr_find(std::vector<ir_instr *> &vec, ir_instr *what, size_t *idx)
258 for (auto &it : vec) {
262 *idx = &it - &vec[0];
268 /***********************************************************************
272 static void ir_block_delete_quick(ir_block* self);
273 static void ir_instr_delete_quick(ir_instr *self);
274 static void ir_function_delete_quick(ir_function *self);
276 ir_builder::ir_builder(const std::string& modulename)
277 : m_name(modulename),
280 m_htglobals = util_htnew(IR_HT_SIZE);
281 m_htfields = util_htnew(IR_HT_SIZE);
282 m_htfunctions = util_htnew(IR_HT_SIZE);
284 m_nil = new ir_value("nil", store_value, TYPE_NIL);
285 m_nil->m_cvq = CV_CONST;
287 for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
288 /* we write to them, but they're not supposed to be used outside the IR, so
289 * let's not allow the generation of ir_instrs which use these.
290 * So it's a constant noexpr.
292 m_vinstr_temp[i] = new ir_value("vinstr_temp", store_value, TYPE_NOEXPR);
293 m_vinstr_temp[i]->m_cvq = CV_CONST;
297 ir_builder::~ir_builder()
299 util_htdel(m_htglobals);
300 util_htdel(m_htfields);
301 util_htdel(m_htfunctions);
302 for (auto& f : m_functions)
303 ir_function_delete_quick(f.release());
304 m_functions.clear(); // delete them now before deleting the rest:
308 for (size_t i = 0; i != IR_MAX_VINSTR_TEMPS; ++i) {
309 delete m_vinstr_temp[i];
313 m_extparam_protos.clear();
316 static ir_function* ir_builder_get_function(ir_builder *self, const char *name)
318 return (ir_function*)util_htget(self->m_htfunctions, name);
321 ir_function* ir_builder_create_function(ir_builder *self, const std::string& name, qc_type outtype)
323 ir_function *fn = ir_builder_get_function(self, name.c_str());
328 fn = new ir_function(self, outtype);
330 self->m_functions.emplace_back(fn);
331 util_htset(self->m_htfunctions, name.c_str(), fn);
333 fn->m_value = ir_builder_create_global(self, fn->m_name, TYPE_FUNCTION);
339 fn->m_value->m_hasvalue = true;
340 fn->m_value->m_outtype = outtype;
341 fn->m_value->m_constval.vfunc = fn;
342 fn->m_value->m_context = fn->m_context;
347 static ir_value* ir_builder_get_global(ir_builder *self, const char *name)
349 return (ir_value*)util_htget(self->m_htglobals, name);
352 ir_value* ir_builder_create_global(ir_builder *self, const std::string& name, qc_type vtype)
358 ve = ir_builder_get_global(self, name.c_str());
364 ve = new ir_value(std::string(name), store_global, vtype);
365 self->m_globals.emplace_back(ve);
366 util_htset(self->m_htglobals, name.c_str(), ve);
370 ir_value* ir_builder_get_va_count(ir_builder *self)
372 if (self->m_reserved_va_count)
373 return self->m_reserved_va_count;
374 return (self->m_reserved_va_count = ir_builder_create_global(self, "reserved:va_count", TYPE_FLOAT));
377 static ir_value* ir_builder_get_field(ir_builder *self, const char *name)
379 return (ir_value*)util_htget(self->m_htfields, name);
383 ir_value* ir_builder_create_field(ir_builder *self, const std::string& name, qc_type vtype)
385 ir_value *ve = ir_builder_get_field(self, name.c_str());
390 ve = new ir_value(std::string(name), store_global, TYPE_FIELD);
391 ve->m_fieldtype = vtype;
392 self->m_fields.emplace_back(ve);
393 util_htset(self->m_htfields, name.c_str(), ve);
397 /***********************************************************************
401 static bool ir_function_naive_phi(ir_function*);
402 static void ir_function_enumerate(ir_function*);
403 static bool ir_function_calculate_liferanges(ir_function*);
404 static bool ir_function_allocate_locals(ir_function*);
406 ir_function::ir_function(ir_builder* owner_, qc_type outtype_)
408 m_name("<@unnamed>"),
411 m_context.file = "<@no context>";
415 ir_function::~ir_function()
419 static void ir_function_delete_quick(ir_function *self)
421 for (auto& b : self->m_blocks)
422 ir_block_delete_quick(b.release());
426 static void ir_function_collect_value(ir_function *self, ir_value *v)
428 self->m_values.emplace_back(v);
431 ir_block* ir_function_create_block(lex_ctx_t ctx, ir_function *self, const char *label)
433 ir_block* bn = new ir_block(self, label ? std::string(label) : std::string());
435 self->m_blocks.emplace_back(bn);
437 if ((self->m_flags & IR_FLAG_BLOCK_COVERAGE) && self->m_owner->m_coverage_func)
438 (void)ir_block_create_call(bn, ctx, nullptr, self->m_owner->m_coverage_func, false);
443 static bool instr_is_operation(uint16_t op)
445 return ( (op >= INSTR_MUL_F && op <= INSTR_GT) ||
446 (op >= INSTR_LOAD_F && op <= INSTR_LOAD_FNC) ||
447 (op == INSTR_ADDRESS) ||
448 (op >= INSTR_NOT_F && op <= INSTR_NOT_FNC) ||
449 (op >= INSTR_AND && op <= INSTR_BITOR) ||
450 (op >= INSTR_CALL0 && op <= INSTR_CALL8) ||
451 (op >= VINSTR_BITAND_V && op <= VINSTR_NEG_V) );
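/* A sketch of the store-fusion performed below (operand names are
 * illustrative, not real output): an operation whose SSA temp is
 * immediately stored away gets its result written to the store's
 * target directly:
 *
 *   MUL_F   a, b -> %tmp
 *   STORE_F %tmp -> dst
 * becomes
 *   MUL_F   a, b -> dst
 *
 * Only valid when %tmp is an SSA value with exactly one read.
 */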
454 static bool ir_function_pass_peephole(ir_function *self)
456 for (auto& bp : self->m_blocks) {
457 ir_block *block = bp.get();
458 for (size_t i = 0; i < vec_size(block->m_instr); ++i) {
460 inst = block->m_instr[i];
463 (inst->m_opcode >= INSTR_STORE_F &&
464 inst->m_opcode <= INSTR_STORE_FNC))
472 oper = block->m_instr[i-1];
473 if (!instr_is_operation(oper->m_opcode))
476 /* Don't change semantics of MUL_VF in engines where these may not alias. */
477 if (OPTS_FLAG(LEGACY_VECTOR_MATHS)) {
478 if (oper->m_opcode == INSTR_MUL_VF && oper->_m_ops[2]->m_memberof == oper->_m_ops[1])
480 if (oper->m_opcode == INSTR_MUL_FV && oper->_m_ops[1]->m_memberof == oper->_m_ops[2])
484 value = oper->_m_ops[0];
486 /* only do it for SSA values */
487 if (value->m_store != store_value)
490 /* don't optimize out the temp if it's used later again */
491 if (value->m_reads.size() != 1)
494 /* The very next store must use this value */
495 if (value->m_reads[0] != store)
498 /* And of course the store must _read_ from it, so it's in
500 if (store->_m_ops[1] != value)
503 ++opts_optimizationcount[OPTIM_PEEPHOLE];
504 (void)!ir_instr_op(oper, 0, store->_m_ops[0], true);
506 vec_remove(block->m_instr, i, 1);
509 else if (inst->m_opcode == VINSTR_COND)
511 /* A COND on a value resulting from a NOT can
512 * drop the NOT and swap the branch targets
519 value = inst->_m_ops[0];
521 if (value->m_store != store_value || value->m_reads.size() != 1 || value->m_reads[0] != inst)
524 inot = value->m_writes[0];
525 if (inot->_m_ops[0] != value ||
526 inot->m_opcode < INSTR_NOT_F ||
527 inot->m_opcode > INSTR_NOT_FNC ||
528 inot->m_opcode == INSTR_NOT_V || /* can't do these */
529 inot->m_opcode == INSTR_NOT_S)
535 ++opts_optimizationcount[OPTIM_PEEPHOLE];
537 (void)!ir_instr_op(inst, 0, inot->_m_ops[1], false);
540 for (inotid = 0; inotid < vec_size(tmp->m_instr); ++inotid) {
541 if (tmp->m_instr[inotid] == inot)
544 if (inotid >= vec_size(tmp->m_instr)) {
545 compile_error(inst->m_context, "sanity-check failed: failed to find instruction to optimize out");
548 vec_remove(tmp->m_instr, inotid, 1);
550 /* swap ontrue/onfalse */
551 tmp = inst->m_bops[0];
552 inst->m_bops[0] = inst->m_bops[1];
553 inst->m_bops[1] = tmp;
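/* A sketch of the tail-recursion rewrite below (QC-like pseudo-code,
 * names illustrative): a self-call whose result is immediately returned,
 *
 *   CALL2  self, p0, p1 -> %r
 *   RETURN %r
 *
 * is replaced by stores of the call arguments into the function's
 * parameter locals followed by a jump back to the entry block, turning
 * the recursion into a loop.
 */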
563 static bool ir_function_pass_tailrecursion(ir_function *self)
567 for (auto& bp : self->m_blocks) {
568 ir_block *block = bp.get();
571 ir_instr *ret, *call, *store = nullptr;
573 if (!block->m_final || vec_size(block->m_instr) < 2)
576 ret = block->m_instr[vec_size(block->m_instr)-1];
577 if (ret->m_opcode != INSTR_DONE && ret->m_opcode != INSTR_RETURN)
580 call = block->m_instr[vec_size(block->m_instr)-2];
581 if (call->m_opcode >= INSTR_STORE_F && call->m_opcode <= INSTR_STORE_FNC) {
582 /* account for the unoptimized
584 * STORE %return, %tmp
588 if (vec_size(block->m_instr) < 3)
592 call = block->m_instr[vec_size(block->m_instr)-3];
595 if (call->m_opcode < INSTR_CALL0 || call->m_opcode > INSTR_CALL8)
599 /* optimize out the STORE */
600 if (ret->_m_ops[0] &&
601 ret->_m_ops[0] == store->_m_ops[0] &&
602 store->_m_ops[1] == call->_m_ops[0])
604 ++opts_optimizationcount[OPTIM_PEEPHOLE];
605 call->_m_ops[0] = store->_m_ops[0];
606 vec_remove(block->m_instr, vec_size(block->m_instr) - 2, 1);
613 if (!call->_m_ops[0])
616 funcval = call->_m_ops[1];
619 if (funcval->m_vtype != TYPE_FUNCTION || funcval->m_constval.vfunc != self)
622 /* now we have a CALL and a RET, check if it's a tailcall */
623 if (ret->_m_ops[0] && call->_m_ops[0] != ret->_m_ops[0])
626 ++opts_optimizationcount[OPTIM_TAIL_RECURSION];
627 vec_shrinkby(block->m_instr, 2);
629 block->m_final = false; /* open it back up */
631 /* emit parameter stores */
632 for (p = 0; p < call->m_params.size(); ++p) {
633 /* assert(call->params_count <= self->locals_count); */
634 if (!ir_block_create_store(block, call->m_context, self->m_locals[p].get(), call->m_params[p])) {
635 irerror(call->m_context, "failed to create tailcall store instruction for parameter %i", (int)p);
639 if (!ir_block_create_jump(block, call->m_context, self->m_blocks[0].get())) {
640 irerror(call->m_context, "failed to create tailcall jump");
651 bool ir_function_finalize(ir_function *self)
656 if (OPTS_OPTIMIZATION(OPTIM_PEEPHOLE)) {
657 if (!ir_function_pass_peephole(self)) {
658 irerror(self->m_context, "generic optimization pass broke something in `%s`", self->m_name.c_str());
663 if (OPTS_OPTIMIZATION(OPTIM_TAIL_RECURSION)) {
664 if (!ir_function_pass_tailrecursion(self)) {
665 irerror(self->m_context, "tail-recursion optimization pass broke something in `%s`", self->m_name.c_str());
670 if (!ir_function_naive_phi(self)) {
671 irerror(self->m_context, "internal error: ir_function_naive_phi failed");
675 for (auto& lp : self->m_locals) {
676 ir_value *v = lp.get();
677 if (v->m_vtype == TYPE_VECTOR ||
678 (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
680 ir_value_vector_member(v, 0);
681 ir_value_vector_member(v, 1);
682 ir_value_vector_member(v, 2);
685 for (auto& vp : self->m_values) {
686 ir_value *v = vp.get();
687 if (v->m_vtype == TYPE_VECTOR ||
688 (v->m_vtype == TYPE_FIELD && v->m_outtype == TYPE_VECTOR))
690 ir_value_vector_member(v, 0);
691 ir_value_vector_member(v, 1);
692 ir_value_vector_member(v, 2);
696 ir_function_enumerate(self);
698 if (!ir_function_calculate_liferanges(self))
700 if (!ir_function_allocate_locals(self))
705 ir_value* ir_function_create_local(ir_function *self, const std::string& name, qc_type vtype, bool param)
710 !self->m_locals.empty() &&
711 self->m_locals.back()->m_store != store_param)
713 irerror(self->m_context, "cannot add parameters after adding locals");
717 ve = new ir_value(std::string(name), (param ? store_param : store_local), vtype);
720 self->m_locals.emplace_back(ve);
724 /***********************************************************************
728 ir_block::ir_block(ir_function* owner, const std::string& name)
732 m_context.file = "<@no context>";
736 ir_block::~ir_block()
738 for (size_t i = 0; i != vec_size(m_instr); ++i)
745 static void ir_block_delete_quick(ir_block* self)
748 for (i = 0; i != vec_size(self->m_instr); ++i)
749 ir_instr_delete_quick(self->m_instr[i]);
750 vec_free(self->m_instr);
754 /***********************************************************************
758 ir_instr::ir_instr(lex_ctx_t ctx, ir_block* owner_, int op)
765 ir_instr::~ir_instr()
767 // The following calls can only delete from
768 // vectors; we still want to delete this instruction,
769 // so ignore the return value. Since gcc's warn_unused_result
770 // attribute is not silenced by an explicit (void)foo(),
771 // we improvise and use (void)!foo();
772 for (auto &it : m_phi) {
774 if (vec_ir_instr_find(it.value->m_writes, this, &idx))
775 it.value->m_writes.erase(it.value->m_writes.begin() + idx);
776 if (vec_ir_instr_find(it.value->m_reads, this, &idx))
777 it.value->m_reads.erase(it.value->m_reads.begin() + idx);
779 for (auto &it : m_params) {
781 if (vec_ir_instr_find(it->m_writes, this, &idx))
782 it->m_writes.erase(it->m_writes.begin() + idx);
783 if (vec_ir_instr_find(it->m_reads, this, &idx))
784 it->m_reads.erase(it->m_reads.begin() + idx);
786 (void)!ir_instr_op(this, 0, nullptr, false);
787 (void)!ir_instr_op(this, 1, nullptr, false);
788 (void)!ir_instr_op(this, 2, nullptr, false);
791 static void ir_instr_delete_quick(ir_instr *self)
794 self->m_params.clear();
795 self->_m_ops[0] = nullptr;
796 self->_m_ops[1] = nullptr;
797 self->_m_ops[2] = nullptr;
801 static bool ir_instr_op(ir_instr *self, int op, ir_value *v, bool writing)
803 if (v && v->m_vtype == TYPE_NOEXPR) {
804 irerror(self->m_context, "tried to use a NOEXPR value");
808 if (self->_m_ops[op]) {
810 if (writing && vec_ir_instr_find(self->_m_ops[op]->m_writes, self, &idx))
811 self->_m_ops[op]->m_writes.erase(self->_m_ops[op]->m_writes.begin() + idx);
812 else if (vec_ir_instr_find(self->_m_ops[op]->m_reads, self, &idx))
813 self->_m_ops[op]->m_reads.erase(self->_m_ops[op]->m_reads.begin() + idx);
817 v->m_writes.push_back(self);
819 v->m_reads.push_back(self);
821 self->_m_ops[op] = v;
825 /***********************************************************************
829 static void ir_value_code_setaddr(ir_value *self, int32_t gaddr)
831 self->m_code.globaladdr = gaddr;
832 if (self->m_members[0]) self->m_members[0]->m_code.globaladdr = gaddr;
833 if (self->m_members[1]) self->m_members[1]->m_code.globaladdr = gaddr;
834 if (self->m_members[2]) self->m_members[2]->m_code.globaladdr = gaddr;
837 static int32_t ir_value_code_addr(const ir_value *self)
839 if (self->m_store == store_return)
840 return OFS_RETURN + self->m_code.addroffset;
841 return self->m_code.globaladdr + self->m_code.addroffset;
844 ir_value::ir_value(std::string&& name_, store_type store_, qc_type vtype_)
845 : m_name(move(name_)),
849 m_fieldtype = TYPE_VOID;
850 m_outtype = TYPE_VOID;
855 m_context.file = "<@no context>";
858 memset(&m_constval, 0, sizeof(m_constval));
859 memset(&m_code, 0, sizeof(m_code));
861 m_members[0] = nullptr;
862 m_members[1] = nullptr;
863 m_members[2] = nullptr;
864 m_memberof = nullptr;
866 m_unique_life = false;
871 ir_value::~ir_value()
875 if (m_vtype == TYPE_STRING)
876 mem_d((void*)m_constval.vstring);
878 if (!(m_flags & IR_FLAG_SPLIT_VECTOR)) {
879 for (i = 0; i < 3; ++i) {
887 /* helper function */
888 static ir_value* ir_builder_imm_float(ir_builder *self, float value, bool add_to_list) {
889 ir_value *v = new ir_value("#IMMEDIATE", store_global, TYPE_FLOAT);
890 v->m_flags |= IR_FLAG_ERASABLE;
891 v->m_hasvalue = true;
893 v->m_constval.vfloat = value;
895 self->m_globals.emplace_back(v);
897 self->m_const_floats.emplace_back(v);
901 ir_value* ir_value_vector_member(ir_value *self, unsigned int member)
908 if (self->m_members[member])
909 return self->m_members[member];
911 if (!self->m_name.empty()) {
912 char member_name[3] = { '_', char('x' + member), 0 };
913 name = self->m_name + member_name;
916 if (self->m_vtype == TYPE_VECTOR)
918 m = new ir_value(move(name), self->m_store, TYPE_FLOAT);
921 m->m_context = self->m_context;
923 self->m_members[member] = m;
924 m->m_code.addroffset = member;
926 else if (self->m_vtype == TYPE_FIELD)
928 if (self->m_fieldtype != TYPE_VECTOR)
930 m = new ir_value(move(name), self->m_store, TYPE_FIELD);
933 m->m_fieldtype = TYPE_FLOAT;
934 m->m_context = self->m_context;
936 self->m_members[member] = m;
937 m->m_code.addroffset = member;
941 irerror(self->m_context, "invalid member access on %s", self->m_name.c_str());
945 m->m_memberof = self;
949 static GMQCC_INLINE size_t ir_value_sizeof(const ir_value *self)
951 if (self->m_vtype == TYPE_FIELD && self->m_fieldtype == TYPE_VECTOR)
952 return type_sizeof_[TYPE_VECTOR];
953 return type_sizeof_[self->m_vtype];
956 static ir_value* ir_value_out(ir_function *owner, const char *name, store_type storetype, qc_type vtype)
958 ir_value *v = new ir_value(name ? std::string(name) : std::string(), storetype, vtype);
961 ir_function_collect_value(owner, v);
965 bool ir_value_set_float(ir_value *self, float f)
967 if (self->m_vtype != TYPE_FLOAT)
969 self->m_constval.vfloat = f;
970 self->m_hasvalue = true;
974 bool ir_value_set_func(ir_value *self, int f)
976 if (self->m_vtype != TYPE_FUNCTION)
978 self->m_constval.vint = f;
979 self->m_hasvalue = true;
983 bool ir_value_set_vector(ir_value *self, vec3_t v)
985 if (self->m_vtype != TYPE_VECTOR)
987 self->m_constval.vvec = v;
988 self->m_hasvalue = true;
992 bool ir_value_set_field(ir_value *self, ir_value *fld)
994 if (self->m_vtype != TYPE_FIELD)
996 self->m_constval.vpointer = fld;
997 self->m_hasvalue = true;
1001 bool ir_value_set_string(ir_value *self, const char *str)
1003 if (self->m_vtype != TYPE_STRING)
1005 self->m_constval.vstring = util_strdupe(str);
1006 self->m_hasvalue = true;
1011 bool ir_value_set_int(ir_value *self, int i)
1013 if (self->m_vtype != TYPE_INTEGER)
1015 self->m_constval.vint = i;
1016 self->m_hasvalue = true;
1021 bool ir_value_lives(ir_value *self, size_t at)
1023 for (auto& l : self->m_life) {
1024 if (l.start <= at && at <= l.end)
1026 if (l.start > at) /* since it's ordered */
1032 static bool ir_value_life_insert(ir_value *self, size_t idx, ir_life_entry_t e)
1034 self->m_life.insert(self->m_life.begin() + idx, e);
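/* Life ranges are an ordered vector of [start,end] entries;
 * ir_value_life_merge(v, s) below merges the single instruction id s.
 * Illustrative numbers: merging 6 into {[1,5], [9,12]} extends the first
 * entry to [1,6]; merging 7 then gives [1,7]; merging 8 bridges the gap
 * and collapses the set to {[1,12]}.
 */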
1038 static bool ir_value_life_merge(ir_value *self, size_t s)
1041 const size_t vs = self->m_life.size();
1042 ir_life_entry_t *life_found = nullptr;
1043 ir_life_entry_t *before = nullptr;
1044 ir_life_entry_t new_entry;
1046 /* Find the first range >= s */
1047 for (i = 0; i < vs; ++i)
1049 before = life_found;
1050 life_found = &self->m_life[i];
1051 if (life_found->start > s)
1054 /* nothing found? append */
1057 if (life_found && life_found->end+1 == s)
1059 /* previous life range can be merged in */
1063 if (life_found && life_found->end >= s)
1065 e.start = e.end = s;
1066 self->m_life.emplace_back(e);
1072 if (before->end + 1 == s &&
1073 life_found->start - 1 == s)
1076 before->end = life_found->end;
1077 self->m_life.erase(self->m_life.begin()+i);
1080 if (before->end + 1 == s)
1086 /* already contained */
1087 if (before->end >= s)
1091 if (life_found->start - 1 == s)
1093 life_found->start--;
1096 /* insert a new entry */
1097 new_entry.start = new_entry.end = s;
1098 return ir_value_life_insert(self, i, new_entry);
1101 static bool ir_value_life_merge_into(ir_value *self, const ir_value *other)
1105 if (other->m_life.empty())
1108 if (self->m_life.empty()) {
1109 self->m_life = other->m_life;
1114 for (i = 0; i < other->m_life.size(); ++i)
1116 const ir_life_entry_t &otherlife = other->m_life[i];
1119 ir_life_entry_t *entry = &self->m_life[myi];
1121 if (otherlife.end+1 < entry->start)
1123 /* adding an interval before entry */
1124 if (!ir_value_life_insert(self, myi, otherlife))
1130 if (otherlife.start < entry->start &&
1131 otherlife.end+1 >= entry->start)
1133 /* starts earlier and overlaps */
1134 entry->start = otherlife.start;
1137 if (otherlife.end > entry->end &&
1138 otherlife.start <= entry->end+1)
1140 /* ends later and overlaps */
1141 entry->end = otherlife.end;
1144 /* see if our change combines it with the next ranges */
1145 while (myi+1 < self->m_life.size() &&
1146 entry->end+1 >= self->m_life[1+myi].start)
1148 /* overlaps with (myi+1) */
1149 if (entry->end < self->m_life[1+myi].end)
1150 entry->end = self->m_life[1+myi].end;
1151 self->m_life.erase(self->m_life.begin() + (myi + 1));
1152 entry = &self->m_life[myi];
1155 /* see if we're after the entry */
1156 if (otherlife.start > entry->end)
1159 /* append if we're at the end */
1160 if (myi >= self->m_life.size()) {
1161 self->m_life.emplace_back(otherlife);
1164 /* otherwise check the next range */
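/* ir_values_overlap() below walks both ordered range lists in lockstep,
 * merge-style. The per-pair test is the usual interval intersection;
 * a minimal sketch (assumed helper, not part of the IR):
 *
 *   bool ranges_overlap(ir_life_entry_t a, ir_life_entry_t b) {
 *       return a.start < b.end && b.start < a.end;
 *   }
 *
 * and whichever list has the earlier entry is advanced until one side
 * is exhausted.
 */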
1173 static bool ir_values_overlap(const ir_value *a, const ir_value *b)
1175 /* For any life entry in A see if it overlaps with
1176 * any life entry in B.
1177 * Note that the life entries are ordered, so we can use a
1178 * more efficient algorithm here than naively translating the
1182 const ir_life_entry_t *la, *lb, *enda, *endb;
1184 /* first of all, if either has no life range, they cannot clash */
1185 if (a->m_life.empty() || b->m_life.empty())
1188 la = &a->m_life.front();
1189 lb = &b->m_life.front();
1190 enda = &a->m_life.back() + 1;
1191 endb = &b->m_life.back() + 1;
1194 /* check if the entries overlap, for that,
1195 * both must start before the other one ends.
1197 if (la->start < lb->end &&
1198 lb->start < la->end)
1203 /* entries are ordered
1204 * one entry is earlier than the other
1205 * that earlier entry will be moved forward
1207 if (la->start < lb->start)
1209 /* order: A B, move A forward
1210 * check if we hit the end with A
1215 else /* if (lb->start < la->start) actually <= */
1217 /* order: B A, move B forward
1218 * check if we hit the end with B
1227 /***********************************************************************
1231 static bool ir_check_unreachable(ir_block *self)
1233 /* The IR should never have to deal with unreachable code */
1234 if (!self->m_final/* || OPTS_FLAG(ALLOW_UNREACHABLE_CODE)*/)
1236 irerror(self->m_context, "unreachable statement (%s)", self->m_label.c_str());
1240 bool ir_block_create_store_op(ir_block *self, lex_ctx_t ctx, int op, ir_value *target, ir_value *what)
1243 if (!ir_check_unreachable(self))
1246 if (target->m_store == store_value &&
1247 (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC))
1249 irerror(self->m_context, "cannot store to an SSA value");
1250 irerror(self->m_context, "trying to store: %s <- %s", target->m_name.c_str(), what->m_name.c_str());
1251 irerror(self->m_context, "instruction: %s", util_instr_str[op]);
1255 in = new ir_instr(ctx, self, op);
1259 if (!ir_instr_op(in, 0, target, (op < INSTR_STOREP_F || op > INSTR_STOREP_FNC)) ||
1260 !ir_instr_op(in, 1, what, false))
1265 vec_push(self->m_instr, in);
1269 bool ir_block_create_state_op(ir_block *self, lex_ctx_t ctx, ir_value *frame, ir_value *think)
1272 if (!ir_check_unreachable(self))
1275 in = new ir_instr(ctx, self, INSTR_STATE);
1279 if (!ir_instr_op(in, 0, frame, false) ||
1280 !ir_instr_op(in, 1, think, false))
1285 vec_push(self->m_instr, in);
1289 static bool ir_block_create_store(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1293 if (target->m_vtype == TYPE_VARIANT)
1294 vtype = what->m_vtype;
1296 vtype = target->m_vtype;
1299 if (vtype == TYPE_FLOAT && what->m_vtype == TYPE_INTEGER)
1300 op = INSTR_CONV_ITOF;
1301 else if (vtype == TYPE_INTEGER && what->m_vtype == TYPE_FLOAT)
1302 op = INSTR_CONV_FTOI;
1304 op = type_store_instr[vtype];
1306 if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1307 if (op == INSTR_STORE_FLD && what->m_fieldtype == TYPE_VECTOR)
1311 return ir_block_create_store_op(self, ctx, op, target, what);
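/* Example of the selection above: storing an integer into a float target
 * emits INSTR_CONV_ITOF rather than a plain INSTR_STORE_F (and FTOI for
 * the reverse); for a TYPE_VARIANT target the instruction is chosen from
 * the source type instead.
 */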
1314 bool ir_block_create_storep(ir_block *self, lex_ctx_t ctx, ir_value *target, ir_value *what)
1319 if (target->m_vtype != TYPE_POINTER)
1322 /* storing through a pointer - the target is a pointer, so the type
1323 * must be inferred from the source
1325 vtype = what->m_vtype;
1327 op = type_storep_instr[vtype];
1328 if (OPTS_FLAG(ADJUST_VECTOR_FIELDS)) {
1329 if (op == INSTR_STOREP_FLD && what->m_fieldtype == TYPE_VECTOR)
1330 op = INSTR_STOREP_V;
1333 return ir_block_create_store_op(self, ctx, op, target, what);
1336 bool ir_block_create_return(ir_block *self, lex_ctx_t ctx, ir_value *v)
1339 if (!ir_check_unreachable(self))
1342 self->m_final = true;
1344 self->m_is_return = true;
1345 in = new ir_instr(ctx, self, INSTR_RETURN);
1349 if (v && !ir_instr_op(in, 0, v, false)) {
1354 vec_push(self->m_instr, in);
1358 bool ir_block_create_if(ir_block *self, lex_ctx_t ctx, ir_value *v,
1359 ir_block *ontrue, ir_block *onfalse)
1362 if (!ir_check_unreachable(self))
1364 self->m_final = true;
1365 /*in = new ir_instr(ctx, self, (v->m_vtype == TYPE_STRING ? INSTR_IF_S : INSTR_IF_F));*/
1366 in = new ir_instr(ctx, self, VINSTR_COND);
1370 if (!ir_instr_op(in, 0, v, false)) {
1375 in->m_bops[0] = ontrue;
1376 in->m_bops[1] = onfalse;
1378 vec_push(self->m_instr, in);
1380 vec_push(self->m_exits, ontrue);
1381 vec_push(self->m_exits, onfalse);
1382 vec_push(ontrue->m_entries, self);
1383 vec_push(onfalse->m_entries, self);
1387 bool ir_block_create_jump(ir_block *self, lex_ctx_t ctx, ir_block *to)
1390 if (!ir_check_unreachable(self))
1392 self->m_final = true;
1393 in = new ir_instr(ctx, self, VINSTR_JUMP);
1398 vec_push(self->m_instr, in);
1400 vec_push(self->m_exits, to);
1401 vec_push(to->m_entries, self);
1405 bool ir_block_create_goto(ir_block *self, lex_ctx_t ctx, ir_block *to)
1407 self->m_owner->m_flags |= IR_FLAG_HAS_GOTO;
1408 return ir_block_create_jump(self, ctx, to);
1411 ir_instr* ir_block_create_phi(ir_block *self, lex_ctx_t ctx, const char *label, qc_type ot)
1415 if (!ir_check_unreachable(self))
1417 in = new ir_instr(ctx, self, VINSTR_PHI);
1420 out = ir_value_out(self->m_owner, label, store_value, ot);
1425 if (!ir_instr_op(in, 0, out, true)) {
1429 vec_push(self->m_instr, in);
1433 ir_value* ir_phi_value(ir_instr *self)
1435 return self->_m_ops[0];
1438 void ir_phi_add(ir_instr* self, ir_block *b, ir_value *v)
1442 if (!vec_ir_block_find(self->m_owner->m_entries, b, nullptr)) {
1443 // It must not be possible to cause this; otherwise the AST
1444 // is doing something wrong.
1445 irerror(self->m_context, "Invalid entry block for PHI");
1451 v->m_reads.push_back(self);
1452 self->m_phi.push_back(pe);
1455 /* call related code */
1456 ir_instr* ir_block_create_call(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *func, bool noreturn)
1460 if (!ir_check_unreachable(self))
1462 in = new ir_instr(ctx, self, (noreturn ? VINSTR_NRCALL : INSTR_CALL0));
1466 self->m_final = true;
1467 self->m_is_return = true;
1469 out = ir_value_out(self->m_owner, label, (func->m_outtype == TYPE_VOID) ? store_return : store_value, func->m_outtype);
1474 if (!ir_instr_op(in, 0, out, true) ||
1475 !ir_instr_op(in, 1, func, false))
1480 vec_push(self->m_instr, in);
1483 if (!ir_block_create_return(self, ctx, nullptr)) {
1484 compile_error(ctx, "internal error: failed to generate dummy-return instruction");
1493 ir_value* ir_call_value(ir_instr *self)
1495 return self->_m_ops[0];
1498 void ir_call_param(ir_instr* self, ir_value *v)
1500 self->m_params.push_back(v);
1501 v->m_reads.push_back(self);
1504 /* binary op related code */
1506 ir_value* ir_block_create_binop(ir_block *self, lex_ctx_t ctx,
1507 const char *label, int opcode,
1508 ir_value *left, ir_value *right)
1510 qc_type ot = TYPE_VOID;
1531 case INSTR_SUB_S: /* -- offset of string as float */
1536 case INSTR_BITOR_IF:
1537 case INSTR_BITOR_FI:
1538 case INSTR_BITAND_FI:
1539 case INSTR_BITAND_IF:
1554 case INSTR_BITAND_I:
1557 case INSTR_RSHIFT_I:
1558 case INSTR_LSHIFT_I:
1566 case VINSTR_BITAND_V:
1567 case VINSTR_BITOR_V:
1568 case VINSTR_BITXOR_V:
1569 case VINSTR_BITAND_VF:
1570 case VINSTR_BITOR_VF:
1571 case VINSTR_BITXOR_VF:
1586 * after the following default case, the value of opcode can never
1587 * be 1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65
1591 /* boolean operations result in floats */
1594 * opcode >= 10 takes true branch opcode is at least 10
1595 * opcode <= 23 takes false branch opcode is at least 24
1597 if (opcode >= INSTR_EQ_F && opcode <= INSTR_GT)
1601 * At condition "opcode <= 23", the value of "opcode" must be
1603 * At condition "opcode <= 23", the value of "opcode" cannot be
1604 * equal to any of {1, 2, 3, 4, 5, 6, 7, 8, 9, 62, 63, 64, 65}.
1605 * The condition "opcode <= 23" cannot be true.
1607 * Thus ot=2 (TYPE_FLOAT) can never be true
1610 else if (opcode >= INSTR_LE && opcode <= INSTR_GT)
1612 else if (opcode >= INSTR_LE_I && opcode <= INSTR_EQ_FI)
1617 if (ot == TYPE_VOID) {
1618 /* The AST or parser were supposed to check this! */
1622 return ir_block_create_general_instr(self, ctx, label, opcode, left, right, ot);
1625 ir_value* ir_block_create_unary(ir_block *self, lex_ctx_t ctx,
1626 const char *label, int opcode,
1629 qc_type ot = TYPE_FLOAT;
1635 case INSTR_NOT_FNC: /*
1636 case INSTR_NOT_I: */
1641 * Negation for virtual instructions is emulated by subtracting from 0. Thankfully
1642 * the operand for 0 already exists, so we just source it from here.
1645 return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_F, nullptr, operand, ot);
1647 return ir_block_create_general_instr(self, ctx, label, INSTR_SUB_V, nullptr, operand, TYPE_VECTOR);
1650 ot = operand->m_vtype;
1653 if (ot == TYPE_VOID) {
1654 /* The AST or parser were supposed to check this! */
1658 /* let's use the general instruction creator and pass nullptr for OPB */
1659 return ir_block_create_general_instr(self, ctx, label, opcode, operand, nullptr, ot);
1662 static ir_value* ir_block_create_general_instr(ir_block *self, lex_ctx_t ctx, const char *label,
1663 int op, ir_value *a, ir_value *b, qc_type outype)
1668 out = ir_value_out(self->m_owner, label, store_value, outype);
1672 instr = new ir_instr(ctx, self, op);
1677 if (!ir_instr_op(instr, 0, out, true) ||
1678 !ir_instr_op(instr, 1, a, false) ||
1679 !ir_instr_op(instr, 2, b, false) )
1684 vec_push(self->m_instr, instr);
1692 ir_value* ir_block_create_fieldaddress(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field)
1696 /* TODO: support for various pointer types, if so desired */
1697 if (ent->m_vtype != TYPE_ENTITY)
1700 if (field->m_vtype != TYPE_FIELD)
1703 v = ir_block_create_general_instr(self, ctx, label, INSTR_ADDRESS, ent, field, TYPE_POINTER);
1704 v->m_fieldtype = field->m_fieldtype;
1708 ir_value* ir_block_create_load_from_ent(ir_block *self, lex_ctx_t ctx, const char *label, ir_value *ent, ir_value *field, qc_type outype)
1711 if (ent->m_vtype != TYPE_ENTITY)
1714 /* at some point we could redirect for TYPE_POINTER... but that could lead to carelessness */
1715 if (field->m_vtype != TYPE_FIELD)
1720 case TYPE_FLOAT: op = INSTR_LOAD_F; break;
1721 case TYPE_VECTOR: op = INSTR_LOAD_V; break;
1722 case TYPE_STRING: op = INSTR_LOAD_S; break;
1723 case TYPE_FIELD: op = INSTR_LOAD_FLD; break;
1724 case TYPE_ENTITY: op = INSTR_LOAD_ENT; break;
1725 case TYPE_FUNCTION: op = INSTR_LOAD_FNC; break;
1727 case TYPE_POINTER: op = INSTR_LOAD_I; break;
1728 case TYPE_INTEGER: op = INSTR_LOAD_I; break;
1731 irerror(self->m_context, "invalid type for ir_block_create_load_from_ent: %s", type_name[outype]);
1735 return ir_block_create_general_instr(self, ctx, label, op, ent, field, outype);
1738 /* PHI resolving breaks the SSA, and must thus be the last
1739 * step before life-range calculation.
1742 static bool ir_block_naive_phi(ir_block *self);
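/* A sketch of the lowering (illustrative): a phi such as
 *
 *   %x = PHI [%a from B1], [%b from B2]
 *
 * is removed, and a store %a -> %x is inserted at the end of B1 (just
 * before its terminating jump) and %b -> %x likewise in B2.
 */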
1743 bool ir_function_naive_phi(ir_function *self)
1745 for (auto& b : self->m_blocks)
1746 if (!ir_block_naive_phi(b.get()))
1751 static bool ir_block_naive_phi(ir_block *self)
1754 /* FIXME: optionally, create_phi can add the phis
1755 * to a list so we don't need to loop through blocks
1756 * - anyway: "don't optimize YET"
1758 for (i = 0; i < vec_size(self->m_instr); ++i)
1760 ir_instr *instr = self->m_instr[i];
1761 if (instr->m_opcode != VINSTR_PHI)
1764 vec_remove(self->m_instr, i, 1);
1765 --i; /* NOTE: i+1 below */
1767 for (auto &it : instr->m_phi) {
1768 ir_value *v = it.value;
1769 ir_block *b = it.from;
1770 if (v->m_store == store_value && v->m_reads.size() == 1 && v->m_writes.size() == 1) {
1771 /* replace the value */
1772 if (!ir_instr_op(v->m_writes[0], 0, instr->_m_ops[0], true))
1775 /* force a move instruction */
1776 ir_instr *prevjump = vec_last(b->m_instr);
1777 vec_pop(b->m_instr);
1779 instr->_m_ops[0]->m_store = store_global;
1780 if (!ir_block_create_store(b, instr->m_context, instr->_m_ops[0], v))
1782 instr->_m_ops[0]->m_store = store_value;
1783 vec_push(b->m_instr, prevjump);
1792 /***********************************************************************
1793 * IR Temp allocation code
1794 * Propagating value life ranges by walking through the function backwards
1795 * until no more changes are made.
1796 * In theory this should happen once more for every nested loop
1798 * Though this implementation might run an additional time for if nests.
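/* Example: a value written before a loop and read inside it only shows
 * up as live across the loop once the body's liveness has been
 * propagated back to the loop header, so each nesting level can demand
 * one more pass over the function.
 */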
1801 /* Enumerate instructions used by value's life-ranges
1803 static void ir_block_enumerate(ir_block *self, size_t *_eid)
1807 for (i = 0; i < vec_size(self->m_instr); ++i)
1809 self->m_instr[i]->m_eid = eid++;
1814 /* Enumerate blocks and instructions.
1815 * The block-enumeration is unordered!
1816 * We do not really use the block enumeration, however
1817 * the instruction enumeration is important for life-ranges.
1819 void ir_function_enumerate(ir_function *self)
1821 size_t instruction_id = 0;
1822 size_t block_eid = 0;
1823 for (auto& block : self->m_blocks)
1825 /* each block now gets an additional "entry" instruction id
1826 * we can use to avoid point-life issues
1828 block->m_entry_id = instruction_id;
1829 block->m_eid = block_eid;
1833 ir_block_enumerate(block.get(), &instruction_id);
1837 /* Local-value allocator
1838 * After the life ranges of all values used in a function have been computed,
1839 * we can allocate their global positions.
1840 * This is the counterpart to register-allocation in register machines.
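/* Example of the reuse this enables (illustrative): two float temps with
 * life ranges [2,4] and [6,9] do not overlap and can share one slot,
 * while a third with range [3,7] overlaps both and gets its own slot.
 */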
1842 struct function_allocator {
1849 static bool function_allocator_alloc(function_allocator *alloc, ir_value *var)
1852 size_t vsize = ir_value_sizeof(var);
1854 var->m_code.local = vec_size(alloc->locals);
1856 slot = new ir_value("reg", store_global, var->m_vtype);
1860 if (!ir_value_life_merge_into(slot, var))
1863 vec_push(alloc->locals, slot);
1864 vec_push(alloc->sizes, vsize);
1865 vec_push(alloc->unique, var->m_unique_life);
1874 static bool ir_function_allocator_assign(ir_function *self, function_allocator *alloc, ir_value *v)
1879 if (v->m_unique_life)
1880 return function_allocator_alloc(alloc, v);
1882 for (a = 0; a < vec_size(alloc->locals); ++a)
1884 /* if it's reserved for a unique liferange: skip */
1885 if (alloc->unique[a])
1888 slot = alloc->locals[a];
1890 /* never resize parameters
1891 * will be required later when overlapping temps + locals
1893 if (a < vec_size(self->m_params) &&
1894 alloc->sizes[a] < ir_value_sizeof(v))
1899 if (ir_values_overlap(v, slot))
1902 if (!ir_value_life_merge_into(slot, v))
1905 /* adjust size for this slot */
1906 if (alloc->sizes[a] < ir_value_sizeof(v))
1907 alloc->sizes[a] = ir_value_sizeof(v);
1909 v->m_code.local = a;
1912 if (a >= vec_size(alloc->locals)) {
1913 if (!function_allocator_alloc(alloc, v))
1919 bool ir_function_allocate_locals(ir_function *self)
1923 bool opt_gt = OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS);
1925 function_allocator lockalloc, globalloc;
1927 if (self->m_locals.empty() && self->m_values.empty())
1930 globalloc.locals = nullptr;
1931 globalloc.sizes = nullptr;
1932 globalloc.positions = nullptr;
1933 globalloc.unique = nullptr;
1934 lockalloc.locals = nullptr;
1935 lockalloc.sizes = nullptr;
1936 lockalloc.positions = nullptr;
1937 lockalloc.unique = nullptr;
1940 for (i = 0; i < self->m_locals.size(); ++i)
1942 ir_value *v = self->m_locals[i].get();
1943 if ((self->m_flags & IR_FLAG_MASK_NO_LOCAL_TEMPS) || !OPTS_OPTIMIZATION(OPTIM_LOCAL_TEMPS)) {
1945 v->m_unique_life = true;
1947 else if (i >= vec_size(self->m_params))
1950 v->m_locked = true; /* lock parameter locals */
1951 if (!function_allocator_alloc((v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1954 for (; i < self->m_locals.size(); ++i)
1956 ir_value *v = self->m_locals[i].get();
1957 if (v->m_life.empty())
1959 if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
1963 /* Allocate a slot for any value that still exists */
1964 for (i = 0; i < self->m_values.size(); ++i)
1966 ir_value *v = self->m_values[i].get();
1968 if (v->m_life.empty())
1971 /* CALL optimization:
1972 * If the value is a parameter-temp: 1 write, 1 read from a CALL
1973 * and it's not "locked", write it to the OFS_PARM directly.
1975 if (OPTS_OPTIMIZATION(OPTIM_CALL_STORES) && !v->m_locked && !v->m_unique_life) {
1976 if (v->m_reads.size() == 1 && v->m_writes.size() == 1 &&
1977 (v->m_reads[0]->m_opcode == VINSTR_NRCALL ||
1978 (v->m_reads[0]->m_opcode >= INSTR_CALL0 && v->m_reads[0]->m_opcode <= INSTR_CALL8)
1983 ir_instr *call = v->m_reads[0];
1984 if (!vec_ir_value_find(call->m_params, v, ¶m)) {
1985 irerror(call->m_context, "internal error: unlocked parameter %s not found", v->m_name.c_str());
1988 ++opts_optimizationcount[OPTIM_CALL_STORES];
1989 v->m_callparam = true;
1991 ir_value_code_setaddr(v, OFS_PARM0 + 3*param);
1993 size_t nprotos = self->m_owner->m_extparam_protos.size();
1996 if (nprotos > param)
1997 ep = self->m_owner->m_extparam_protos[param].get();
2000 ep = ir_gen_extparam_proto(self->m_owner);
2001 while (++nprotos <= param)
2002 ep = ir_gen_extparam_proto(self->m_owner);
2004 ir_instr_op(v->m_writes[0], 0, ep, true);
2005 call->m_params[param+8] = ep;
2009 if (v->m_writes.size() == 1 && v->m_writes[0]->m_opcode == INSTR_CALL0) {
2010 v->m_store = store_return;
2011 if (v->m_members[0]) v->m_members[0]->m_store = store_return;
2012 if (v->m_members[1]) v->m_members[1]->m_store = store_return;
2013 if (v->m_members[2]) v->m_members[2]->m_store = store_return;
2014 ++opts_optimizationcount[OPTIM_CALL_STORES];
2019 if (!ir_function_allocator_assign(self, (v->m_locked || !opt_gt ? &lockalloc : &globalloc), v))
2023 if (!lockalloc.sizes && !globalloc.sizes) {
2026 vec_push(lockalloc.positions, 0);
2027 vec_push(globalloc.positions, 0);
2029 /* Adjust slot positions based on sizes */
2030 if (lockalloc.sizes) {
2031 pos = (vec_size(lockalloc.sizes) ? lockalloc.positions[0] : 0);
2032 for (i = 1; i < vec_size(lockalloc.sizes); ++i)
2034 pos = lockalloc.positions[i-1] + lockalloc.sizes[i-1];
2035 vec_push(lockalloc.positions, pos);
2037 self->m_allocated_locals = pos + vec_last(lockalloc.sizes);
2039 if (globalloc.sizes) {
2040 pos = (vec_size(globalloc.sizes) ? globalloc.positions[0] : 0);
2041 for (i = 1; i < vec_size(globalloc.sizes); ++i)
2043 pos = globalloc.positions[i-1] + globalloc.sizes[i-1];
2044 vec_push(globalloc.positions, pos);
2046 self->m_globaltemps = pos + vec_last(globalloc.sizes);
2049 /* Locals need to know their new position */
2050 for (auto& local : self->m_locals) {
2051 if (local->m_locked || !opt_gt)
2052 local->m_code.local = lockalloc.positions[local->m_code.local];
2054 local->m_code.local = globalloc.positions[local->m_code.local];
2056 /* Take over the actual slot positions on values */
2057 for (auto& value : self->m_values) {
2058 if (value->m_locked || !opt_gt)
2059 value->m_code.local = lockalloc.positions[value->m_code.local];
2061 value->m_code.local = globalloc.positions[value->m_code.local];
2069 for (i = 0; i < vec_size(lockalloc.locals); ++i)
2070 delete lockalloc.locals[i];
2071 for (i = 0; i < vec_size(globalloc.locals); ++i)
2072 delete globalloc.locals[i];
2073 vec_free(globalloc.unique);
2074 vec_free(globalloc.locals);
2075 vec_free(globalloc.sizes);
2076 vec_free(globalloc.positions);
2077 vec_free(lockalloc.unique);
2078 vec_free(lockalloc.locals);
2079 vec_free(lockalloc.sizes);
2080 vec_free(lockalloc.positions);
2084 /* Get information about which operands
2085 * are read from or written to.
2087 static void ir_op_read_write(int op, size_t *read, size_t *write)
2107 case INSTR_STOREP_F:
2108 case INSTR_STOREP_V:
2109 case INSTR_STOREP_S:
2110 case INSTR_STOREP_ENT:
2111 case INSTR_STOREP_FLD:
2112 case INSTR_STOREP_FNC:
2123 static bool ir_block_living_add_instr(ir_block *self, size_t eid) {
2124 bool changed = false;
2125 for (auto &it : self->m_living)
2126 if (ir_value_life_merge(it, eid))
2131 static bool ir_block_living_lock(ir_block *self) {
2132 bool changed = false;
2133 for (auto &it : self->m_living) {
2136 it->m_locked = true;
2142 static bool ir_block_life_propagate(ir_block *self, bool *changed)
2146 size_t i, o, p, mem;
2147 // bitmasks which operands are read from or written to
2150 self->m_living.clear();
2152 p = vec_size(self->m_exits);
2153 for (i = 0; i < p; ++i) {
2154 ir_block *prev = self->m_exits[i];
2155 for (auto &it : prev->m_living)
2156 if (!vec_ir_value_find(self->m_living, it, nullptr))
2157 self->m_living.push_back(it);
2160 i = vec_size(self->m_instr);
2163 instr = self->m_instr[i];
2165 /* See which operands are read and write operands */
2166 ir_op_read_write(instr->m_opcode, &read, &write);
2168 /* Go through the 3 main operands
2169 * writes first, then reads
2171 for (o = 0; o < 3; ++o)
2173 if (!instr->_m_ops[o]) /* no such operand */
2176 value = instr->_m_ops[o];
2178 /* We only care about locals */
2179 /* we also calculate parameter liferanges so that locals
2180 * can take up parameter slots */
2181 if (value->m_store != store_value &&
2182 value->m_store != store_local &&
2183 value->m_store != store_param)
2186 /* write operands */
2187 /* When we write to a local, we consider it "dead" for the
2188 * remaining upper part of the function, since in SSA a value
2189 * can only be written once (== created)
2194 bool in_living = vec_ir_value_find(self->m_living, value, &idx);
2197 /* If the value isn't alive it hasn't been read before... */
2198 /* TODO: See if the warning can be emitted during parsing or AST processing
2199 * otherwise have warning printed here.
2200 * IF printing a warning here: include filecontext_t,
2201 * and make sure it's only printed once
2202 * since this function is run multiple times.
2204 /* con_err( "Value only written %s\n", value->m_name); */
2205 if (ir_value_life_merge(value, instr->m_eid))
2208 /* since 'living' won't contain it
2209 * anymore, merge the value, since
2212 if (ir_value_life_merge(value, instr->m_eid))
2215 self->m_living.erase(self->m_living.begin() + idx);
2217 /* Removing a vector removes all members */
2218 for (mem = 0; mem < 3; ++mem) {
2219 if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], &idx)) {
2220 if (ir_value_life_merge(value->m_members[mem], instr->m_eid))
2222 self->m_living.erase(self->m_living.begin() + idx);
2225 /* Removing the last member removes the vector */
2226 if (value->m_memberof) {
2227 value = value->m_memberof;
2228 for (mem = 0; mem < 3; ++mem) {
2229 if (value->m_members[mem] && vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2232 if (mem == 3 && vec_ir_value_find(self->m_living, value, &idx)) {
2233 if (ir_value_life_merge(value, instr->m_eid))
2235 self->m_living.erase(self->m_living.begin() + idx);
2241 /* These operations need a special case, as they can otherwise break
2242 * when using the same source and destination operand: the engine may
2243 * read the source multiple times. */
2244 if (instr->m_opcode == INSTR_MUL_VF ||
2245 instr->m_opcode == VINSTR_BITAND_VF ||
2246 instr->m_opcode == VINSTR_BITOR_VF ||
2247 instr->m_opcode == VINSTR_BITXOR ||
2248 instr->m_opcode == VINSTR_BITXOR_VF ||
2249 instr->m_opcode == VINSTR_BITXOR_V ||
2250 instr->m_opcode == VINSTR_CROSS)
2252 value = instr->_m_ops[2];
2253 /* the float source will get an additional lifetime */
2254 if (ir_value_life_merge(value, instr->m_eid+1))
2256 if (value->m_memberof && ir_value_life_merge(value->m_memberof, instr->m_eid+1))
2260 if (instr->m_opcode == INSTR_MUL_FV ||
2261 instr->m_opcode == INSTR_LOAD_V ||
2262 instr->m_opcode == VINSTR_BITXOR ||
2263 instr->m_opcode == VINSTR_BITXOR_VF ||
2264 instr->m_opcode == VINSTR_BITXOR_V ||
2265 instr->m_opcode == VINSTR_CROSS)
2267 value = instr->_m_ops[1];
2268 /* the float source will get an additional lifetime */
2269 if (ir_value_life_merge(value, instr->m_eid+1))
2271 if (value->m_memberof && ir_value_life_merge(value->m_memberof, instr->m_eid+1))
2275 for (o = 0; o < 3; ++o)
2277 if (!instr->_m_ops[o]) /* no such operand */
2280 value = instr->_m_ops[o];
2282 /* We only care about locals */
2283 /* we also calculate parameter liferanges so that locals
2284 * can take up parameter slots */
2285 if (value->m_store != store_value &&
2286 value->m_store != store_local &&
2287 value->m_store != store_param)
2293 if (!vec_ir_value_find(self->m_living, value, nullptr))
2294 self->m_living.push_back(value);
2295 /* reading adds the full vector */
2296 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2297 self->m_living.push_back(value->m_memberof);
2298 for (mem = 0; mem < 3; ++mem) {
2299 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2300 self->m_living.push_back(value->m_members[mem]);
2304 /* PHI operands are always read operands */
2305 for (auto &it : instr->m_phi) {
2307 if (!vec_ir_value_find(self->m_living, value, nullptr))
2308 self->m_living.push_back(value);
2309 /* reading adds the full vector */
2310 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2311 self->m_living.push_back(value->m_memberof);
2312 for (mem = 0; mem < 3; ++mem) {
2313 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2314 self->m_living.push_back(value->m_members[mem]);
2318 /* on a call, all these values must be "locked" */
2319 if (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8) {
2320 if (ir_block_living_lock(self))
2323 /* call params are read operands too */
2324 for (auto &it : instr->m_params) {
2326 if (!vec_ir_value_find(self->m_living, value, nullptr))
2327 self->m_living.push_back(value);
2328 /* reading adds the full vector */
2329 if (value->m_memberof && !vec_ir_value_find(self->m_living, value->m_memberof, nullptr))
2330 self->m_living.push_back(value->m_memberof);
2331 for (mem = 0; mem < 3; ++mem) {
2332 if (value->m_members[mem] && !vec_ir_value_find(self->m_living, value->m_members[mem], nullptr))
2333 self->m_living.push_back(value->m_members[mem]);
2338 if (ir_block_living_add_instr(self, instr->m_eid))
2341 /* the "entry" instruction ID */
2342 if (ir_block_living_add_instr(self, self->m_entry_id))
2348 bool ir_function_calculate_liferanges(ir_function *self)
2350 /* parameters live at 0 */
2351 for (size_t i = 0; i < vec_size(self->m_params); ++i)
2352 if (!ir_value_life_merge(self->m_locals[i].get(), 0))
2353 compile_error(self->m_context, "internal error: failed value-life merging");
2359 for (auto i = self->m_blocks.rbegin(); i != self->m_blocks.rend(); ++i)
2360 ir_block_life_propagate(i->get(), &changed);
2363 if (self->m_blocks.size()) {
2364 ir_block *block = self->m_blocks[0].get();
2365 for (auto &it : block->m_living) {
2367 if (v->m_store != store_local)
2369 if (v->m_vtype == TYPE_VECTOR)
2371 self->m_flags |= IR_FLAG_HAS_UNINITIALIZED;
2372 /* find the instruction reading from it */
2374 for (; s < v->m_reads.size(); ++s) {
2375 if (v->m_reads[s]->m_eid == v->m_life[0].end)
2378 if (s < v->m_reads.size()) {
2379 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2380 "variable `%s` may be used uninitialized in this function\n"
2383 v->m_reads[s]->m_context.file, v->m_reads[s]->m_context.line)
2390 if (v->m_memberof) {
2391 ir_value *vec = v->m_memberof;
2392 for (s = 0; s < vec->m_reads.size(); ++s) {
2393 if (vec->m_reads[s]->m_eid == v->m_life[0].end)
2396 if (s < vec->m_reads.size()) {
2397 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2398 "variable `%s` may be used uninitialized in this function\n"
2401 vec->m_reads[s]->m_context.file, vec->m_reads[s]->m_context.line)
2409 if (irwarning(v->m_context, WARN_USED_UNINITIALIZED,
2410 "variable `%s` may be used uninitialized in this function", v->m_name.c_str()))
2419 /***********************************************************************
2422 * Since the IR has the convention of putting 'write' operands
2423 * at the beginning, we have to rotate the operands of instructions
2424 * properly in order to generate valid QCVM code.
2426 * Having destinations at a fixed position is more convenient. In QC
2427 * this is *mostly* OPC, but FTE adds at least 2 instructions which
2428 * read from OPA and store to OPB rather than OPC, which is
2429 * partially the reason why the implementation of these instructions
2430 * in darkplaces has been delayed for so long.
2432 * Breaking conventions is annoying...
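/* For reference, the generated statement layout assumed below (a sketch):
 * stmt.o1 = OPA (source), stmt.o2 = OPB (source), stmt.o3 = OPC (dest),
 * while IR instructions keep the destination in _m_ops[0]; hence the
 * rotation o1 <- _m_ops[1], o2 <- _m_ops[2], o3 <- _m_ops[0].
 */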
2434 static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal);
2436 static bool gen_global_field(code_t *code, ir_value *global)
2438 if (global->m_hasvalue)
2440 ir_value *fld = global->m_constval.vpointer;
2442 irerror(global->m_context, "Invalid field constant with no field: %s", global->m_name.c_str());
2446 /* copy the field's value */
2447 ir_value_code_setaddr(global, code->globals.size());
2448 code->globals.push_back(fld->m_code.fieldaddr);
2449 if (global->m_fieldtype == TYPE_VECTOR) {
2450 code->globals.push_back(fld->m_code.fieldaddr+1);
2451 code->globals.push_back(fld->m_code.fieldaddr+2);
2456 ir_value_code_setaddr(global, code->globals.size());
2457 code->globals.push_back(0);
2458 if (global->m_fieldtype == TYPE_VECTOR) {
2459 code->globals.push_back(0);
2460 code->globals.push_back(0);
2463 if (global->m_code.globaladdr < 0)
2468 static bool gen_global_pointer(code_t *code, ir_value *global)
2470 if (global->m_hasvalue)
2472 ir_value *target = global->m_constval.vpointer;
2474 irerror(global->m_context, "Invalid pointer constant: %s", global->m_name.c_str());
2475 /* null pointers point to the null constant, which also
2476 * sits at address 0 but still has an ir_value of its own.
2481 /* Here, relocations ARE possible - in fteqcc-enhanced-qc:
2482 * void() foo; <- proto
2483 * void() *fooptr = &foo;
2484 * void() foo = { code }
2486 if (!target->m_code.globaladdr) {
2487 /* FIXME: Check for the constant nullptr ir_value!
2488 * because then code.globaladdr being 0 is valid.
2490 irerror(global->m_context, "FIXME: Relocation support");
2494 ir_value_code_setaddr(global, code->globals.size());
2495 code->globals.push_back(target->m_code.globaladdr);
2499 ir_value_code_setaddr(global, code->globals.size());
2500 code->globals.push_back(0);
2502 if (global->m_code.globaladdr < 0)
2507 static bool gen_blocks_recursive(code_t *code, ir_function *func, ir_block *block)
2509 prog_section_statement_t stmt;
2518 block->m_generated = true;
2519 block->m_code_start = code->statements.size();
2520 for (i = 0; i < vec_size(block->m_instr); ++i)
2522 instr = block->m_instr[i];
2524 if (instr->m_opcode == VINSTR_PHI) {
2525 irerror(block->m_context, "cannot generate virtual instruction (phi)");
2529 if (instr->m_opcode == VINSTR_JUMP) {
2530 target = instr->m_bops[0];
2531 /* for unconditional jumps, if the target hasn't been generated
2532 * yet, we generate it right here.
2534 if (!target->m_generated)
2535 return gen_blocks_recursive(code, func, target);
2537 /* otherwise we generate a jump instruction */
2538 stmt.opcode = INSTR_GOTO;
2539 stmt.o1.s1 = target->m_code_start - code->statements.size();
2542 if (stmt.o1.s1 != 1)
2543 code_push_statement(code, &stmt, instr->m_context);
2545 /* no further instructions can be in this block */
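/* QCVM has no native XOR, so the lowering below relies on the identity
 *   a ^ b == (a | b) - (a & b)
 * computing the OR into the destination, the AND into the builder's
 * m_vinstr_temp[0] scratch global, and subtracting.
 */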
2549 if (instr->m_opcode == VINSTR_BITXOR) {
2550 stmt.opcode = INSTR_BITOR;
2551 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2552 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2553 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2554 code_push_statement(code, &stmt, instr->m_context);
2555 stmt.opcode = INSTR_BITAND;
2556 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2557 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2558 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2559 code_push_statement(code, &stmt, instr->m_context);
2560 stmt.opcode = INSTR_SUB_F;
2561 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2562 stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2563 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2564 code_push_statement(code, &stmt, instr->m_context);
2566 /* instruction generated */
2570 if (instr->m_opcode == VINSTR_BITAND_V) {
2571 stmt.opcode = INSTR_BITAND;
2572 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2573 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2574 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2575 code_push_statement(code, &stmt, instr->m_context);
2579 code_push_statement(code, &stmt, instr->m_context);
2583 code_push_statement(code, &stmt, instr->m_context);
2585 /* instruction generated */
2589 if (instr->m_opcode == VINSTR_BITOR_V) {
2590 stmt.opcode = INSTR_BITOR;
2591 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2592 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2593 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2594 code_push_statement(code, &stmt, instr->m_context);
2598 code_push_statement(code, &stmt, instr->m_context);
2602 code_push_statement(code, &stmt, instr->m_context);
2604 /* instruction generated */
2608 if (instr->m_opcode == VINSTR_BITXOR_V) {
2609 for (j = 0; j < 3; ++j) {
2610 stmt.opcode = INSTR_BITOR;
2611 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2612 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + j;
2613 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2614 code_push_statement(code, &stmt, instr->m_context);
2615 stmt.opcode = INSTR_BITAND;
2616 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2617 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + j;
2618 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2619 code_push_statement(code, &stmt, instr->m_context);
2621 stmt.opcode = INSTR_SUB_V;
2622 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2623 stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2624 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2625 code_push_statement(code, &stmt, instr->m_context);
2627 /* instruction generated */
2631 if (instr->m_opcode == VINSTR_BITAND_VF) {
2632 stmt.opcode = INSTR_BITAND;
2633 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2634 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2635 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2636 code_push_statement(code, &stmt, instr->m_context);
2639 code_push_statement(code, &stmt, instr->m_context);
2642 code_push_statement(code, &stmt, instr->m_context);
2644 /* instruction generated */
2648 if (instr->m_opcode == VINSTR_BITOR_VF) {
2649 stmt.opcode = INSTR_BITOR;
2650 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]);
2651 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2652 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2653 code_push_statement(code, &stmt, instr->m_context);
2656 code_push_statement(code, &stmt, instr->m_context);
2659 code_push_statement(code, &stmt, instr->m_context);
2661 /* instruction generated */
2665 if (instr->m_opcode == VINSTR_BITXOR_VF) {
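/* vector ^ float variant: note the scalar operand (_m_ops[2]) is
 * not offset by j, i.e. it is broadcast across all three components */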
2666 for (j = 0; j < 3; ++j) {
2667 stmt.opcode = INSTR_BITOR;
2668 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2669 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2670 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2671 code_push_statement(code, &stmt, instr->m_context);
2672 stmt.opcode = INSTR_BITAND;
2673 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + j;
2674 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]);
2675 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2676 code_push_statement(code, &stmt, instr->m_context);
2678 stmt.opcode = INSTR_SUB_V;
2679 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2680 stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2681 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2682 code_push_statement(code, &stmt, instr->m_context);
2684 /* instruction generated */
2688 if (instr->m_opcode == VINSTR_CROSS) {
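/* component-wise cross product built from scalar multiplies:
 *   out[j] = a[(j+1)%3] * b[(j+2)%3]
 *   tmp[j] = a[(j+2)%3] * b[(j+1)%3]
 * followed by a single SUB_V: out = out - tmp */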
2689 stmt.opcode = INSTR_MUL_F;
2690 for (j = 0; j < 3; ++j) {
2691 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + (j + 1) % 3;
2692 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + (j + 2) % 3;
2693 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]) + j;
2694 code_push_statement(code, &stmt, instr->m_context);
2695 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[1]) + (j + 2) % 3;
2696 stmt.o2.s1 = ir_value_code_addr(instr->_m_ops[2]) + (j + 1) % 3;
2697 stmt.o3.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]) + j;
2698 code_push_statement(code, &stmt, instr->m_context);
2700 stmt.opcode = INSTR_SUB_V;
2701 stmt.o1.s1 = ir_value_code_addr(instr->_m_ops[0]);
2702 stmt.o2.s1 = ir_value_code_addr(func->m_owner->m_vinstr_temp[0]);
2703 stmt.o3.s1 = ir_value_code_addr(instr->_m_ops[0]);
2704 code_push_statement(code, &stmt, instr->m_context);
2706 /* instruction generated */
2710 if (instr->m_opcode == VINSTR_COND) {
2711 ontrue = instr->m_bops[0];
2712 onfalse = instr->m_bops[1];
2713 /* TODO: have the AST signal which block should
2714 * come first: e.g. optimize IFs without ELSE...
2717 stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[0]);
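/* layout strategy: a target block that already exists in the output
 * only needs a conditional jump back to its code_start; an
 * ungenerated target is recursed into so its code lands inline right
 * after the conditional, preferring fall-through on the likely path */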
2721 if (ontrue->m_generated) {
2722 stmt.opcode = INSTR_IF;
2723 stmt.o2.s1 = ontrue->m_code_start - code->statements.size();
2724 if (stmt.o2.s1 != 1)
2725 code_push_statement(code, &stmt, instr->m_context);
2727 if (onfalse->m_generated) {
2728 stmt.opcode = INSTR_IFNOT;
2729 stmt.o2.s1 = onfalse->m_code_start - code->statements.size();
2730 if (stmt.o2.s1 != 1)
2731 code_push_statement(code, &stmt, instr->m_context);
2733 if (!ontrue->m_generated) {
2734 if (onfalse->m_generated)
2735 return gen_blocks_recursive(code, func, ontrue);
2737 if (!onfalse->m_generated) {
2738 if (ontrue->m_generated)
2739 return gen_blocks_recursive(code, func, onfalse);
2741 /* neither ontrue nor onfalse have been generated yet */
2742 stmt.opcode = INSTR_IFNOT;
2743 if (!instr->m_likely) {
2744 /* Honor the likelihood hint */
2745 ir_block *tmp = onfalse;
2746 stmt.opcode = INSTR_IF;
2750 stidx = code->statements.size();
2751 code_push_statement(code, &stmt, instr->m_context);
2752 /* on false we jump, so add ontrue-path */
2753 if (!gen_blocks_recursive(code, func, ontrue))
2755 /* fixup the jump address */
2756 code->statements[stidx].o2.s1 = code->statements.size() - stidx;
2757 /* generate onfalse path */
2758 if (onfalse->m_generated) {
2759 /* fixup the jump address */
2760 code->statements[stidx].o2.s1 = onfalse->m_code_start - stidx;
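/* peephole: o2.s1 == 1 means the conditional would jump to the very
 * next statement, so both paths converge immediately; the single
 * trailing statement is hoisted into the conditional's slot, any
 * backward (negative) relative jump in it is shortened by one, and
 * the now-duplicate statement is popped */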
2761 if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2762 code->statements[stidx] = code->statements[stidx+1];
2763 if (code->statements[stidx].o1.s1 < 0)
2764 code->statements[stidx].o1.s1++;
2765 code_pop_statement(code);
2767 stmt.opcode = code->statements.back().opcode;
2768 if (stmt.opcode == INSTR_GOTO ||
2769 stmt.opcode == INSTR_IF ||
2770 stmt.opcode == INSTR_IFNOT ||
2771 stmt.opcode == INSTR_RETURN ||
2772 stmt.opcode == INSTR_DONE)
2774 /* no use jumping from here */
2777 /* may have been generated in the previous recursive call */
2778 stmt.opcode = INSTR_GOTO;
2779 stmt.o1.s1 = onfalse->m_code_start - code->statements.size();
2782 if (stmt.o1.s1 != 1)
2783 code_push_statement(code, &stmt, instr->m_context);
2786 else if (stidx+2 == code->statements.size() && code->statements[stidx].o2.s1 == 1) {
2787 code->statements[stidx] = code->statements[stidx+1];
2788 if (code->statements[stidx].o1.s1 < 0)
2789 code->statements[stidx].o1.s1++;
2790 code_pop_statement(code);
2792 /* if not, generate now */
2793 return gen_blocks_recursive(code, func, onfalse);
2796 if ( (instr->m_opcode >= INSTR_CALL0 && instr->m_opcode <= INSTR_CALL8)
2797 || instr->m_opcode == VINSTR_NRCALL)
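/* QC calling convention: the first 8 arguments are copied into the
 * parameter registers OFS_PARM0..OFS_PARM7, each of which is 3
 * globals wide (large enough for a vector). E.g. a hypothetical
 * foo(a, b) with float arguments roughly becomes:
 *   STORE_F a, OFS_PARM0
 *   STORE_F b, OFS_PARM0+3
 *   CALL2   foo
 * arguments beyond the eighth go through the EXTPARM globals below */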
2802 first = instr->m_params.size();
2805 for (p = 0; p < first; ++p)
2807 ir_value *param = instr->m_params[p];
2808 if (param->m_callparam)
2811 stmt.opcode = INSTR_STORE_F;
2814 if (param->m_vtype == TYPE_FIELD)
2815 stmt.opcode = field_store_instr[param->m_fieldtype];
2816 else if (param->m_vtype == TYPE_NIL)
2817 stmt.opcode = INSTR_STORE_V;
2819 stmt.opcode = type_store_instr[param->m_vtype];
2820 stmt.o1.u1 = ir_value_code_addr(param);
2821 stmt.o2.u1 = OFS_PARM0 + 3 * p;
2823 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2824 /* fetch 3 separate floats */
2825 stmt.opcode = INSTR_STORE_F;
2826 stmt.o1.u1 = ir_value_code_addr(param->m_members[0]);
2827 code_push_statement(code, &stmt, instr->m_context);
2829 stmt.o1.u1 = ir_value_code_addr(param->m_members[1]);
2830 code_push_statement(code, &stmt, instr->m_context);
2832 stmt.o1.u1 = ir_value_code_addr(param->m_members[2]);
2833 code_push_statement(code, &stmt, instr->m_context);
2836 code_push_statement(code, &stmt, instr->m_context);
2838 /* Now handle extparams */
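/* arguments 9 and up have no native parameter registers; they are
 * stored into dedicated EXTPARM# globals, created on demand, which
 * the callee copies out again (see gen_function_extparam_copy) */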
2839 first = instr->m_params.size();
2840 for (; p < first; ++p)
2842 ir_builder *ir = func->m_owner;
2843 ir_value *param = instr->m_params[p];
2844 ir_value *targetparam;
2846 if (param->m_callparam)
2849 if (p-8 >= ir->m_extparams.size())
2850 ir_gen_extparam(ir);
2852 targetparam = ir->m_extparams[p-8];
2854 stmt.opcode = INSTR_STORE_F;
2857 if (param->m_vtype == TYPE_FIELD)
2858 stmt.opcode = field_store_instr[param->m_fieldtype];
2859 else if (param->m_vtype == TYPE_NIL)
2860 stmt.opcode = INSTR_STORE_V;
2862 stmt.opcode = type_store_instr[param->m_vtype];
2863 stmt.o1.u1 = ir_value_code_addr(param);
2864 stmt.o2.u1 = ir_value_code_addr(targetparam);
2865 if (param->m_vtype == TYPE_VECTOR && (param->m_flags & IR_FLAG_SPLIT_VECTOR)) {
2866 /* fetch 3 separate floats */
2867 stmt.opcode = INSTR_STORE_F;
2868 stmt.o1.u1 = ir_value_code_addr(param->m_members[0]);
2869 code_push_statement(code, &stmt, instr->m_context);
2871 stmt.o1.u1 = ir_value_code_addr(param->m_members[1]);
2872 code_push_statement(code, &stmt, instr->m_context);
2874 stmt.o1.u1 = ir_value_code_addr(param->m_members[2]);
2875 code_push_statement(code, &stmt, instr->m_context);
2878 code_push_statement(code, &stmt, instr->m_context);
2881 stmt.opcode = INSTR_CALL0 + instr->m_params.size();
2882 if (stmt.opcode > INSTR_CALL8)
2883 stmt.opcode = INSTR_CALL8;
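/* calls with more than 8 arguments still emit CALL8; the surplus
 * arguments already sit in the EXTPARM globals stored above */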
2884 stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[1]);
2887 code_push_statement(code, &stmt, instr->m_context);
2889 retvalue = instr->_m_ops[0];
2890 if (retvalue && retvalue->m_store != store_return &&
2891 (retvalue->m_store == store_global || retvalue->m_life.size()))
2893 /* not to be kept in OFS_RETURN */
2894 if (retvalue->m_vtype == TYPE_FIELD && OPTS_FLAG(ADJUST_VECTOR_FIELDS))
2895 stmt.opcode = field_store_instr[retvalue->m_fieldtype];
2897 stmt.opcode = type_store_instr[retvalue->m_vtype];
2898 stmt.o1.u1 = OFS_RETURN;
2899 stmt.o2.u1 = ir_value_code_addr(retvalue);
2901 code_push_statement(code, &stmt, instr->m_context);
2906 if (instr->m_opcode == INSTR_STATE) {
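/* STATE takes its two operands directly: presumably the frame
 * number in o1 and the think function in o2, matching the VM's
 * state opcode */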
2907 stmt.opcode = instr->m_opcode;
2908 if (instr->_m_ops[0])
2909 stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[0]);
2910 if (instr->_m_ops[1])
2911 stmt.o2.u1 = ir_value_code_addr(instr->_m_ops[1]);
2913 code_push_statement(code, &stmt, instr->m_context);
2917 stmt.opcode = instr->m_opcode;
2922 /* This is the general order of operands */
2923 if (instr->_m_ops[0])
2924 stmt.o3.u1 = ir_value_code_addr(instr->_m_ops[0]);
2926 if (instr->_m_ops[1])
2927 stmt.o1.u1 = ir_value_code_addr(instr->_m_ops[1]);
2929 if (instr->_m_ops[2])
2930 stmt.o2.u1 = ir_value_code_addr(instr->_m_ops[2]);
2932 if (stmt.opcode == INSTR_RETURN || stmt.opcode == INSTR_DONE)
2934 stmt.o1.u1 = stmt.o3.u1;
2937 else if ((stmt.opcode >= INSTR_STORE_F &&
2938 stmt.opcode <= INSTR_STORE_FNC) ||
2939 (stmt.opcode >= INSTR_STOREP_F &&
2940 stmt.opcode <= INSTR_STOREP_FNC))
2942 /* 2-operand instructions with A -> B */
2943 stmt.o2.u1 = stmt.o3.u1;
2946 /* tiny optimization, don't output
2947 * STORE a, a
2948 */
2949 if (stmt.o2.u1 == stmt.o1.u1 &&
2950 OPTS_OPTIMIZATION(OPTIM_PEEPHOLE))
2952 ++opts_optimizationcount[OPTIM_PEEPHOLE];
2956 code_push_statement(code, &stmt, instr->m_context);
2961 static bool gen_function_code(code_t *code, ir_function *self)
2964 prog_section_statement_t stmt, *retst;
2966 /* Starting from the entry point, we generate blocks "as they come"
2967 * for now. Dead blocks will obviously not be translated.
2969 if (self->m_blocks.empty()) {
2970 irerror(self->m_context, "Function '%s' declared without body.", self->m_name.c_str());
2974 block = self->m_blocks[0].get();
2975 if (block->m_generated)
2978 if (!gen_blocks_recursive(code, self, block)) {
2979 irerror(self->m_context, "failed to generate blocks for '%s'", self->m_name.c_str());
2983 /* code_write and qcvm -disasm need to know that the function ends here */
2984 retst = &code->statements.back();
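/* a trailing empty RETURN in a void function behaves like DONE, so
 * re-tagging it avoids appending a separate DONE statement below */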
2985 if (OPTS_OPTIMIZATION(OPTIM_VOID_RETURN) &&
2986 self->m_outtype == TYPE_VOID &&
2987 retst->opcode == INSTR_RETURN &&
2988 !retst->o1.u1 && !retst->o2.u1 && !retst->o3.u1)
2990 retst->opcode = INSTR_DONE;
2991 ++opts_optimizationcount[OPTIM_VOID_RETURN];
2995 stmt.opcode = INSTR_DONE;
2999 last.line = code->linenums.back();
3000 last.column = code->columnnums.back();
3002 code_push_statement(code, &stmt, last);
3007 static qcint_t ir_builder_filestring(ir_builder *ir, const char *filename)
3009 /* NOTE: filename pointers are copied, we never strdup them, so a
3010 * pointer comparison would suffice; we use strcmp to be safe.
3014 for (size_t i = 0; i != ir->m_filenames.size(); ++i) {
3015 if (!strcmp(ir->m_filenames[i], filename))
3019 str = code_genstring(ir->m_code.get(), filename);
3020 ir->m_filenames.push_back(filename);
3021 ir->m_filestrings.push_back(str);
3025 static bool gen_global_function(ir_builder *ir, ir_value *global)
3027 prog_section_function_t fun;
3032 if (!global->m_hasvalue || (!global->m_constval.vfunc)) {
3033 irerror(global->m_context, "Invalid state of function-global: not constant: %s", global->m_name.c_str());
3037 irfun = global->m_constval.vfunc;
3038 fun.name = global->m_code.name;
3039 fun.file = ir_builder_filestring(ir, global->m_context.file);
3040 fun.profile = 0; /* always 0 */
3041 fun.nargs = vec_size(irfun->m_params);
3045 for (i = 0; i < 8; ++i) {
3046 if ((int32_t)i >= fun.nargs)
3049 fun.argsize[i] = type_sizeof_[irfun->m_params[i]];
3053 fun.locals = irfun->m_allocated_locals;
3055 if (irfun->m_builtin)
3056 fun.entry = irfun->m_builtin+1;
3058 irfun->m_code_function_def = ir->m_code->functions.size();
3059 fun.entry = ir->m_code->statements.size();
3062 ir->m_code->functions.push_back(fun);
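/* "extparams": the VM provides only 8 parameter registers, so any
 * further parameters travel through dedicated vector-sized globals
 * named EXTPARM#n, allocated on demand by the functions below */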
3066 static ir_value* ir_gen_extparam_proto(ir_builder *ir)
3070 util_snprintf(name, sizeof(name), "EXTPARM#%i", (int)(ir->m_extparam_protos.size()));
3071 ir_value *global = new ir_value(name, store_global, TYPE_VECTOR);
3072 ir->m_extparam_protos.emplace_back(global);
3077 static void ir_gen_extparam(ir_builder *ir)
3079 prog_section_def_t def;
3082 if (ir->m_extparam_protos.size() < ir->m_extparams.size()+1)
3083 global = ir_gen_extparam_proto(ir);
3085 global = ir->m_extparam_protos[ir->m_extparams.size()].get();
3087 def.name = code_genstring(ir->m_code.get(), global->m_name.c_str());
3088 def.type = TYPE_VECTOR;
3089 def.offset = ir->m_code->globals.size();
3091 ir->m_code->defs.push_back(def);
3093 ir_value_code_setaddr(global, def.offset);
3095 ir->m_code->globals.push_back(0);
3096 ir->m_code->globals.push_back(0);
3097 ir->m_code->globals.push_back(0);
3099 ir->m_extparams.emplace_back(global);
3102 static bool gen_function_extparam_copy(code_t *code, ir_function *self)
3104 ir_builder *ir = self->m_owner;
3106 size_t numparams = vec_size(self->m_params);
3110 prog_section_statement_t stmt;
3111 stmt.opcode = INSTR_STORE_F;
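/* at function entry, copy each EXTPARM global into the local slot of
 * the corresponding declared parameter (parameters 9 and up) */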
3113 for (size_t i = 8; i < numparams; ++i) {
3115 if (ext >= ir->m_extparams.size())
3116 ir_gen_extparam(ir);
3118 ir_value *ep = ir->m_extparams[ext];
3120 stmt.opcode = type_store_instr[self->m_locals[i]->m_vtype];
3121 if (self->m_locals[i]->m_vtype == TYPE_FIELD &&
3122 self->m_locals[i]->m_fieldtype == TYPE_VECTOR)
3124 stmt.opcode = INSTR_STORE_V;
3126 stmt.o1.u1 = ir_value_code_addr(ep);
3127 stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3128 code_push_statement(code, &stmt, self->m_context);
3134 static bool gen_function_varargs_copy(code_t *code, ir_function *self)
3136 size_t i, ext, numparams, maxparams;
3138 ir_builder *ir = self->m_owner;
3140 prog_section_statement_t stmt;
3142 numparams = vec_size(self->m_params);
3146 stmt.opcode = INSTR_STORE_V;
3148 maxparams = numparams + self->m_max_varargs;
3149 for (i = numparams; i < maxparams; ++i) {
3151 stmt.o1.u1 = OFS_PARM0 + 3*i;
3152 stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3153 code_push_statement(code, &stmt, self->m_context);
3157 while (ext >= ir->m_extparams.size())
3158 ir_gen_extparam(ir);
3160 ep = ir->m_extparams[ext];
3162 stmt.o1.u1 = ir_value_code_addr(ep);
3163 stmt.o2.u1 = ir_value_code_addr(self->m_locals[i].get());
3164 code_push_statement(code, &stmt, self->m_context);
3170 static bool gen_function_locals(ir_builder *ir, ir_value *global)
3172 prog_section_function_t *def;
3174 uint32_t firstlocal, firstglobal;
3176 irfun = global->m_constval.vfunc;
3177 def = &ir->m_code->functions[0] + irfun->m_code_function_def;
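/* locals either get a fresh region of globals per function, or, with
 * OPTIM_OVERLAP_LOCALS, all functions share one common local area
 * starting at m_first_common_local */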
3179 if (OPTS_OPTION_BOOL(OPTION_G) ||
3180 !OPTS_OPTIMIZATION(OPTIM_OVERLAP_LOCALS) ||
3181 (irfun->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3183 firstlocal = def->firstlocal = ir->m_code->globals.size();
3185 firstlocal = def->firstlocal = ir->m_first_common_local;
3186 ++opts_optimizationcount[OPTIM_OVERLAP_LOCALS];
3189 firstglobal = (OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS) ? ir->m_first_common_globaltemp : firstlocal);
3191 for (size_t i = ir->m_code->globals.size(); i < firstlocal + irfun->m_allocated_locals; ++i)
3192 ir->m_code->globals.push_back(0);
3194 for (auto& lp : irfun->m_locals) {
3195 ir_value *v = lp.get();
3196 if (v->m_locked || !OPTS_OPTIMIZATION(OPTIM_GLOBAL_TEMPS)) {
3197 ir_value_code_setaddr(v, firstlocal + v->m_code.local);
3198 if (!ir_builder_gen_global(ir, v, true)) {
3199 irerror(v->m_context, "failed to generate local %s", v->m_name.c_str());
3204 ir_value_code_setaddr(v, firstglobal + v->m_code.local);
3206 for (auto& vp : irfun->m_values) {
3207 ir_value *v = vp.get();
3211 ir_value_code_setaddr(v, firstlocal + v->m_code.local);
3213 ir_value_code_setaddr(v, firstglobal + v->m_code.local);
3218 static bool gen_global_function_code(ir_builder *ir, ir_value *global)
3220 prog_section_function_t *fundef;
3225 irfun = global->m_constval.vfunc;
3227 if (global->m_cvq == CV_NONE) {
3228 if (irwarning(global->m_context, WARN_IMPLICIT_FUNCTION_POINTER,
3229 "function `%s` has no body and in QC implicitly becomes a function-pointer",
3230 global->m_name.c_str()))
3232 /* Not bailing out just now. If this happens a lot you don't want to have
3233 * to rerun gmqcc for each such function.
3239 /* this was a function pointer, don't generate code for those */
3243 if (irfun->m_builtin)
3247 * If there is no definition and the thing is erasable, we can skip
3248 * outputting the function to begin with.
3250 if (global->m_flags & IR_FLAG_ERASABLE && irfun->m_code_function_def < 0) {
3254 if (irfun->m_code_function_def < 0) {
3255 irerror(irfun->m_context, "`%s`: IR global wasn't generated, failed to access function-def", irfun->m_name.c_str());
3258 fundef = &ir->m_code->functions[irfun->m_code_function_def];
3260 fundef->entry = ir->m_code->statements.size();
3261 if (!gen_function_locals(ir, global)) {
3262 irerror(irfun->m_context, "Failed to generate locals for function %s", irfun->m_name.c_str());
3265 if (!gen_function_extparam_copy(ir->m_code.get(), irfun)) {
3266 irerror(irfun->m_context, "Failed to generate extparam-copy code for function %s", irfun->m_name.c_str());
3269 if (irfun->m_max_varargs && !gen_function_varargs_copy(ir->m_code.get(), irfun)) {
3270 irerror(irfun->m_context, "Failed to generate vararg-copy code for function %s", irfun->m_name.c_str());
3273 if (!gen_function_code(ir->m_code.get(), irfun)) {
3274 irerror(irfun->m_context, "Failed to generate code for function %s", irfun->m_name.c_str());
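/* for a vector def, emit three additional TYPE_FLOAT defs named
 * <name>_x, _y and _z, so the components remain addressable by name;
 * skipped for immediates ('#'-prefixed) and under SINGLE_VECTOR_DEFS */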
3280 static void gen_vector_defs(code_t *code, prog_section_def_t def, const char *name)
3285 if (!name || name[0] == '#' || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3288 def.type = TYPE_FLOAT;
3292 component = (char*)mem_a(len+3);
3293 memcpy(component, name, len);
3294 len += 2;
3295 component[len-0] = 0;
3296 component[len-2] = '_';
3298 component[len-1] = 'x';
3300 for (i = 0; i < 3; ++i) {
3301 def.name = code_genstring(code, component);
3302 code->defs.push_back(def);
3310 static void gen_vector_fields(code_t *code, prog_section_field_t fld, const char *name)
3315 if (!name || OPTS_FLAG(SINGLE_VECTOR_DEFS))
3318 fld.type = TYPE_FLOAT;
3322 component = (char*)mem_a(len+3);
3323 memcpy(component, name, len);
3324 len += 2;
3325 component[len-0] = 0;
3326 component[len-2] = '_';
3328 component[len-1] = 'x';
3330 for (i = 0; i < 3; ++i) {
3331 fld.name = code_genstring(code, component);
3332 code->fields.push_back(fld);
3340 static bool ir_builder_gen_global(ir_builder *self, ir_value *global, bool islocal)
3344 prog_section_def_t def;
3345 bool pushdef = opts.optimizeoff;
3347 /* we don't generate split-vectors */
3348 if (global->m_vtype == TYPE_VECTOR && (global->m_flags & IR_FLAG_SPLIT_VECTOR))
3351 def.type = global->m_vtype;
3352 def.offset = self->m_code->globals.size();
3354 if (OPTS_OPTION_BOOL(OPTION_G) || !islocal)
3359 * if we're erasable and never referenced, skip outputting
3362 if (global->m_flags & IR_FLAG_ERASABLE && global->m_reads.empty()) {
3366 if (OPTS_OPTIMIZATION(OPTIM_STRIP_CONSTANT_NAMES) &&
3367 !(global->m_flags & IR_FLAG_INCLUDE_DEF) &&
3368 (global->m_name[0] == '#' || global->m_cvq == CV_CONST))
3374 if (global->m_name[0] == '#') {
3375 if (!self->m_str_immediate)
3376 self->m_str_immediate = code_genstring(self->m_code.get(), "IMMEDIATE");
3377 def.name = global->m_code.name = self->m_str_immediate;
3380 def.name = global->m_code.name = code_genstring(self->m_code.get(), global->m_name.c_str());
3385 def.offset = ir_value_code_addr(global);
3386 self->m_code->defs.push_back(def);
3387 if (global->m_vtype == TYPE_VECTOR)
3388 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3389 else if (global->m_vtype == TYPE_FIELD && global->m_fieldtype == TYPE_VECTOR)
3390 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3397 switch (global->m_vtype)
3400 if (0 == global->m_name.compare("end_sys_globals")) {
3401 // TODO: remember this point... all the defs before this one
3402 // should be checksummed and added to progdefs.h when we generate it.
3404 else if (0 == global->m_name.compare("end_sys_fields")) {
3405 // TODO: same as above but for entity-fields rather than globals
3407 else if (irwarning(global->m_context, WARN_VOID_VARIABLES, "unrecognized variable of type void `%s`",
3408 global->m_name.c_str()))
3410 /* Not bailing out */
3413 /* I'd argue setting it to 0 is sufficient, but maybe some depend on knowing how far
3414 * the system fields actually go? Though the engine knows this anyway...
3415 * Maybe this could be an -f option.
3416 * fteqcc creates data of size 1 for end_sys_*, so let's do the same.
3418 ir_value_code_setaddr(global, self->m_code->globals.size());
3419 self->m_code->globals.push_back(0);
3421 if (pushdef) self->m_code->defs.push_back(def);
3424 if (pushdef) self->m_code->defs.push_back(def);
3425 return gen_global_pointer(self->m_code.get(), global);
3428 self->m_code->defs.push_back(def);
3429 if (global->m_fieldtype == TYPE_VECTOR)
3430 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3432 return gen_global_field(self->m_code.get(), global);
3437 ir_value_code_setaddr(global, self->m_code->globals.size());
3438 if (global->m_hasvalue) {
3439 iptr = (int32_t*)&global->m_constval.ivec[0];
3440 self->m_code->globals.push_back(*iptr);
3442 self->m_code->globals.push_back(0);
3444 if (!islocal && global->m_cvq != CV_CONST)
3445 def.type |= DEF_SAVEGLOBAL;
3446 if (pushdef) self->m_code->defs.push_back(def);
3448 return global->m_code.globaladdr >= 0;
3452 ir_value_code_setaddr(global, self->m_code->globals.size());
3453 if (global->m_hasvalue) {
3454 uint32_t load = code_genstring(self->m_code.get(), global->m_constval.vstring);
3455 self->m_code->globals.push_back(load);
3457 self->m_code->globals.push_back(0);
3459 if (!islocal && global->m_cvq != CV_CONST)
3460 def.type |= DEF_SAVEGLOBAL;
3461 if (pushdef) self->m_code->defs.push_back(def);
3462 return global->m_code.globaladdr >= 0;
3467 ir_value_code_setaddr(global, self->m_code->globals.size());
3468 if (global->m_hasvalue) {
3469 iptr = (int32_t*)&global->m_constval.ivec[0];
3470 self->m_code->globals.push_back(iptr[0]);
3471 if (global->m_code.globaladdr < 0)
3473 for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3474 self->m_code->globals.push_back(iptr[d]);
3477 self->m_code->globals.push_back(0);
3478 if (global->m_code.globaladdr < 0)
3480 for (d = 1; d < type_sizeof_[global->m_vtype]; ++d) {
3481 self->m_code->globals.push_back(0);
3484 if (!islocal && global->m_cvq != CV_CONST)
3485 def.type |= DEF_SAVEGLOBAL;
3488 self->m_code->defs.push_back(def);
3489 def.type &= ~DEF_SAVEGLOBAL;
3490 gen_vector_defs(self->m_code.get(), def, global->m_name.c_str());
3492 return global->m_code.globaladdr >= 0;
3495 ir_value_code_setaddr(global, self->m_code->globals.size());
3496 if (!global->m_hasvalue) {
3497 self->m_code->globals.push_back(0);
3498 if (global->m_code.globaladdr < 0)
3501 self->m_code->globals.push_back(self->m_code->functions.size());
3502 if (!gen_global_function(self, global))
3505 if (!islocal && global->m_cvq != CV_CONST)
3506 def.type |= DEF_SAVEGLOBAL;
3507 if (pushdef) self->m_code->defs.push_back(def);
3510 /* assume biggest type */
3511 ir_value_code_setaddr(global, self->m_code->globals.size());
3512 self->m_code->globals.push_back(0);
3513 for (i = 1; i < type_sizeof_[TYPE_VARIANT]; ++i)
3514 self->m_code->globals.push_back(0);
3517 /* refuse to create 'void' type or any other fancy business. */
3518 irerror(global->m_context, "Invalid type for global variable `%s`: %s",
3519 global->m_name.c_str(), type_name[global->m_vtype]);
3524 static GMQCC_INLINE void ir_builder_prepare_field(code_t *code, ir_value *field)
3526 field->m_code.fieldaddr = code_alloc_field(code, type_sizeof_[field->m_fieldtype]);
3529 static bool ir_builder_gen_field(ir_builder *self, ir_value *field)
3531 prog_section_def_t def;
3532 prog_section_field_t fld;
3536 def.type = (uint16_t)field->m_vtype;
3537 def.offset = (uint16_t)self->m_code->globals.size();
3539 /* create a global named the same as the field */
3540 if (OPTS_OPTION_U32(OPTION_STANDARD) == COMPILER_GMQCC) {
3541 /* in our standard, the global gets a dot prefix */
3542 size_t len = field->m_name.length();
3545 /* we really don't want to have to allocate this, and 1024
3546 * bytes is more than enough for a variable/field name
3548 if (len+2 >= sizeof(name)) {
3549 irerror(field->m_context, "invalid field name size: %u", (unsigned int)len);
3554 memcpy(name+1, field->m_name.c_str(), len); // no strncpy - the length was checked above
3557 def.name = code_genstring(self->m_code.get(), name);
3558 fld.name = def.name + 1; /* we reuse that string table entry */
3560 /* in plain QC, there cannot be a global with the same name,
3561 * and so we also name the global the same.
3562 * FIXME: fteqcc presumably creates a global as well;
3563 * check whether it actually uses the same name. It probably does.
3565 def.name = code_genstring(self->m_code.get(), field->m_name.c_str());
3566 fld.name = def.name;
3569 field->m_code.name = def.name;
3571 self->m_code->defs.push_back(def);
3573 fld.type = field->m_fieldtype;
3575 if (fld.type == TYPE_VOID) {
3576 irerror(field->m_context, "field is missing a type: %s - don't know its size", field->m_name.c_str());
3580 fld.offset = field->m_code.fieldaddr;
3582 self->m_code->fields.push_back(fld);
3584 ir_value_code_setaddr(field, self->m_code->globals.size());
3585 self->m_code->globals.push_back(fld.offset);
3586 if (fld.type == TYPE_VECTOR) {
3587 self->m_code->globals.push_back(fld.offset+1);
3588 self->m_code->globals.push_back(fld.offset+2);
3591 if (field->m_fieldtype == TYPE_VECTOR) {
3592 gen_vector_defs (self->m_code.get(), def, field->m_name.c_str());
3593 gen_vector_fields(self->m_code.get(), fld, field->m_name.c_str());
3596 return field->m_code.globaladdr >= 0;
3599 static void ir_builder_collect_reusables(ir_builder *builder) {
3600 std::vector<ir_value*> reusables;
3602 for (auto& gp : builder->m_globals) {
3603 ir_value *value = gp.get();
3604 if (value->m_vtype != TYPE_FLOAT || !value->m_hasvalue)
3606 if (value->m_cvq == CV_CONST || (value->m_name.length() >= 1 && value->m_name[0] == '#'))
3607 reusables.emplace_back(value);
3609 builder->m_const_floats = std::move(reusables);
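/* split-vector optimization: a constant vector that is only ever read
 * as a direct call argument can be represented by three float
 * immediates instead (reusing equal constants collected above), so
 * the vector itself never needs to occupy globals of its own */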
3612 static void ir_builder_split_vector(ir_builder *self, ir_value *vec) {
3613 ir_value* found[3] = { nullptr, nullptr, nullptr };
3615 // must not be written to
3616 if (vec->m_writes.size())
3618 // must not be trying to access individual members
3619 if (vec->m_members[0] || vec->m_members[1] || vec->m_members[2])
3621 // should be actually used otherwise it won't be generated anyway
3622 if (vec->m_reads.empty())
3624 //size_t count = vec->m_reads.size();
3628 // may only be used directly as function parameters, so if we find some other instruction cancel
3629 for (ir_instr *user : vec->m_reads) {
3630 // we only split vectors if they're used directly as parameter to a call only!
3631 if ((user->m_opcode < INSTR_CALL0 || user->m_opcode > INSTR_CALL8) && user->m_opcode != VINSTR_NRCALL)
3635 vec->m_flags |= IR_FLAG_SPLIT_VECTOR;
3637 // find existing floats making up the split
3638 for (ir_value *c : self->m_const_floats) {
3639 if (!found[0] && c->m_constval.vfloat == vec->m_constval.vvec.x)
3640 found[0] = c;
3641 if (!found[1] && c->m_constval.vfloat == vec->m_constval.vvec.y)
3642 found[1] = c;
3643 if (!found[2] && c->m_constval.vfloat == vec->m_constval.vvec.z)
3644 found[2] = c;
3645 if (found[0] && found[1] && found[2])
3646 break;
3649 // generate floats for not yet found components
3651 found[0] = ir_builder_imm_float(self, vec->m_constval.vvec.x, true);
3653 if (vec->m_constval.vvec.y == vec->m_constval.vvec.x)
3654 found[1] = found[0];
3656 found[1] = ir_builder_imm_float(self, vec->m_constval.vvec.y, true);
3659 if (vec->m_constval.vvec.z == vec->m_constval.vvec.x)
3660 found[2] = found[0];
3661 else if (vec->m_constval.vvec.z == vec->m_constval.vvec.y)
3662 found[2] = found[1];
3664 found[2] = ir_builder_imm_float(self, vec->m_constval.vvec.z, true);
3667 // the .members array should be safe to use here
3668 vec->m_members[0] = found[0];
3669 vec->m_members[1] = found[1];
3670 vec->m_members[2] = found[2];
3672 // register the readers for these floats
3673 found[0]->m_reads.insert(found[0]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3674 found[1]->m_reads.insert(found[1]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3675 found[2]->m_reads.insert(found[2]->m_reads.end(), vec->m_reads.begin(), vec->m_reads.end());
3678 static void ir_builder_split_vectors(ir_builder *self) {
3679 // member values may be added to self->m_globals during this operation, but
3680 // no new vectors will be added; we need to iterate via an index, as
3681 // C++ iterators would be invalidated
3682 const size_t count = self->m_globals.size();
3683 for (size_t i = 0; i != count; ++i) {
3684 ir_value *v = self->m_globals[i].get();
3685 if (v->m_vtype != TYPE_VECTOR || !v->m_name.length() || v->m_name[0] != '#')
3687 ir_builder_split_vector(self, v);
3691 bool ir_builder_generate(ir_builder *self, const char *filename)
3693 prog_section_statement_t stmt;
3694 char *lnofile = nullptr;
3696 if (OPTS_FLAG(SPLIT_VECTOR_PARAMETERS)) {
3697 ir_builder_collect_reusables(self);
3698 if (!self->m_const_floats.empty())
3699 ir_builder_split_vectors(self);
3702 for (auto& fp : self->m_fields)
3703 ir_builder_prepare_field(self->m_code.get(), fp.get());
3705 for (auto& gp : self->m_globals) {
3706 ir_value *global = gp.get();
3707 if (!ir_builder_gen_global(self, global, false)) {
3710 if (global->m_vtype == TYPE_FUNCTION) {
3711 ir_function *func = global->m_constval.vfunc;
3712 if (func && self->m_max_locals < func->m_allocated_locals &&
3713 !(func->m_flags & IR_FLAG_MASK_NO_OVERLAP))
3715 self->m_max_locals = func->m_allocated_locals;
3717 if (func && self->m_max_globaltemps < func->m_globaltemps)
3718 self->m_max_globaltemps = func->m_globaltemps;
3722 for (auto& fp : self->m_fields) {
3723 if (!ir_builder_gen_field(self, fp.get()))
3728 ir_value_code_setaddr(self->m_nil, self->m_code->globals.size());
3729 self->m_code->globals.push_back(0);
3730 self->m_code->globals.push_back(0);
3731 self->m_code->globals.push_back(0);
3733 // generate virtual-instruction temps
3734 for (size_t i = 0; i < IR_MAX_VINSTR_TEMPS; ++i) {
3735 ir_value_code_setaddr(self->m_vinstr_temp[i], self->m_code->globals.size());
3736 self->m_code->globals.push_back(0);
3737 self->m_code->globals.push_back(0);
3738 self->m_code->globals.push_back(0);
3741 // generate global temps
3742 self->m_first_common_globaltemp = self->m_code->globals.size();
3743 self->m_code->globals.insert(self->m_code->globals.end(), self->m_max_globaltemps, 0);
3748 // generate common locals
3749 self->m_first_common_local = self->m_code->globals.size();
3750 self->m_code->globals.insert(self->m_code->globals.end(), self->m_max_locals, 0);
3756 // generate function code
3758 for (auto& gp : self->m_globals) {
3759 ir_value *global = gp.get();
3760 if (global->m_vtype == TYPE_FUNCTION) {
3761 if (!gen_global_function_code(self, global)) {
3767 if (self->m_code->globals.size() >= 65536) {
3768 irerror(self->m_globals.back()->m_context,
3769 "This progs file would require more globals than the metadata can handle (%zu). Bailing out.",
3770 self->m_code->globals.size());
3774 /* DarkPlaces (DP) errors if the last instruction is not an INSTR_DONE. */
3775 if (self->m_code->statements.back().opcode != INSTR_DONE)
3779 stmt.opcode = INSTR_DONE;
3783 last.line = self->m_code->linenums.back();
3784 last.column = self->m_code->columnnums.back();
3786 code_push_statement(self->m_code.get(), &stmt, last);
3789 if (OPTS_OPTION_BOOL(OPTION_PP_ONLY))
3792 if (self->m_code->statements.size() != self->m_code->linenums.size()) {
3793 con_err("Linecounter wrong: %lu != %lu\n",
3794 self->m_code->statements.size(),
3795 self->m_code->linenums.size());
3796 } else if (OPTS_FLAG(LNO)) {
3798 size_t filelen = strlen(filename);
3800 memcpy(vec_add(lnofile, filelen+1), filename, filelen+1);
3801 dot = strrchr(lnofile, '.');
3805 vec_shrinkto(lnofile, dot - lnofile);
3807 memcpy(vec_add(lnofile, 5), ".lno", 5);
3810 if (!code_write(self->m_code.get(), filename, lnofile)) {
3819 /***********************************************************************
3820 * IR DEBUG Dump functions...
3823 #define IND_BUFSZ 1024
3825 static const char *qc_opname(int op)
3827 if (op < 0) return "<INVALID>";
3828 if (op < VINSTR_END)
3829 return util_instr_str[op];
3830 switch (op) {
3831 case VINSTR_END: return "END";
3832 case VINSTR_PHI: return "PHI";
3833 case VINSTR_JUMP: return "JUMP";
3834 case VINSTR_COND: return "COND";
3835 case VINSTR_BITXOR: return "BITXOR";
3836 case VINSTR_BITAND_V: return "BITAND_V";
3837 case VINSTR_BITOR_V: return "BITOR_V";
3838 case VINSTR_BITXOR_V: return "BITXOR_V";
3839 case VINSTR_BITAND_VF: return "BITAND_VF";
3840 case VINSTR_BITOR_VF: return "BITOR_VF";
3841 case VINSTR_BITXOR_VF: return "BITXOR_VF";
3842 case VINSTR_CROSS: return "CROSS";
3843 case VINSTR_NEG_F: return "NEG_F";
3844 case VINSTR_NEG_V: return "NEG_V";
3845 default: return "<UNK>";
3849 void ir_builder_dump(ir_builder *b, int (*oprintf)(const char*, ...))
3852 char indent[IND_BUFSZ];
3856 oprintf("module %s\n", b->m_name.c_str());
3857 for (i = 0; i < b->m_globals.size(); ++i)
3860 if (b->m_globals[i]->m_hasvalue)
3861 oprintf("%s = ", b->m_globals[i]->m_name.c_str());
3862 ir_value_dump(b->m_globals[i].get(), oprintf);
3865 for (i = 0; i < b->m_functions.size(); ++i)
3866 ir_function_dump(b->m_functions[i].get(), indent, oprintf);
3867 oprintf("endmodule %s\n", b->m_name.c_str());
3870 static const char *storenames[] = {
3871 "[global]", "[local]", "[param]", "[value]", "[return]"
3874 void ir_function_dump(ir_function *f, char *ind,
3875 int (*oprintf)(const char*, ...))
3878 if (f->m_builtin != 0) {
3879 oprintf("%sfunction %s = builtin %i\n", ind, f->m_name.c_str(), -f->m_builtin);
3882 oprintf("%sfunction %s\n", ind, f->m_name.c_str());
3883 util_strncat(ind, "\t", IND_BUFSZ-1);
3884 if (f->m_locals.size())
3886 oprintf("%s%i locals:\n", ind, (int)f->m_locals.size());
3887 for (i = 0; i < f->m_locals.size(); ++i) {
3888 oprintf("%s\t", ind);
3889 ir_value_dump(f->m_locals[i].get(), oprintf);
3893 oprintf("%sliferanges:\n", ind);
3894 for (i = 0; i < f->m_locals.size(); ++i) {
3895 const char *attr = "";
3897 ir_value *v = f->m_locals[i].get();
3898 if (v->m_unique_life && v->m_locked)
3899 attr = "unique,locked ";
3900 else if (v->m_unique_life)
3902 else if (v->m_locked)
3904 oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3905 storenames[v->m_store],
3906 attr, (v->m_callparam ? "callparam " : ""),
3907 (int)v->m_code.local);
3908 if (v->m_life.empty())
3910 for (l = 0; l < v->m_life.size(); ++l) {
3911 oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3914 for (m = 0; m < 3; ++m) {
3915 ir_value *vm = v->m_members[m];
3918 oprintf("%s\t%s: @%i ", ind, vm->m_name.c_str(), (int)vm->m_code.local);
3919 for (l = 0; l < vm->m_life.size(); ++l) {
3920 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3925 for (i = 0; i < f->m_values.size(); ++i) {
3926 const char *attr = "";
3928 ir_value *v = f->m_values[i].get();
3929 if (v->m_unique_life && v->m_locked)
3930 attr = "unique,locked ";
3931 else if (v->m_unique_life)
3933 else if (v->m_locked)
3935 oprintf("%s\t%s: %s %s %s%s@%i ", ind, v->m_name.c_str(), type_name[v->m_vtype],
3936 storenames[v->m_store],
3937 attr, (v->m_callparam ? "callparam " : ""),
3938 (int)v->m_code.local);
3939 if (v->m_life.empty())
3941 for (l = 0; l < v->m_life.size(); ++l) {
3942 oprintf("[%i,%i] ", v->m_life[l].start, v->m_life[l].end);
3945 for (m = 0; m < 3; ++m) {
3946 ir_value *vm = v->m_members[m];
3949 if (vm->m_unique_life && vm->m_locked)
3950 attr = "unique,locked ";
3951 else if (vm->m_unique_life)
3953 else if (vm->m_locked)
3955 oprintf("%s\t%s: %s@%i ", ind, vm->m_name.c_str(), attr, (int)vm->m_code.local);
3956 for (l = 0; l < vm->m_life.size(); ++l) {
3957 oprintf("[%i,%i] ", vm->m_life[l].start, vm->m_life[l].end);
3962 if (f->m_blocks.size())
3964 oprintf("%slife passes: %i\n", ind, (int)f->m_run_id);
3965 for (i = 0; i < f->m_blocks.size(); ++i) {
3966 ir_block_dump(f->m_blocks[i].get(), ind, oprintf);
3970 ind[strlen(ind)-1] = 0;
3971 oprintf("%sendfunction %s\n", ind, f->m_name.c_str());
3974 void ir_block_dump(ir_block* b, char *ind,
3975 int (*oprintf)(const char*, ...))
3978 oprintf("%s:%s\n", ind, b->m_label.c_str());
3979 util_strncat(ind, "\t", IND_BUFSZ-1);
3981 if (b->m_instr && b->m_instr[0])
3982 oprintf("%s (%i) [entry]\n", ind, (int)(b->m_instr[0]->m_eid-1));
3983 for (i = 0; i < vec_size(b->m_instr); ++i)
3984 ir_instr_dump(b->m_instr[i], ind, oprintf);
3985 ind[strlen(ind)-1] = 0;
3988 static void dump_phi(ir_instr *in, int (*oprintf)(const char*, ...))
3990 oprintf("%s <- phi ", in->_m_ops[0]->m_name.c_str());
3991 for (auto &it : in->m_phi) {
3992 oprintf("([%s] : %s) ", it.from->m_label.c_str(),
3993 it.value->m_name.c_str());
3998 void ir_instr_dump(ir_instr *in, char *ind,
3999 int (*oprintf)(const char*, ...))
4002 const char *comma = nullptr;
4004 oprintf("%s (%i) ", ind, (int)in->m_eid);
4006 if (in->m_opcode == VINSTR_PHI) {
4007 dump_phi(in, oprintf);
4011 util_strncat(ind, "\t", IND_BUFSZ-1);
4013 if (in->_m_ops[0] && (in->_m_ops[1] || in->_m_ops[2])) {
4014 ir_value_dump(in->_m_ops[0], oprintf);
4015 if (in->_m_ops[1] || in->_m_ops[2])
4018 if (in->m_opcode == INSTR_CALL0 || in->m_opcode == VINSTR_NRCALL) {
4019 oprintf("CALL%i\t", in->m_params.size());
4021 oprintf("%s\t", qc_opname(in->m_opcode));
4023 if (in->_m_ops[0] && !(in->_m_ops[1] || in->_m_ops[2])) {
4024 ir_value_dump(in->_m_ops[0], oprintf);
4029 for (i = 1; i != 3; ++i) {
4030 if (in->_m_ops[i]) {
4033 ir_value_dump(in->_m_ops[i], oprintf);
4038 if (in->m_bops[0]) {
4041 oprintf("[%s]", in->m_bops[0]->m_label.c_str());
4045 oprintf("%s[%s]", comma, in->m_bops[1]->m_label.c_str());
4046 if (in->m_params.size()) {
4047 oprintf("\tparams: ");
4048 for (auto &it : in->m_params)
4049 oprintf("%s, ", it->m_name.c_str());
4052 ind[strlen(ind)-1] = 0;
4055 static void ir_value_dump_string(const char *str, int (*oprintf)(const char*, ...))
4058 for (; *str; ++str) {
4060 case '\n': oprintf("\\n"); break;
4061 case '\r': oprintf("\\r"); break;
4062 case '\t': oprintf("\\t"); break;
4063 case '\v': oprintf("\\v"); break;
4064 case '\f': oprintf("\\f"); break;
4065 case '\b': oprintf("\\b"); break;
4066 case '\a': oprintf("\\a"); break;
4067 case '\\': oprintf("\\\\"); break;
4068 case '"': oprintf("\\\""); break;
4069 default: oprintf("%c", *str); break;
4075 void ir_value_dump(ir_value* v, int (*oprintf)(const char*, ...))
4077 if (v->m_hasvalue) {
4078 switch (v->m_vtype) {
4084 oprintf("fn:%s", v->m_name.c_str());
4087 oprintf("%g", v->m_constval.vfloat);
4090 oprintf("'%g %g %g'",
4091 v->m_constval.vvec.x,
4092 v->m_constval.vvec.y,
4093 v->m_constval.vvec.z);
4096 oprintf("(entity)");
4099 ir_value_dump_string(v->m_constval.vstring, oprintf);
4103 oprintf("%i", v->m_constval.vint);
4108 v->m_constval.vpointer->m_name.c_str());
4112 oprintf("%s", v->m_name.c_str());
4116 void ir_value_dump_life(const ir_value *self, int (*oprintf)(const char*,...))
4118 oprintf("Life of %12s:", self->m_name.c_str());
4119 for (size_t i = 0; i < self->m_life.size(); ++i)
4121 oprintf(" + [%i, %i]\n", self->m_life[i].start, self->m_life[i].end);