2 * Copyright (C) 2012, 2013
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is furnished to do
10 * so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/* Bucket counts for the two string-immediate hash tables (translatable
 * and non-translatable strings are interned separately). */
29 #define FOLD_STRING_UNTRANSLATE_HTSIZE 1024
30 #define FOLD_STRING_DOTRANSLATE_HTSIZE 1024
33 * There are two stages to constant folding in GMQCC: there is the parse
34 * stage constant folding, where, with the help of the AST, operator
35 * usages can be constant folded. Then there is the constant folding
36 * in the IR, where things such as the eliding of if statements can occur.
38 * This file is thus split into two parts.
/* Type tests on the vtype of a node castable to ast_expression*. */
41 #define isfloat(X) (((ast_expression*)(X))->vtype == TYPE_FLOAT)
42 #define isvector(X) (((ast_expression*)(X))->vtype == TYPE_VECTOR)
43 #define isstring(X) (((ast_expression*)(X))->vtype == TYPE_STRING)
44 #define isfloats(X,Y) (isfloat (X) && isfloat (Y))
47 * Implementation of basic vector math for vec3_t, for trivial constant
50 * TODO: gcc/clang hinting for autovectorization
/* Component-wise vector addition: a + b. */
52 static GMQCC_INLINE vec3_t vec3_add(vec3_t a, vec3_t b) {
/* Component-wise vector subtraction: a - b. */
60 static GMQCC_INLINE vec3_t vec3_sub(vec3_t a, vec3_t b) {
/* Component-wise vector negation: -a. */
68 static GMQCC_INLINE vec3_t vec3_neg(vec3_t a) {
/*
 * Bitwise OR of two vectors: each qcfloat_t component is truncated to
 * qcint_t, OR'd, and converted back to qcfloat_t (QuakeC semantics).
 */
76 static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
78 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
79 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
80 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
/* Bitwise OR of each vector component with a single float scalar. */
84 static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
86 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
87 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
88 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
/* Bitwise AND of two vectors, component-wise via qcint_t truncation. */
92 static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
94 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
95 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
96 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
/* Bitwise AND of each vector component with a single float scalar. */
100 static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
102 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
103 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
104 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
/* Bitwise XOR of two vectors, component-wise via qcint_t truncation. */
108 static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
110 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
111 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b.y));
112 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b.z));
/* Bitwise XOR of each vector component with a single float scalar. */
116 static GMQCC_INLINE vec3_t vec3_xorvf(vec3_t a, qcfloat_t b) {
118 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b));
119 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b));
120 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b));
/* Bitwise complement of each component (truncated to qcint_t first). */
124 static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
126 out.x = (qcfloat_t)(~((qcint_t)a.x));
127 out.y = (qcfloat_t)(~((qcint_t)a.y));
128 out.z = (qcfloat_t)(~((qcint_t)a.z));
/* Dot product of two vectors (QuakeC '*' applied to two vectors). */
132 static GMQCC_INLINE qcfloat_t vec3_mulvv(vec3_t a, vec3_t b) {
133 return (a.x * b.x + a.y * b.y + a.z * b.z);
/* Vector scaled by a float scalar. */
136 static GMQCC_INLINE vec3_t vec3_mulvf(vec3_t a, qcfloat_t b) {
/* Component-wise equality test of two vectors. */
144 static GMQCC_INLINE bool vec3_cmp(vec3_t a, vec3_t b) {
/* Construct a vec3_t from three float components. */
150 static GMQCC_INLINE vec3_t vec3_create(float x, float y, float z) {
/* Logical NOT of a vector: 1.0 only when every component is zero. */
158 static GMQCC_INLINE qcfloat_t vec3_notf(vec3_t a) {
159 return (!a.x && !a.y && !a.z);
/* Truthiness of a vector: true only when all components are non-zero. */
162 static GMQCC_INLINE bool vec3_pbool(vec3_t a) {
163 return (a.x && a.y && a.z);
/*
 * Best-effort source context for diagnostics: the parser's lexer
 * context when a lexer exists, otherwise a zero-initialized lex_ctx_t.
 */
166 static lex_ctx_t fold_ctx(fold_t *fold) {
168 if (fold->parser->lex)
169 return parser_ctx(fold->parser);
171 memset(&ctx, 0, sizeof(ctx));
/*
 * Evaluate the truthiness of a constant immediate, honouring the
 * CORRECT_LOGIC flag for vectors and TRUE_EMPTY_STRINGS for strings.
 * Reports an internal error for types that cannot be tested.
 */
175 static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
176 switch (v->expression.vtype) {
178 return !!v->constval.vfloat;
180 return !!v->constval.vint;
182 if (OPTS_FLAG(CORRECT_LOGIC))
/* correct logic: all three components must be non-zero */
183 return vec3_pbool(v->constval.vvec);
/* legacy QuakeC logic tests only the x component */
184 return !!(v->constval.vvec.x);
186 if (!v->constval.vstring)
188 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
190 return !!v->constval.vstring[0];
192 compile_error(fold_ctx(fold), "internal error: fold_immediate_true on invalid type");
195 return !!v->constval.vfunc;
198 /* Handy macros to determine if an ast_value can be constant folded. */
/* foldable: a const-qualified ast_value with a known value, not a function */
199 #define fold_can_1(X) \
200 (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
201 ((ast_expression*)(X))->vtype != TYPE_FUNCTION)
203 #define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
/* accessors for the immediate constant value of a foldable ast_value */
205 #define fold_immvalue_float(E) ((E)->constval.vfloat)
206 #define fold_immvalue_vector(E) ((E)->constval.vvec)
207 #define fold_immvalue_string(E) ((E)->constval.vstring)
/*
 * Allocate and initialize the fold state for a parser: creates the two
 * string-immediate hash tables and pre-seeds the float pool with 0, 1
 * and -1 (imm_float[0..2], relied upon by e.g. fold_op_lteqgt) and the
 * vector pool with common constants.
 */
209 fold_t *fold_init(parser_t *parser) {
210 fold_t *fold = (fold_t*)mem_a(sizeof(fold_t));
211 fold->parser = parser;
212 fold->imm_float = NULL;
213 fold->imm_vector = NULL;
214 fold->imm_string = NULL;
215 fold->imm_string_untranslate = util_htnew(FOLD_STRING_UNTRANSLATE_HTSIZE);
216 fold->imm_string_dotranslate = util_htnew(FOLD_STRING_DOTRANSLATE_HTSIZE);
219 * prime the tables with common constant values at constant
222 (void)fold_constgen_float (fold, 0.0f);
223 (void)fold_constgen_float (fold, 1.0f);
224 (void)fold_constgen_float (fold, -1.0f);
226 (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
227 (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));
/*
 * Generate IR globals for every folded immediate (floats, vectors,
 * strings). On failure, reports the offending global by name, destroys
 * the IR builder, and bails out through the error path.
 */
232 bool fold_generate(fold_t *fold, ir_builder *ir) {
233 /* generate globals for immediate folded values */
237 for (i = 0; i < vec_size(fold->imm_float); ++i)
238 if (!ast_global_codegen ((cur = fold->imm_float[i]), ir, false)) goto err;
239 for (i = 0; i < vec_size(fold->imm_vector); ++i)
240 if (!ast_global_codegen((cur = fold->imm_vector[i]), ir, false)) goto err;
241 for (i = 0; i < vec_size(fold->imm_string); ++i)
242 if (!ast_global_codegen((cur = fold->imm_string[i]), ir, false)) goto err;
247 con_out("failed to generate global %s\n", cur->name);
248 ir_builder_delete(ir);
/*
 * Destroy the fold state: delete every immediate ast_value, free the
 * immediate vectors, and delete both string hash tables.
 */
252 void fold_cleanup(fold_t *fold) {
255 for (i = 0; i < vec_size(fold->imm_float); ++i) ast_delete(fold->imm_float[i]);
256 for (i = 0; i < vec_size(fold->imm_vector); ++i) ast_delete(fold->imm_vector[i]);
257 for (i = 0; i < vec_size(fold->imm_string); ++i) ast_delete(fold->imm_string[i]);
259 vec_free(fold->imm_float);
260 vec_free(fold->imm_vector);
261 vec_free(fold->imm_string);
263 util_htdel(fold->imm_string_untranslate);
264 util_htdel(fold->imm_string_dotranslate);
/*
 * Intern a float immediate: linear-search the pool for an existing
 * #IMMEDIATE with the same value, otherwise create and register one.
 * NOTE(review): the == compare means -0.0f aliases 0.0f and a NaN
 * value never matches itself -- confirm this is intended.
 */
269 ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value) {
270 ast_value *out = NULL;
273 for (i = 0; i < vec_size(fold->imm_float); i++) {
274 if (fold->imm_float[i]->constval.vfloat == value)
275 return (ast_expression*)fold->imm_float[i];
278 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_FLOAT);
280 out->hasvalue = true;
281 out->constval.vfloat = value;
283 vec_push(fold->imm_float, out);
285 return (ast_expression*)out;
/*
 * Intern a vector immediate: reuse an existing #IMMEDIATE whose value
 * compares equal via vec3_cmp, otherwise create and register one.
 */
288 ast_expression *fold_constgen_vector(fold_t *fold, vec3_t value) {
292 for (i = 0; i < vec_size(fold->imm_vector); i++) {
293 if (vec3_cmp(fold->imm_vector[i]->constval.vvec, value))
294 return (ast_expression*)fold->imm_vector[i];
297 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_VECTOR);
299 out->hasvalue = true;
300 out->constval.vvec = value;
302 vec_push(fold->imm_vector, out);
304 return (ast_expression*)out;
/*
 * Intern a string immediate, keyed by content in one of two hash
 * tables depending on whether the string is translatable. Translatable
 * strings get a unique "dotranslate_<n>" name and a def entry.
 * NOTE(review): the table selection looks inverted relative to the
 * member names (translate selects imm_string_untranslate) -- verify
 * against the rest of the codebase before changing.
 */
307 ast_expression *fold_constgen_string(fold_t *fold, const char *str, bool translate) {
308 hash_table_t *table = (translate) ? fold->imm_string_untranslate : fold->imm_string_dotranslate;
309 ast_value *out = NULL;
310 size_t hash = util_hthash(table, str);
/* already interned: reuse */
312 if ((out = (ast_value*)util_htgeth(table, str, hash)))
313 return (ast_expression*)out;
317 util_snprintf(name, sizeof(name), "dotranslate_%lu", (unsigned long)(fold->parser->translated++));
318 out = ast_value_new(parser_ctx(fold->parser), name, TYPE_STRING);
319 out->expression.flags |= AST_FLAG_INCLUDE_DEF; /* def needs to be included for translatables */
321 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_STRING);
324 out->hasvalue = true;
326 out->constval.vstring = parser_strdup(str);
328 vec_push(fold->imm_string, out);
329 util_htseth(table, str, hash, out);
331 return (ast_expression*)out;
335 static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, ast_value *sel, const char *set) {
337 * vector-component constant folding works by matching the component sets
338 * to eliminate expensive operations on whole-vectors (3 components at runtime).
339 * to achieve this effect in a clean manner this function generalizes the
340 * values through the use of a set parameter, which is used as an indexing method
341 * for creating the elided ast binary expression.
343 * Consider 'n 0 0' where y, and z need to be tested for 0, and x is
344 * used as the value in a binary operation generating an INSTR_MUL instruction,
345 * to accomplish the indexing of the correct component value we use set[0], set[1], set[2]
346 * as x, y, z, where the values of those operations return 'x', 'y', 'z'. Because
347 * of how ASCII works we can easily delineate:
348 * vec.z is the same as set[2]-'x' for when set[2] is 'z', 'z'-'x' results in a
349 * literal value of 2, using this 2, we know that taking the address of vec->x (float)
350 * and indexing it with this literal will yield the immediate address of that component
352 * Of course more work needs to be done to generate the correct index for the ast_member_new
353 * call, which is no problem: set[0]-'x' suffices that job.
/* remap the components through the permutation given by 'set' */
355 qcfloat_t x = (&vec.x)[set[0]-'x'];
356 qcfloat_t y = (&vec.x)[set[1]-'x'];
357 qcfloat_t z = (&vec.x)[set[2]-'x'];
361 ++opts_optimizationcount[OPTIM_VECTOR_COMPONENTS];
/* access the single relevant component of 'sel' as an rvalue member */
362 out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
363 out->node.keep = false;
364 ((ast_member*)out)->rvalue = true;
/* elide the whole-vector multiply into a single float INSTR_MUL_F */
366 return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x), out);
/* -a: fold negation of a constant float or vector operand. */
372 static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
375 return fold_constgen_float(fold, -fold_immvalue_float(a));
376 } else if (isvector(a)) {
378 return fold_constgen_vector(fold, vec3_neg(fold_immvalue_vector(a)));
/*
 * !a: logical not for constant float, vector and string operands;
 * string truthiness honours the TRUE_EMPTY_STRINGS flag.
 */
383 static GMQCC_INLINE ast_expression *fold_op_not(fold_t *fold, ast_value *a) {
386 return fold_constgen_float(fold, !fold_immvalue_float(a));
387 } else if (isvector(a)) {
389 return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)));
390 } else if (isstring(a)) {
392 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
/* when empty strings are true, only a null string is false */
393 return fold_constgen_float(fold, !fold_immvalue_string(a));
395 return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a));
/* a + b: fold when both operands are constant floats or vectors. */
401 static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_value *b) {
403 if (fold_can_2(a, b))
404 return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b));
405 } else if (isvector(a)) {
406 if (fold_can_2(a, b))
407 return fold_constgen_vector(fold, vec3_add(fold_immvalue_vector(a), fold_immvalue_vector(b)));
/* a - b: fold when both operands are constant floats or vectors. */
412 static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_value *b) {
414 if (fold_can_2(a, b))
415 return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b));
416 } else if (isvector(a)) {
417 if (fold_can_2(a, b))
418 return fold_constgen_vector(fold, vec3_sub(fold_immvalue_vector(a), fold_immvalue_vector(b)));
/*
 * a * b: folds float*float, float*vector and vector*float (scale), and
 * vector*vector (dot product). When only one vector operand is a
 * foldable constant, tries the single-component elision in
 * fold_op_mul_vec for each component ordering.
 */
423 static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
426 if (fold_can_2(a, b))
427 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
429 if (fold_can_2(a, b))
430 return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b));
432 } else if (isvector(a)) {
434 if (fold_can_2(a, b))
435 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
437 if (fold_can_2(a, b)) {
438 return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)));
439 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(a)) {
441 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "xyz"))) return out;
442 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "yxz"))) return out;
443 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "zxy"))) return out;
444 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(b)) {
446 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "xyz"))) return out;
447 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "yxz"))) return out;
448 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "zxy"))) return out;
/*
 * a / b: folds constant float division and vector/float division.
 * When only the divisor is a foldable constant, the division is
 * rewritten into a multiplication by the reciprocal.
 * NOTE(review): no divide-by-zero guard is visible here -- a constant
 * b == 0 yields inf/nan at compile time; confirm diagnostics for this
 * are emitted elsewhere.
 */
455 static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
457 if (fold_can_2(a, b))
458 return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b));
459 } else if (isvector(a)) {
460 if (fold_can_2(a, b))
461 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
463 return (ast_expression*)ast_binary_new(
468 ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
469 : (ast_expression*)ast_binary_new(
/* imm_float[1] is the pre-seeded 1.0f immediate from fold_init */
472 (ast_expression*)fold->imm_float[1],
/* a % b: modulo on constant floats truncated through qcint_t. */
481 static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
482 if (fold_can_2(a, b))
483 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) % ((qcint_t)fold_immvalue_float(b))));
/* a | b: bitwise OR for float|float, vector|vector and vector|float. */
487 static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
489 if (fold_can_2(a, b))
490 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
493 if (fold_can_2(a, b))
494 return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
496 if (fold_can_2(a, b))
497 return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
/* a & b: bitwise AND for float&float, vector&vector and vector&float. */
503 static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
505 if (fold_can_2(a, b))
506 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
509 if (fold_can_2(a, b))
510 return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
512 if (fold_can_2(a, b))
513 return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
/* a ^ b: bitwise XOR for float^float, vector^vector and vector^float. */
519 static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_value *b) {
521 if (fold_can_2(a, b))
522 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))));
525 if (fold_can_2(a, b))
526 return fold_constgen_vector(fold, vec3_xor(fold_immvalue_vector(a), fold_immvalue_vector(b)));
528 if (fold_can_2(a, b))
529 return fold_constgen_vector(fold, vec3_xorvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
/* a << b: left shift on constant floats via unsigned truncation. */
535 static GMQCC_INLINE ast_expression *fold_op_lshift(fold_t *fold, ast_value *a, ast_value *b) {
536 if (fold_can_2(a, b) && isfloats(a, b))
537 return fold_constgen_float(fold, (qcfloat_t)((qcuint_t)(fold_immvalue_float(a)) << (qcuint_t)(fold_immvalue_float(b))));
/* a >> b: right shift on constant floats via unsigned truncation. */
541 static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, ast_value *b) {
542 if (fold_can_2(a, b) && isfloats(a, b))
543 return fold_constgen_float(fold, (qcfloat_t)((qcuint_t)(fold_immvalue_float(a)) >> (qcuint_t)(fold_immvalue_float(b))));
/*
 * a || b and a && b ('expr' non-zero selects OR). With PERL_LOGIC the
 * operator yields an operand rather than a 0/1 float.
 */
547 static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
548 if (fold_can_2(a, b)) {
549 if (OPTS_FLAG(PERL_LOGIC)) {
550 if (fold_immediate_true(fold, a))
551 return (ast_expression*)b;
553 return fold_constgen_float (
555 ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
556 : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
/* a ? b : c: select a branch from the constant condition's truthiness. */
565 static GMQCC_INLINE ast_expression *fold_op_tern(fold_t *fold, ast_value *a, ast_value *b, ast_value *c) {
567 return fold_immediate_true(fold, a)
569 : (ast_expression*)c;
/* a ** b: constant exponentiation via powf. */
574 static GMQCC_INLINE ast_expression *fold_op_exp(fold_t *fold, ast_value *a, ast_value *b) {
575 if (fold_can_2(a, b))
576 return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)));
/*
 * a <=> b: three-way compare returning the pre-seeded immediates from
 * fold_init: imm_float[2] (-1) for less, imm_float[0] (0) for equal,
 * imm_float[1] (1) for greater.
 */
580 static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, ast_value *b) {
581 if (fold_can_2(a,b)) {
582 if (fold_immvalue_float(a) < fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[2];
583 if (fold_immvalue_float(a) == fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[0];
584 if (fold_immvalue_float(a) > fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[1];
/* a == b / a != b on constant floats ('ne' selects inequality). */
589 static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_value *b, bool ne) {
590 if (fold_can_2(a, b)) {
591 return fold_constgen_float(
593 (ne) ? (fold_immvalue_float(a) != fold_immvalue_float(b))
594 : (fold_immvalue_float(a) == fold_immvalue_float(b))
/* ~a: bitwise complement of a constant float (via qcint_t) or of each
 * component of a constant vector. */
600 static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
603 return fold_constgen_float(fold, ~((qcint_t)fold_immvalue_float(a)));
607 return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
/*
 * Parse-stage operator dispatch for constant folding: validates that
 * the operator's operand slots are populated, then dispatches to the
 * matching fold_op_* helper. A successful fold increments the
 * OPTIM_CONST_FOLD counter; NULL means "not folded".
 */
613 ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
614 ast_value *a = (ast_value*)opexprs[0];
615 ast_value *b = (ast_value*)opexprs[1];
616 ast_value *c = (ast_value*)opexprs[2];
617 ast_expression *e = NULL;
619 /* can a fold operation be applied to this operator usage? */
623 switch(info->operands) {
624 case 3: if(!c) return NULL; /* fallthrough */
625 case 2: if(!b) return NULL; /* fallthrough */
628 compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
634 * we could use a boolean and default case but ironically gcc produces
635 * invalid broken assembly from that operation. clang/tcc get it right,
636 * but interestingly ignore compiling this to a jump-table when I do that,
637 * this happens to be the most efficient method, since you have per-level
638 * granularity on the pointer check happening only for the case you check
639 * it in. Opposed to the default method which would involve a boolean and
640 * pointer check afterwards.
642 #define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD) \
643 case opid##ARGS ARGS_OPID: \
644 if ((e = fold_op_##OP ARGS_FOLD)) { \
645 ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
650 fold_op_case(2, ('-', 'P'), neg, (fold, a));
651 fold_op_case(2, ('!', 'P'), not, (fold, a));
652 fold_op_case(1, ('+'), add, (fold, a, b));
653 fold_op_case(1, ('-'), sub, (fold, a, b));
654 fold_op_case(1, ('*'), mul, (fold, a, b));
655 fold_op_case(1, ('/'), div, (fold, a, b));
656 fold_op_case(1, ('%'), mod, (fold, a, b));
657 fold_op_case(1, ('|'), bor, (fold, a, b));
658 fold_op_case(1, ('&'), band, (fold, a, b));
659 fold_op_case(1, ('^'), xor, (fold, a, b));
660 fold_op_case(2, ('<', '<'), lshift, (fold, a, b));
661 fold_op_case(2, ('>', '>'), rshift, (fold, a, b));
662 fold_op_case(2, ('|', '|'), andor, (fold, a, b, true));
663 fold_op_case(2, ('&', '&'), andor, (fold, a, b, false));
664 fold_op_case(2, ('?', ':'), tern, (fold, a, b, c));
665 fold_op_case(2, ('*', '*'), exp, (fold, a, b));
666 fold_op_case(3, ('<','=','>'), lteqgt, (fold, a, b));
667 fold_op_case(2, ('!', '='), cmp, (fold, a, b, true));
668 fold_op_case(2, ('=', '='), cmp, (fold, a, b, false));
669 fold_op_case(2, ('~', 'P'), bnot, (fold, a));
672 compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
677 * These are all the actual constant folding methods that happen in between
678 * the AST/IR stage of the compiler, i.e. eliminating branches for constant
679 * expressions, which is the only supported thing so far. We undefine the
680 * testing macros here because an ir_value is different from an ast_value.
685 #undef fold_immvalue_float
686 #undef fold_immvalue_string
687 #undef fold_immvalue_vector
/* Redefined for IR-stage folding: these operate on ir_value operands
 * (which have vtype/hasvalue/cvq directly) instead of ast_value. */
691 #define isfloat(X) ((X)->vtype == TYPE_FLOAT)
692 /*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
693 /*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
694 #define fold_immvalue_float(X) ((X)->constval.vfloat)
695 /*#define fold_immvalue_vector(X) ((X)->constval.vvec)*/
696 /*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
697 #define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
698 /*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
/*
 * IR-stage dead-code elimination for constant branch conditions: when
 * the condition is a constant float and OPTIM_CONST_FOLD_DCE is
 * enabled, generate only the taken branch into a fresh 'elide' block
 * and jump straight to it. Returns -1 when nothing was done.
 * NOTE(review): 'istrue' requires the value to be exactly 1.0f rather
 * than merely non-zero -- confirm other non-zero constants are
 * normalized or handled before reaching this point.
 */
701 int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
702 if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
703 ast_expression_codegen *cgen;
706 bool istrue = (fold_immvalue_float(condval) == 1.0f && branch->on_true);
707 bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
708 ast_expression *path = (istrue) ? branch->on_true :
709 (isfalse) ? branch->on_false : NULL;
712 if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
714 if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
716 if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
719 * now the branch has been eliminated and the correct block for the constant evaluation
720 * is expanded into the current block for the function.
722 func->curblock = elide;
723 ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
726 return -1; /* nothing done */