2 * Copyright (C) 2012, 2013, 2014
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is furnished to do
10 * so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 #define FOLD_STRING_UNTRANSLATE_HTSIZE 1024
30 #define FOLD_STRING_DOTRANSLATE_HTSIZE 1024
33 * The constant folder is also responsible for validating whether the constant
34 * expressions produce valid results. We cannot trust the FPU control
35 * unit for these exceptions because setting FPU control words might not
36 * work. Systems can set and enforce their own FPU modes of operation, and
37 * it's also valid for a libc to simply ignore FPU exceptions; glibc on ARM
38 * CPUs does, for instance. We implement some trivial, IEEE 754 conformant
39 * functions which emulate those operations. This is an entirely optional
40 * compiler feature which shouldn't be enabled for anything other than
41 * performing strict passes on constant expressions, since it's quite slow.
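 * For example, with OPTS_FLAG(ARITHMETIC_EXCEPTIONS) enabled, a constant
 * expression that divides by zero is evaluated through sfloat_div below and
 * reported by sfloat_check as a compile-time "division by zero" error.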
43 typedef uint32_t sfloat_t;
54 SFLOAT_UNDERFLOW = 16,
56 } sfloat_exceptionflags_t;
59 SFLOAT_ROUND_NEAREST_EVEN,
63 } sfloat_roundingmode_t;
71 sfloat_roundingmode_t roundingmode;
72 sfloat_exceptionflags_t exceptionflags;
73 sfloat_tdetect_t tiny;
76 /* Count of leading zero bits before the most-significant 1 bit. */
78 /* MSVC has an intrinsic for this */
79 static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
81 _BitScanForward(&r, x);
84 # define SFLOAT_CLZ(X, SUB) \
85 (sfloat_clz((X)) - (SUB))
86 #elif defined(__GNUC__) || defined(__clang__)
87 /* Clang and GCC have a builtin for this */
88 # define SFLOAT_CLZ(X, SUB) \
89 (__builtin_clz((X)) - (SUB))
92 static GMQCC_INLINE uint32_t sfloat_popcnt(uint32_t x) {
93 x -= ((x >> 1) & 0x55555555);
94 x = (((x >> 2) & 0x33333333) + (x & 0x33333333));
95 x = (((x >> 4) + x) & 0x0F0F0F0F);
98 return x & 0x0000003F;
100 static GMQCC_INLINE uint32_t sfloat_clz(uint32_t x) {
106 return 32 - sfloat_popcnt(x);
108 # define SFLOAT_CLZ(X, SUB) \
109     (sfloat_clz((X)) - (SUB))
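/*
 * For illustration: sfloat_clz(0x00800000) is 8, so SFLOAT_CLZ(sig, 8) is 0
 * once a significand's implicit-one bit (bit 23) is in place, while
 * SFLOAT_CLZ(sig, 1) measures how far the top bit sits from bit 30, which is
 * what the normalizing pack routine below wants.
 */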
112 /* The value of a NaN */
113 #define SFLOAT_NAN 0xFFC00000
115 #define SFLOAT_ISNAN(A) \
116 (0xFF000000 < (uint32_t)((A) << 1))
117 /* Test if signaling NaN */
118 #define SFLOAT_ISSNAN(A) \
119 (((((A) >> 22) & 0x1FF) == 0x1FE) && ((A) & 0x003FFFFF))
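/*
 * For illustration: SFLOAT_NAN (0xFFC00000) has the sign bit, an all-ones
 * exponent and the quiet bit (bit 22) set. SFLOAT_ISNAN(0x7FC00000) holds
 * because (A << 1) is 0xFF800000, above 0xFF000000, while an infinity such as
 * 0x7F800000 does not qualify. A signaling NaN like 0x7F800001 keeps the
 * quiet bit clear with a non-zero fraction, which SFLOAT_ISSNAN detects.
 */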
120 /* Raise exception */
121 #define SFLOAT_RAISE(STATE, FLAGS) \
122 ((STATE)->exceptionflags |= (FLAGS))
124  * Shifts `A' right by `COUNT' bits; any non-zero bits shifted out are ORed
125  * into the LSB as a sticky bit. SIZE is the bit width of `A'.
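 * For example, SFLOAT_SHIFT(32, 0x14, 3, &z) stores 0x3: 0x14 >> 3 is 0x2 and
 * the bits shifted out (100b) are non-zero, so the LSB is set as a sticky bit
 * for the rounding code to inspect later.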
127 #define SFLOAT_SHIFT(SIZE, A, COUNT, Z) \
128 *(Z) = ((COUNT) == 0) \
130 : (((COUNT) < (SIZE)) \
131 ? ((A) >> (COUNT)) | (((A) << ((-(COUNT)) & ((SIZE) - 1))) != 0) \
133 /* Extract fractional component */
134 #define SFLOAT_EXTRACT_FRAC(X) \
135 ((uint32_t)((X) & 0x007FFFFF))
136 /* Extract exponent component */
137 #define SFLOAT_EXTRACT_EXP(X) \
138 ((int16_t)((X) >> 23) & 0xFF)
139 /* Extract sign bit */
140 #define SFLOAT_EXTRACT_SIGN(X) \
142 /* Normalize a subnormal */
143 #define SFLOAT_SUBNORMALIZE(SA, Z, SZ) \
144     (void)(*(SZ) = (SA) << SFLOAT_CLZ((SA), 8), *(Z) = 1 - SFLOAT_CLZ((SA), 8))
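/*
 * For illustration: decomposing 1.5f (0x3FC00000) with the macros above gives
 * a fraction of 0x400000, an exponent field of 0x7F and a clear sign bit.
 * Normalizing the smallest subnormal fraction, 0x000001, shifts it up to
 * 0x800000 and yields an exponent of 1 - 23 = -22 for the packing code.
 */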
146 * Pack sign, exponent and significand and produce a float.
148 * Integer portions of the significand are added to the exponent. The
149 * exponent input should be one less than the result exponent whenever
150 * the significand is normalized, since a normalized significand will
151 * always have an integer portion of value one.
153 #define SFLOAT_PACK(SIGN, EXP, SIG) \
154 (sfloat_t)((((uint32_t)(SIGN)) << 31) + (((uint32_t)(EXP)) << 23) + (SIG))
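/*
 * For illustration: SFLOAT_PACK(0, 0x7F, 0) is 0x3F800000 (1.0f) and
 * SFLOAT_PACK(1, 0x80, 0x400000) is 0xC0400000 (-3.0f). Because the fields
 * are added rather than ORed, a carry out of the significand bumps the
 * exponent field, which the rounding code below relies on.
 */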
156 /* Compute the NaN to propagate. If either operand is signaling, raise an invalid exception. */
157 static sfloat_t sfloat_propagate_nan(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
158 bool isnan_a = SFLOAT_ISNAN(a);
159 bool issnan_a = SFLOAT_ISSNAN(a);
160 bool isnan_b = SFLOAT_ISNAN(b);
161 bool issnan_b = SFLOAT_ISSNAN(b);
166 if (issnan_a | issnan_b)
167         SFLOAT_RAISE(state, SFLOAT_INVALID);
171 return isnan_b ? b : a;
172 } else if (isnan_a) {
173 if (issnan_b | !isnan_b)
176 if ((uint32_t)(a << 1) < (uint32_t)(b << 1)) return b;
177 if ((uint32_t)(b << 1) < (uint32_t)(a << 1)) return a;
178 return (a < b) ? a : b;
184 static sfloat_t SFLOAT_PACK_round(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
185 sfloat_roundingmode_t mode = state->roundingmode;
186 bool even = !!(mode == SFLOAT_ROUND_NEAREST_EVEN);
187 unsigned char increment = 0x40;
188 unsigned char bits = sig_z & 0x7F;
191 if (mode == SFLOAT_ROUND_TO_ZERO)
196 if (mode == SFLOAT_ROUND_UP)
199 if (mode == SFLOAT_ROUND_DOWN)
205 if (0xFD <= (uint16_t)exp_z) {
206 if ((0xFD < exp_z) || ((exp_z == 0xFD) && ((int32_t)(sig_z + increment) < 0))) {
207 SFLOAT_RAISE(state, SFLOAT_OVERFLOW | SFLOAT_INEXACT);
208 return SFLOAT_PACK(sign_z, 0xFF, 0) - (increment == 0);
211 /* Check for underflow */
212 bool tiny = (state->tiny == SFLOAT_TBEFORE) || (exp_z < -1) || (sig_z + increment < 0x80000000);
213 SFLOAT_SHIFT(32, sig_z, -exp_z, &sig_z);
217 SFLOAT_RAISE(state, SFLOAT_UNDERFLOW);
222  * The significand has its binary point between bits 30 and 29, 7 bits to
223  * the left of the usual place. This shifted significand must be normalized
224  * or smaller; if it isn't normalized the exponent must be zero, in which
225  * case no rounding occurs since the result will be a subnormal.
228 SFLOAT_RAISE(state, SFLOAT_INEXACT);
229 sig_z = (sig_z + increment) >> 7;
230 sig_z &= ~(((bits ^ 0x40) == 0) & even);
233 return SFLOAT_PACK(sign_z, exp_z, sig_z);
236 /* Normalize, then round and pack */
237 static sfloat_t SFLOAT_PACK_normal(sfloat_state_t *state, bool sign_z, int16_t exp_z, uint32_t sig_z) {
238 unsigned char c = SFLOAT_CLZ(sig_z, 1);
239 return SFLOAT_PACK_round(state, sign_z, exp_z - c, sig_z << c);
242 static sfloat_t sfloat_add_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
243 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
244 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
246 int16_t exp_d = exp_a - exp_b;
247 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 6;
248 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 6;
253 return sig_a ? sfloat_propagate_nan(state, a, b) : a;
258 SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
260 } else if (exp_d < 0) {
262 return sig_b ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0xFF, 0);
267 SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
271 return (sig_a | sig_b) ? sfloat_propagate_nan(state, a, b) : a;
273 return SFLOAT_PACK(sign_z, 0, (sig_a + sig_b) >> 6);
274 sig_z = 0x40000000 + sig_a + sig_b;
279 sig_z = (sig_a + sig_b) << 1;
281 if ((int32_t)sig_z < 0) {
282 sig_z = sig_a + sig_b;
286 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
289 static sfloat_t sfloat_sub_impl(sfloat_state_t *state, sfloat_t a, sfloat_t b, bool sign_z) {
290 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
291 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
293 int16_t exp_d = exp_a - exp_b;
294 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a) << 7;
295 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b) << 7;
298 if (0 < exp_d) goto exp_greater_a;
299 if (exp_d < 0) goto exp_greater_b;
303 return sfloat_propagate_nan(state, a, b);
304 SFLOAT_RAISE(state, SFLOAT_INVALID);
311 if (sig_b < sig_a) goto greater_a;
312 if (sig_a < sig_b) goto greater_b;
314 return SFLOAT_PACK(state->roundingmode == SFLOAT_ROUND_DOWN, 0, 0);
318 return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z ^ 1, 0xFF, 0);
323 SFLOAT_SHIFT(32, sig_a, -exp_d, &sig_a);
326 sig_z = sig_b - sig_a;
333 return (sig_a) ? sfloat_propagate_nan(state, a, b) : a;
338 SFLOAT_SHIFT(32, sig_b, exp_d, &sig_b);
341 sig_z = sig_a - sig_b;
346 return SFLOAT_PACK_normal(state, sign_z, exp_z, sig_z);
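/*
 * The wrappers below dispatch on the operand signs: equal signs add the
 * magnitudes, differing signs subtract them, with the sign of `a' carried
 * through as the provisional result sign.
 */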
349 static GMQCC_INLINE sfloat_t sfloat_add(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
350 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
351 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
352 return (sign_a == sign_b) ? sfloat_add_impl(state, a, b, sign_a)
353 : sfloat_sub_impl(state, a, b, sign_a);
356 static GMQCC_INLINE sfloat_t sfloat_sub(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
357 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
358 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
359 return (sign_a == sign_b) ? sfloat_sub_impl(state, a, b, sign_a)
360 : sfloat_add_impl(state, a, b, sign_a);
363 static sfloat_t sfloat_mul(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
364 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
365 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
367 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
368 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
370 uint64_t sig_z64 = 0;
371 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
372 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
373 bool sign_z = sign_a ^ sign_b;
376 if (sig_a || ((exp_b == 0xFF) && sig_b))
377 return sfloat_propagate_nan(state, a, b);
378 if ((exp_b | sig_b) == 0) {
379 SFLOAT_RAISE(state, SFLOAT_INVALID);
382 return SFLOAT_PACK(sign_z, 0xFF, 0);
386 return sfloat_propagate_nan(state, a, b);
387 if ((exp_a | sig_a) == 0) {
388 SFLOAT_RAISE(state, SFLOAT_INVALID);
391 return SFLOAT_PACK(sign_z, 0xFF, 0);
395 return SFLOAT_PACK(sign_z, 0, 0);
396 SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
400 return SFLOAT_PACK(sign_z, 0, 0);
401 SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
403 exp_z = exp_a + exp_b - 0x7F;
404 sig_a = (sig_a | 0x00800000) << 7;
405 sig_b = (sig_b | 0x00800000) << 8;
406 SFLOAT_SHIFT(64, ((uint64_t)sig_a) * sig_b, 32, &sig_z64);
408 if (0 <= (int32_t)(sig_z << 1)) {
412 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
415 static sfloat_t sfloat_div(sfloat_state_t *state, sfloat_t a, sfloat_t b) {
416 int16_t exp_a = SFLOAT_EXTRACT_EXP(a);
417 int16_t exp_b = SFLOAT_EXTRACT_EXP(b);
419 uint32_t sig_a = SFLOAT_EXTRACT_FRAC(a);
420 uint32_t sig_b = SFLOAT_EXTRACT_FRAC(b);
422 bool sign_a = SFLOAT_EXTRACT_SIGN(a);
423 bool sign_b = SFLOAT_EXTRACT_SIGN(b);
424 bool sign_z = sign_a ^ sign_b;
428 return sfloat_propagate_nan(state, a, b);
431 return sfloat_propagate_nan(state, a, b);
432 SFLOAT_RAISE(state, SFLOAT_INVALID);
435 return SFLOAT_PACK(sign_z, 0xFF, 0);
438 return (sig_b) ? sfloat_propagate_nan(state, a, b) : SFLOAT_PACK(sign_z, 0, 0);
441 if ((exp_a | sig_a) == 0) {
442 SFLOAT_RAISE(state, SFLOAT_INVALID);
445 SFLOAT_RAISE(state, SFLOAT_DIVBYZERO);
446 return SFLOAT_PACK(sign_z, 0xFF, 0);
448 SFLOAT_SUBNORMALIZE(sig_b, &exp_b, &sig_b);
452 return SFLOAT_PACK(sign_z, 0, 0);
453 SFLOAT_SUBNORMALIZE(sig_a, &exp_a, &sig_a);
455 exp_z = exp_a - exp_b + 0x7D;
456 sig_a = (sig_a | 0x00800000) << 7;
457 sig_b = (sig_b | 0x00800000) << 8;
458 if (sig_b <= (sig_a + sig_a)) {
462 sig_z = (((uint64_t)sig_a) << 32) / sig_b;
463 if ((sig_z & 0x3F) == 0)
464 sig_z |= ((uint64_t)sig_b * sig_z != ((uint64_t)sig_a) << 32);
465 return SFLOAT_PACK_round(state, sign_z, exp_z, sig_z);
468 static GMQCC_INLINE void sfloat_check(lex_ctx_t ctx, sfloat_state_t *state, const char *vec) {
469 /* Exception comes from vector component */
471 if (state->exceptionflags & SFLOAT_DIVBYZERO)
472 compile_error(ctx, "division by zero in `%s' component", vec);
473 if (state->exceptionflags & SFLOAT_INVALID)
474 compile_error(ctx, "undefined (inf) in `%s' component", vec);
475 if (state->exceptionflags & SFLOAT_OVERFLOW)
476 compile_error(ctx, "arithmetic overflow in `%s' component", vec);
477 if (state->exceptionflags & SFLOAT_UNDERFLOW)
478 compile_error(ctx, "arithmetic underflow in `%s' component", vec);
481 if (state->exceptionflags & SFLOAT_DIVBYZERO)
482 compile_error(ctx, "division by zero");
483 if (state->exceptionflags & SFLOAT_INVALID)
484 compile_error(ctx, "undefined (inf)");
485 if (state->exceptionflags & SFLOAT_OVERFLOW)
486 compile_error(ctx, "arithmetic overflow");
487 if (state->exceptionflags & SFLOAT_UNDERFLOW)
488 compile_error(ctx, "arithmetic underflow");
492  * There are two stages to constant folding in GMQCC: there is the parse
493  * stage constant folding, where, with the help of the AST, operator
494  * usages can be constant folded. Then there is constant folding in the
495  * IR, where things like eliding if statements can occur.
497  * This file is thus split into two parts.
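 * For example, an operator expression over constants such as `2 * 3 + 1' is
 * replaced by the immediate 7 while parsing, whereas a branch whose condition
 * only folds to a constant in the IR, such as `if (0)', is eliminated later
 * by fold_cond below.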
500 #define isfloat(X) (((ast_expression*)(X))->vtype == TYPE_FLOAT)
501 #define isvector(X) (((ast_expression*)(X))->vtype == TYPE_VECTOR)
502 #define isstring(X) (((ast_expression*)(X))->vtype == TYPE_STRING)
503 #define isfloats(X,Y) (isfloat (X) && isfloat (Y))
506 * Implementation of basic vector math for vec3_t, for trivial constant
509 * TODO: gcc/clang hinting for autovectorization
525 sfloat_state_t state[3];
528 static GMQCC_INLINE vec3_soft_t vec3_soft_convert(vec3_t vec) {
536 static GMQCC_INLINE bool vec3_soft_exception(vec3_soft_state_t *vstate, size_t index) {
537 sfloat_exceptionflags_t flags = vstate->state[index].exceptionflags;
538 if (flags & SFLOAT_DIVBYZERO) return true;
539 if (flags & SFLOAT_INVALID) return true;
540 if (flags & SFLOAT_OVERFLOW) return true;
541 if (flags & SFLOAT_UNDERFLOW) return true;
545 static GMQCC_INLINE void vec3_soft_eval(vec3_soft_state_t *state,
546 sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
550 vec3_soft_t sa = vec3_soft_convert(a);
551 vec3_soft_t sb = vec3_soft_convert(b);
552 callback(&state->state[0], sa.x.s, sb.x.s);
553 if (vec3_soft_exception(state, 0)) state->faults |= VEC_COMP_X;
554 callback(&state->state[1], sa.y.s, sb.y.s);
555 if (vec3_soft_exception(state, 1)) state->faults |= VEC_COMP_Y;
556 callback(&state->state[2], sa.z.s, sb.z.s);
557 if (vec3_soft_exception(state, 2)) state->faults |= VEC_COMP_Z;
560 static GMQCC_INLINE void vec3_check_except(vec3_t a,
563 sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t))
565 vec3_soft_state_t state;
566 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
569 vec3_soft_eval(&state, callback, a, b);
570 if (state.faults & VEC_COMP_X) sfloat_check(ctx, &state.state[0], "x");
571 if (state.faults & VEC_COMP_Y) sfloat_check(ctx, &state.state[1], "y");
572 if (state.faults & VEC_COMP_Z) sfloat_check(ctx, &state.state[2], "z");
575 static GMQCC_INLINE vec3_t vec3_add(lex_ctx_t ctx, vec3_t a, vec3_t b) {
577 vec3_check_except(a, b, ctx, &sfloat_add);
584 static GMQCC_INLINE vec3_t vec3_sub(lex_ctx_t ctx, vec3_t a, vec3_t b) {
586 vec3_check_except(a, b, ctx, &sfloat_sub);
593 static GMQCC_INLINE vec3_t vec3_neg(vec3_t a) {
601 static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
603 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
604 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
605 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
609 static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
611 out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
612 out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
613 out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
617 static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
619 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
620 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
621 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
625 static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
627 out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
628 out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
629 out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
633 static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
635 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
636 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b.y));
637 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b.z));
641 static GMQCC_INLINE vec3_t vec3_xorvf(vec3_t a, qcfloat_t b) {
643 out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b));
644 out.y = (qcfloat_t)(((qcint_t)a.y) ^ ((qcint_t)b));
645 out.z = (qcfloat_t)(((qcint_t)a.z) ^ ((qcint_t)b));
649 static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
657 static GMQCC_INLINE qcfloat_t vec3_mulvv(vec3_t a, vec3_t b) {
658 return (a.x * b.x + a.y * b.y + a.z * b.z);
661 static GMQCC_INLINE vec3_t vec3_mulvf(vec3_t a, qcfloat_t b) {
669 static GMQCC_INLINE bool vec3_cmp(vec3_t a, vec3_t b) {
675 static GMQCC_INLINE vec3_t vec3_create(float x, float y, float z) {
683 static GMQCC_INLINE qcfloat_t vec3_notf(vec3_t a) {
684 return (!a.x && !a.y && !a.z);
687 static GMQCC_INLINE bool vec3_pbool(vec3_t a) {
688 return (a.x || a.y || a.z);
691 static GMQCC_INLINE vec3_t vec3_cross(vec3_t a, vec3_t b) {
693 out.x = a.y * b.z - a.z * b.y;
694 out.y = a.z * b.x - a.x * b.z;
695 out.z = a.x * b.y - a.y * b.x;
699 static lex_ctx_t fold_ctx(fold_t *fold) {
701 if (fold->parser->lex)
702 return parser_ctx(fold->parser);
704 memset(&ctx, 0, sizeof(ctx));
708 static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
709 switch (v->expression.vtype) {
711 return !!v->constval.vfloat;
713 return !!v->constval.vint;
715 if (OPTS_FLAG(CORRECT_LOGIC))
716 return vec3_pbool(v->constval.vvec);
717 return !!(v->constval.vvec.x);
719 if (!v->constval.vstring)
721 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
723 return !!v->constval.vstring[0];
725 compile_error(fold_ctx(fold), "internal error: fold_immediate_true on invalid type");
728 return !!v->constval.vfunc;
731 /* Handy macros to determine if an ast_value can be constant folded. */
732 #define fold_can_1(X) \
733 (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
734 ((ast_expression*)(X))->vtype != TYPE_FUNCTION)
736 #define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
738 #define fold_immvalue_float(E) ((E)->constval.vfloat)
739 #define fold_immvalue_vector(E) ((E)->constval.vvec)
740 #define fold_immvalue_string(E) ((E)->constval.vstring)
742 fold_t *fold_init(parser_t *parser) {
743 fold_t *fold = (fold_t*)mem_a(sizeof(fold_t));
744 fold->parser = parser;
745 fold->imm_float = NULL;
746 fold->imm_vector = NULL;
747 fold->imm_string = NULL;
748 fold->imm_string_untranslate = util_htnew(FOLD_STRING_UNTRANSLATE_HTSIZE);
749 fold->imm_string_dotranslate = util_htnew(FOLD_STRING_DOTRANSLATE_HTSIZE);
752 * prime the tables with common constant values at constant
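 * The priming order below matters: imm_float[0], [1], [2] and [3] end up
 * holding 0, 1, -1 and 2 respectively, and later folds index them directly
 * (fold_op_lteqgt returns [2]/[0]/[1] and fold_op_div reuses [1] as 1.0).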
755 (void)fold_constgen_float (fold, 0.0f, false);
756 (void)fold_constgen_float (fold, 1.0f, false);
757 (void)fold_constgen_float (fold, -1.0f, false);
758 (void)fold_constgen_float (fold, 2.0f, false);
760 (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
761 (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));
766 bool fold_generate(fold_t *fold, ir_builder *ir) {
767 /* generate globals for immediate folded values */
771 for (i = 0; i < vec_size(fold->imm_float); ++i)
772 if (!ast_global_codegen ((cur = fold->imm_float[i]), ir, false)) goto err;
773 for (i = 0; i < vec_size(fold->imm_vector); ++i)
774 if (!ast_global_codegen((cur = fold->imm_vector[i]), ir, false)) goto err;
775 for (i = 0; i < vec_size(fold->imm_string); ++i)
776 if (!ast_global_codegen((cur = fold->imm_string[i]), ir, false)) goto err;
781 con_out("failed to generate global %s\n", cur->name);
782 ir_builder_delete(ir);
786 void fold_cleanup(fold_t *fold) {
789 for (i = 0; i < vec_size(fold->imm_float); ++i) ast_delete(fold->imm_float[i]);
790 for (i = 0; i < vec_size(fold->imm_vector); ++i) ast_delete(fold->imm_vector[i]);
791 for (i = 0; i < vec_size(fold->imm_string); ++i) ast_delete(fold->imm_string[i]);
793 vec_free(fold->imm_float);
794 vec_free(fold->imm_vector);
795 vec_free(fold->imm_string);
797 util_htdel(fold->imm_string_untranslate);
798 util_htdel(fold->imm_string_dotranslate);
803 ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value, bool inexact) {
804 ast_value *out = NULL;
807 for (i = 0; i < vec_size(fold->imm_float); i++) {
808 if (!memcmp(&fold->imm_float[i]->constval.vfloat, &value, sizeof(qcfloat_t)))
809 return (ast_expression*)fold->imm_float[i];
812 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_FLOAT);
814 out->hasvalue = true;
815 out->inexact = inexact;
816 out->constval.vfloat = value;
818 vec_push(fold->imm_float, out);
820 return (ast_expression*)out;
823 ast_expression *fold_constgen_vector(fold_t *fold, vec3_t value) {
827 for (i = 0; i < vec_size(fold->imm_vector); i++) {
828 if (vec3_cmp(fold->imm_vector[i]->constval.vvec, value))
829 return (ast_expression*)fold->imm_vector[i];
832 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_VECTOR);
834 out->hasvalue = true;
835 out->constval.vvec = value;
837 vec_push(fold->imm_vector, out);
839 return (ast_expression*)out;
842 ast_expression *fold_constgen_string(fold_t *fold, const char *str, bool translate) {
843 hash_table_t *table = (translate) ? fold->imm_string_untranslate : fold->imm_string_dotranslate;
844 ast_value *out = NULL;
845 size_t hash = util_hthash(table, str);
847 if ((out = (ast_value*)util_htgeth(table, str, hash)))
848 return (ast_expression*)out;
852 util_snprintf(name, sizeof(name), "dotranslate_%lu", (unsigned long)(fold->parser->translated++));
853 out = ast_value_new(parser_ctx(fold->parser), name, TYPE_STRING);
854 out->expression.flags |= AST_FLAG_INCLUDE_DEF; /* def needs to be included for translatables */
856 out = ast_value_new(fold_ctx(fold), "#IMMEDIATE", TYPE_STRING);
859 out->hasvalue = true;
861 out->constval.vstring = parser_strdup(str);
863 vec_push(fold->imm_string, out);
864 util_htseth(table, str, hash, out);
866 return (ast_expression*)out;
870 static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, ast_value *sel, const char *set) {
872  * Vector-component constant folding works by matching the component sets
873  * to eliminate expensive operations on whole vectors (3 components at runtime).
874  * To achieve this effect in a clean manner this function generalizes the
875  * values through the use of a set parameter, which is used as an indexing method
876  * for creating the elided ast binary expression.
878  * Consider 'n 0 0' where y and z need to be tested for 0, and x is
879  * used as the value in a binary operation generating an INSTR_MUL instruction.
880  * To accomplish the indexing of the correct component value we use set[0], set[1], set[2]
881  * as x, y, z, where those positions hold the characters 'x', 'y', 'z'. Because
882  * of how ASCII works we can easily delineate:
883  * vec.z is the same as set[2]-'x' when set[2] is 'z'; 'z'-'x' results in a
884  * literal value of 2, and using this 2 we know that taking the address of vec.x (float)
885  * and indexing it with this literal will yield the immediate address of that component.
887  * Of course more work needs to be done to generate the correct index for the ast_member_new
888  * call, which is no problem: set[0]-'x' suffices for that job.
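 * As a concrete sketch: folding `'0 3 0' * v' is attempted with the sets
 * "xyz", "yxz" and "zxy"; with "yxz" the first slot picks vec.y == 3 while
 * the other two pick vec.x and vec.z, both zero, so the whole vector multiply
 * collapses into the float expression `3 * v.y' built below.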
890 qcfloat_t x = (&vec.x)[set[0]-'x'];
891 qcfloat_t y = (&vec.x)[set[1]-'x'];
892 qcfloat_t z = (&vec.x)[set[2]-'x'];
896 ++opts_optimizationcount[OPTIM_VECTOR_COMPONENTS];
897 out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
898 out->node.keep = false;
899 ((ast_member*)out)->rvalue = true;
901 return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x, false), out);
907 static GMQCC_INLINE ast_expression *fold_op_neg(fold_t *fold, ast_value *a) {
910 return fold_constgen_float(fold, -fold_immvalue_float(a), false);
911 } else if (isvector(a)) {
913 return fold_constgen_vector(fold, vec3_neg(fold_immvalue_vector(a)));
918 static GMQCC_INLINE ast_expression *fold_op_not(fold_t *fold, ast_value *a) {
921 return fold_constgen_float(fold, !fold_immvalue_float(a), false);
922 } else if (isvector(a)) {
924 return fold_constgen_float(fold, vec3_notf(fold_immvalue_vector(a)), false);
925 } else if (isstring(a)) {
927 if (OPTS_FLAG(TRUE_EMPTY_STRINGS))
928 return fold_constgen_float(fold, !fold_immvalue_string(a), false);
930 return fold_constgen_float(fold, !fold_immvalue_string(a) || !*fold_immvalue_string(a), false);
936 static bool fold_check_except_float(sfloat_t (*callback)(sfloat_state_t *, sfloat_t, sfloat_t),
945 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS) && !OPTS_WARN(WARN_INEXACT_COMPARES))
948 s.roundingmode = SFLOAT_ROUND_NEAREST_EVEN;
949 s.tiny = SFLOAT_TBEFORE;
950 s.exceptionflags = 0;
951 ca.f = fold_immvalue_float(a);
952 cb.f = fold_immvalue_float(b);
954 callback(&s, ca.s, cb.s);
955 if (s.exceptionflags == 0)
958 if (!OPTS_FLAG(ARITHMETIC_EXCEPTIONS))
959 goto inexact_possible;
961 sfloat_check(fold_ctx(fold), &s, NULL);
964 return s.exceptionflags & SFLOAT_INEXACT;
967 static bool fold_check_inexact_float(fold_t *fold, ast_value *a, ast_value *b) {
968 lex_ctx_t ctx = fold_ctx(fold);
969 if (!OPTS_WARN(WARN_INEXACT_COMPARES))
971 if (!a->inexact && !b->inexact)
973 return compile_warning(ctx, WARN_INEXACT_COMPARES, "inexact value in comparison");
976 static GMQCC_INLINE ast_expression *fold_op_add(fold_t *fold, ast_value *a, ast_value *b) {
978 if (fold_can_2(a, b)) {
979 bool inexact = fold_check_except_float(&sfloat_add, fold, a, b);
980 return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b), inexact);
982 } else if (isvector(a)) {
983 if (fold_can_2(a, b))
984 return fold_constgen_vector(fold, vec3_add(fold_ctx(fold),
985 fold_immvalue_vector(a),
986 fold_immvalue_vector(b)));
991 static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_value *b) {
993 if (fold_can_2(a, b)) {
994 bool inexact = fold_check_except_float(&sfloat_sub, fold, a, b);
995 return fold_constgen_float(fold, fold_immvalue_float(a) - fold_immvalue_float(b), inexact);
997 } else if (isvector(a)) {
998 if (fold_can_2(a, b))
999 return fold_constgen_vector(fold, vec3_sub(fold_ctx(fold),
1000 fold_immvalue_vector(a),
1001 fold_immvalue_vector(b)));
1006 static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
1009 if (fold_can_2(a, b))
1010 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
1012 if (fold_can_2(a, b)) {
1013 bool inexact = fold_check_except_float(&sfloat_mul, fold, a, b);
1014 return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b), inexact);
1017 } else if (isvector(a)) {
1019 if (fold_can_2(a, b))
1020 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1022 if (fold_can_2(a, b)) {
1023 return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)), false);
1024 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(a)) {
1025 ast_expression *out;
1026 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "xyz"))) return out;
1027 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "yxz"))) return out;
1028 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(a), b, "zxy"))) return out;
1029 } else if (OPTS_OPTIMIZATION(OPTIM_VECTOR_COMPONENTS) && fold_can_1(b)) {
1030 ast_expression *out;
1031 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "xyz"))) return out;
1032 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "yxz"))) return out;
1033 if ((out = fold_op_mul_vec(fold, fold_immvalue_vector(b), a, "zxy"))) return out;
1040 static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
1042 if (fold_can_2(a, b)) {
1043 bool inexact = fold_check_except_float(&sfloat_div, fold, a, b);
1044 return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b), inexact);
1045 } else if (fold_can_1(b)) {
1046 return (ast_expression*)ast_binary_new(
1050 fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
1053 } else if (isvector(a)) {
1054 if (fold_can_2(a, b)) {
1055 return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
1057 return (ast_expression*)ast_binary_new(
1062 ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b), false)
1063 : (ast_expression*)ast_binary_new(
1066 (ast_expression*)fold->imm_float[1],
1075 static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
1076 return (fold_can_2(a, b))
1077 ? fold_constgen_float(fold, fmod(fold_immvalue_float(a), fold_immvalue_float(b)), false)
1081 static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
1083 if (fold_can_2(a, b))
1084 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))), false);
1087 if (fold_can_2(a, b))
1088 return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1090 if (fold_can_2(a, b))
1091 return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1097 static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
1099 if (fold_can_2(a, b))
1100 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))), false);
1103 if (fold_can_2(a, b))
1104 return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1106 if (fold_can_2(a, b))
1107 return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1113 static GMQCC_INLINE ast_expression *fold_op_xor(fold_t *fold, ast_value *a, ast_value *b) {
1115 if (fold_can_2(a, b))
1116 return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) ^ ((qcint_t)fold_immvalue_float(b))), false);
1118 if (fold_can_2(a, b)) {
1120 return fold_constgen_vector(fold, vec3_xor(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1122 return fold_constgen_vector(fold, vec3_xorvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
1128 static GMQCC_INLINE ast_expression *fold_op_lshift(fold_t *fold, ast_value *a, ast_value *b) {
1129 if (fold_can_2(a, b) && isfloats(a, b))
1130 return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) * powf(2.0f, fold_immvalue_float(b))), false);
1134 static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, ast_value *b) {
1135 if (fold_can_2(a, b) && isfloats(a, b))
1136 return fold_constgen_float(fold, (qcfloat_t)floorf(fold_immvalue_float(a) / powf(2.0f, fold_immvalue_float(b))), false);
1140 static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
1141 if (fold_can_2(a, b)) {
1142 if (OPTS_FLAG(PERL_LOGIC)) {
1144 return (fold_immediate_true(fold, a)) ? (ast_expression*)a : (ast_expression*)b;
1146 return (fold_immediate_true(fold, a)) ? (ast_expression*)b : (ast_expression*)a;
1148 return fold_constgen_float (
1150 ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
1151 : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
1161 static GMQCC_INLINE ast_expression *fold_op_tern(fold_t *fold, ast_value *a, ast_value *b, ast_value *c) {
1162 if (fold_can_1(a)) {
1163 return fold_immediate_true(fold, a)
1164 ? (ast_expression*)b
1165 : (ast_expression*)c;
1170 static GMQCC_INLINE ast_expression *fold_op_exp(fold_t *fold, ast_value *a, ast_value *b) {
1171 if (fold_can_2(a, b))
1172 return fold_constgen_float(fold, (qcfloat_t)powf(fold_immvalue_float(a), fold_immvalue_float(b)), false);
1176 static GMQCC_INLINE ast_expression *fold_op_lteqgt(fold_t *fold, ast_value *a, ast_value *b) {
1177 if (fold_can_2(a,b)) {
1178 fold_check_inexact_float(fold, a, b);
1179 if (fold_immvalue_float(a) < fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[2];
1180 if (fold_immvalue_float(a) == fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[0];
1181 if (fold_immvalue_float(a) > fold_immvalue_float(b)) return (ast_expression*)fold->imm_float[1];
1186 static GMQCC_INLINE ast_expression *fold_op_ltgt(fold_t *fold, ast_value *a, ast_value *b, bool lt) {
1187 if (fold_can_2(a, b)) {
1188 fold_check_inexact_float(fold, a, b);
1189 return (lt) ? (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) < fold_immvalue_float(b))]
1190 : (ast_expression*)fold->imm_float[!!(fold_immvalue_float(a) > fold_immvalue_float(b))];
1195 static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_value *b, bool ne) {
1196 if (fold_can_2(a, b)) {
1197 if (isfloat(a) && isfloat(b)) {
1198 float la = fold_immvalue_float(a);
1199 float lb = fold_immvalue_float(b);
1200 fold_check_inexact_float(fold, a, b);
1201 return (ast_expression*)fold->imm_float[!(ne ? la == lb : la != lb)];
1202 } if (isvector(a) && isvector(b)) {
1203 vec3_t la = fold_immvalue_vector(a);
1204 vec3_t lb = fold_immvalue_vector(b);
1205 return (ast_expression*)fold->imm_float[!(ne ? vec3_cmp(la, lb) : !vec3_cmp(la, lb))];
1211 static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
1214 return fold_constgen_float(fold, -1-fold_immvalue_float(a), false);
1218 return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
1224 static GMQCC_INLINE ast_expression *fold_op_cross(fold_t *fold, ast_value *a, ast_value *b) {
1225 if (fold_can_2(a, b))
1226 return fold_constgen_vector(fold, vec3_cross(fold_immvalue_vector(a), fold_immvalue_vector(b)));
1230 ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
1231 ast_value *a = (ast_value*)opexprs[0];
1232 ast_value *b = (ast_value*)opexprs[1];
1233 ast_value *c = (ast_value*)opexprs[2];
1234 ast_expression *e = NULL;
1236 /* can a fold operation be applied to this operator usage? */
1240 switch(info->operands) {
1241 case 3: if(!c) return NULL;
1242 case 2: if(!b) return NULL;
1245 compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
1251  * We could use a boolean and a default case, but ironically gcc produces
1252  * invalid, broken assembly from that construct. clang/tcc get it right, but
1253  * interestingly refuse to compile this into a jump table when I do that.
1254  * This happens to be the most efficient method, since you get per-case
1255  * granularity: the pointer check happens only in the case that needs it,
1256  * as opposed to the default method, which would involve a boolean and a
1257  * pointer check afterwards.
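 * For reference, a use such as fold_op_case(1, ('+'), add, (fold, a, b))
 * expands through the macro below into roughly:
 *     case opid1('+'):
 *         if ((e = fold_op_add(fold, a, b))) {
 *             ++opts_optimizationcount[OPTIM_CONST_FOLD];
 *             ...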
1259 #define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD) \
1260 case opid##ARGS ARGS_OPID: \
1261 if ((e = fold_op_##OP ARGS_FOLD)) { \
1262 ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
1267 fold_op_case(2, ('-', 'P'), neg, (fold, a));
1268 fold_op_case(2, ('!', 'P'), not, (fold, a));
1269 fold_op_case(1, ('+'), add, (fold, a, b));
1270 fold_op_case(1, ('-'), sub, (fold, a, b));
1271 fold_op_case(1, ('*'), mul, (fold, a, b));
1272 fold_op_case(1, ('/'), div, (fold, a, b));
1273 fold_op_case(1, ('%'), mod, (fold, a, b));
1274 fold_op_case(1, ('|'), bor, (fold, a, b));
1275 fold_op_case(1, ('&'), band, (fold, a, b));
1276 fold_op_case(1, ('^'), xor, (fold, a, b));
1277 fold_op_case(1, ('<'), ltgt, (fold, a, b, true));
1278 fold_op_case(1, ('>'), ltgt, (fold, a, b, false));
1279 fold_op_case(2, ('<', '<'), lshift, (fold, a, b));
1280 fold_op_case(2, ('>', '>'), rshift, (fold, a, b));
1281 fold_op_case(2, ('|', '|'), andor, (fold, a, b, true));
1282 fold_op_case(2, ('&', '&'), andor, (fold, a, b, false));
1283 fold_op_case(2, ('?', ':'), tern, (fold, a, b, c));
1284 fold_op_case(2, ('*', '*'), exp, (fold, a, b));
1285 fold_op_case(3, ('<','=','>'), lteqgt, (fold, a, b));
1286 fold_op_case(2, ('!', '='), cmp, (fold, a, b, true));
1287 fold_op_case(2, ('=', '='), cmp, (fold, a, b, false));
1288 fold_op_case(2, ('~', 'P'), bnot, (fold, a));
1289 fold_op_case(2, ('>', '<'), cross, (fold, a, b));
1292 compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
1297  * Constant folding for compiler intrinsics; similar approach to operator
1298  * folding, primarily: individual functions for each intrinsic to fold,
1299  * and a generic selection function.
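 * For example, a `pow' intrinsic whose two arguments are float constants is
 * replaced by the immediate powf(a, b) via fold_intrin_pow below.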
1301 static GMQCC_INLINE ast_expression *fold_intrin_isfinite(fold_t *fold, ast_value *a) {
1302 return fold_constgen_float(fold, isfinite(fold_immvalue_float(a)), false);
1304 static GMQCC_INLINE ast_expression *fold_intrin_isinf(fold_t *fold, ast_value *a) {
1305 return fold_constgen_float(fold, isinf(fold_immvalue_float(a)), false);
1307 static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *a) {
1308 return fold_constgen_float(fold, isnan(fold_immvalue_float(a)), false);
1310 static GMQCC_INLINE ast_expression *fold_intrin_isnormal(fold_t *fold, ast_value *a) {
1311 return fold_constgen_float(fold, isnormal(fold_immvalue_float(a)), false);
1313 static GMQCC_INLINE ast_expression *fold_intrin_signbit(fold_t *fold, ast_value *a) {
1314 return fold_constgen_float(fold, signbit(fold_immvalue_float(a)), false);
1316 static GMQCC_INLINE ast_expression *fold_intrin_acosh(fold_t *fold, ast_value *a) {
1317 return fold_constgen_float(fold, acoshf(fold_immvalue_float(a)), false);
1319 static GMQCC_INLINE ast_expression *fold_intrin_asinh(fold_t *fold, ast_value *a) {
1320 return fold_constgen_float(fold, asinhf(fold_immvalue_float(a)), false);
1322 static GMQCC_INLINE ast_expression *fold_intrin_atanh(fold_t *fold, ast_value *a) {
1323 return fold_constgen_float(fold, (float)atanh(fold_immvalue_float(a)), false);
1325 static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *a) {
1326 return fold_constgen_float(fold, expf(fold_immvalue_float(a)), false);
1328 static GMQCC_INLINE ast_expression *fold_intrin_exp2(fold_t *fold, ast_value *a) {
1329 return fold_constgen_float(fold, exp2f(fold_immvalue_float(a)), false);
1331 static GMQCC_INLINE ast_expression *fold_intrin_expm1(fold_t *fold, ast_value *a) {
1332 return fold_constgen_float(fold, expm1f(fold_immvalue_float(a)), false);
1334 static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
1335 return fold_constgen_float(fold, fmodf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
1337 static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
1338 return fold_constgen_float(fold, powf(fold_immvalue_float(lhs), fold_immvalue_float(rhs)), false);
1340 static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *a) {
1341 return fold_constgen_float(fold, fabsf(fold_immvalue_float(a)), false);
1345 ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
1346 ast_expression *ret = NULL;
1347 ast_value *a = (ast_value*)arg[0];
1348 ast_value *b = (ast_value*)arg[1];
1350 if (!strcmp(intrin, "isfinite")) ret = fold_intrin_isfinite(fold, a);
1351 if (!strcmp(intrin, "isinf")) ret = fold_intrin_isinf(fold, a);
1352 if (!strcmp(intrin, "isnan")) ret = fold_intrin_isnan(fold, a);
1353 if (!strcmp(intrin, "isnormal")) ret = fold_intrin_isnormal(fold, a);
1354 if (!strcmp(intrin, "signbit")) ret = fold_intrin_signbit(fold, a);
1355 if (!strcmp(intrin, "acosh")) ret = fold_intrin_acosh(fold, a);
1356 if (!strcmp(intrin, "asinh")) ret = fold_intrin_asinh(fold, a);
1357 if (!strcmp(intrin, "atanh")) ret = fold_intrin_atanh(fold, a);
1358 if (!strcmp(intrin, "exp")) ret = fold_intrin_exp(fold, a);
1359 if (!strcmp(intrin, "exp2")) ret = fold_intrin_exp2(fold, a);
1360 if (!strcmp(intrin, "expm1")) ret = fold_intrin_expm1(fold, a);
1361 if (!strcmp(intrin, "mod")) ret = fold_intrin_mod(fold, a, b);
1362 if (!strcmp(intrin, "pow")) ret = fold_intrin_pow(fold, a, b);
1363 if (!strcmp(intrin, "fabs")) ret = fold_intrin_fabs(fold, a);
1366 ++opts_optimizationcount[OPTIM_CONST_FOLD];
1372  * These are all the actual constant folding methods that happen in between
1373  * the AST and IR stages of the compiler, i.e. eliminating branches for constant
1374  * expressions, which is the only supported thing so far. We undefine the
1375  * testing macros here because an ir_value is different from an ast_value.
1381 #undef fold_immvalue_float
1382 #undef fold_immvalue_string
1383 #undef fold_immvalue_vector
1387 #define isfloat(X) ((X)->vtype == TYPE_FLOAT)
1388 /*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
1389 /*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
1390 #define fold_immvalue_float(X) ((X)->constval.vfloat)
1391 #define fold_immvalue_vector(X) ((X)->constval.vvec)
1392 /*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
1393 #define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
1394 /*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
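/*
 * Elide binary operations that are superfluous because the constant operand
 * is an identity for the operation (e.g. multiplying by 1.0 or adding 0.0,
 * and the vector analogues '1 1 1' and '0 0 0' checked below).
 */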
1396 static ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
1397 ast_expression *swapped = NULL; /* using this as bool */
1400 if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right))) {
1406 if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
1414 if (fold_immvalue_float(load) == 1.0f) {
1415 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1426 if (fold_immvalue_float(load) == 0.0f) {
1427 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1434 if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
1435 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1445 if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
1446 ++opts_optimizationcount[OPTIM_PEEPHOLE];
1456 ast_expression *fold_binary(lex_ctx_t ctx, int op, ast_expression *left, ast_expression *right) {
1457 ast_expression *ret = fold_superfluous(left, right, op);
1460 return (ast_expression*)ast_binary_new(ctx, op, left, right);
1463 static GMQCC_INLINE int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
1464 if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
1465 ast_expression_codegen *cgen;
1468 bool istrue = (fold_immvalue_float(condval) != 0.0f && branch->on_true);
1469 bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
1470 ast_expression *path = (istrue) ? branch->on_true :
1471 (isfalse) ? branch->on_false : NULL;
1474  * No path to take implies that the evaluation is if(0) and there
1475  * is no else block, so eliminate all the code.
1477 ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
1481 if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
1483 if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
1485 if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
1488  * Now the branch has been eliminated and the correct block for the constant
1489  * evaluation is expanded into the current block of the function.
1491 func->curblock = elide;
1492 ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
1495 return -1; /* nothing done */
1498 int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) {
1499 return fold_cond(condval, func, (ast_ifthen*)branch);
1502 int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) {
1503 return fold_cond(condval, func, branch);