diff --git a/fold.c b/fold.c
index 6a8cec2..c9f1a06 100644
--- a/fold.c
+++ b/fold.c
@@ -34,7 +34,7 @@
  * stage constant folding, where, with the help of the AST, operator
  * usages can be constant folded. Then there is the constant folding
  * in the IR, where things like eliding if statements can occur.
- * 
+ *
  * This file is thus split into two parts.
  */
 
@@ -46,7 +46,7 @@
 /*
  * Implementation of basic vector math for vec3_t, for trivial constant
  * folding.
- * 
+ *
  * TODO: gcc/clang hinting for autovectorization
  */
 static GMQCC_INLINE vec3_t vec3_add(vec3_t a, vec3_t b) {
@@ -73,6 +73,38 @@ static GMQCC_INLINE vec3_t vec3_neg(vec3_t a) {
     return out;
 }
 
+static GMQCC_INLINE vec3_t vec3_or(vec3_t a, vec3_t b) {
+    vec3_t out;
+    out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b.x));
+    out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b.y));
+    out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b.z));
+    return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_orvf(vec3_t a, qcfloat_t b) {
+    vec3_t out;
+    out.x = (qcfloat_t)(((qcint_t)a.x) | ((qcint_t)b));
+    out.y = (qcfloat_t)(((qcint_t)a.y) | ((qcint_t)b));
+    out.z = (qcfloat_t)(((qcint_t)a.z) | ((qcint_t)b));
+    return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_and(vec3_t a, vec3_t b) {
+    vec3_t out;
+    out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b.x));
+    out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b.y));
+    out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b.z));
+    return out;
+}
+
+static GMQCC_INLINE vec3_t vec3_andvf(vec3_t a, qcfloat_t b) {
+    vec3_t out;
+    out.x = (qcfloat_t)(((qcint_t)a.x) & ((qcint_t)b));
+    out.y = (qcfloat_t)(((qcint_t)a.y) & ((qcint_t)b));
+    out.z = (qcfloat_t)(((qcint_t)a.z) & ((qcint_t)b));
+    return out;
+}
+
 static GMQCC_INLINE vec3_t vec3_xor(vec3_t a, vec3_t b) {
     vec3_t out;
     out.x = (qcfloat_t)(((qcint_t)a.x) ^ ((qcint_t)b.x));
@@ -89,6 +121,14 @@ static GMQCC_INLINE vec3_t vec3_xorvf(vec3_t a, qcfloat_t b) {
     return out;
 }
 
+static GMQCC_INLINE vec3_t vec3_not(vec3_t a) {
+    vec3_t out;
+    out.x = (qcfloat_t)(~((qcint_t)a.x));
+    out.y = (qcfloat_t)(~((qcint_t)a.y));
+    out.z = (qcfloat_t)(~((qcint_t)a.z));
+    return out;
+}
+
 static GMQCC_INLINE qcfloat_t vec3_mulvv(vec3_t a, vec3_t b) {
     return (a.x * b.x + a.y * b.y + a.z * b.z);
 }
@@ -123,13 +163,12 @@ static GMQCC_INLINE bool vec3_pbool(vec3_t a) {
     return (a.x && a.y && a.z);
 }
 
-static GMQCC_INLINE bool fold_can_1(const ast_value *val) {
-    return (ast_istype(((ast_expression*)(val)), ast_value) && val->hasvalue && (val->cvq == CV_CONST) &&
-            ((ast_expression*)(val))->vtype != TYPE_FUNCTION);
-}
-
-static GMQCC_INLINE bool fold_can_2(const ast_value *v1, const ast_value *v2) {
-    return fold_can_1(v1) && fold_can_1(v2);
+static GMQCC_INLINE vec3_t vec3_cross(vec3_t a, vec3_t b) {
+    vec3_t out;
+    out.x = a.y * b.z - a.z * b.y;
+    out.y = a.z * b.x - a.x * b.z;
+    out.z = a.x * b.y - a.y * b.x;
+    return out;
 }
 
 static lex_ctx_t fold_ctx(fold_t *fold) {
@@ -147,7 +186,7 @@ static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
             return !!v->constval.vfloat;
         case TYPE_INTEGER:
             return !!v->constval.vint;
-        case TYPE_VECTOR: 
+        case TYPE_VECTOR:
             if (OPTS_FLAG(CORRECT_LOGIC))
                 return vec3_pbool(v->constval.vvec);
            return !!(v->constval.vvec.x);
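/*
 * Editorial sketch (not part of this commit): fold_immediate_true decides
 * vector truth differently depending on the CORRECT_LOGIC flag. With the
 * flag set, vec3_pbool requires every component to be non-zero; without it,
 * only the x component is tested:
 *
 *     vec3_t v = vec3_create(1.0f, 0.0f, 0.0f);
 *     vec3_pbool(v);   // false: y and z are zero
 *     !!(v.x);         // true under the x-only default
 */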
@@ -164,6 +203,13 @@ static GMQCC_INLINE bool fold_immediate_true(fold_t *fold, ast_value *v) {
             return !!v->constval.vfunc;
 }
 
+/* Handy macros to determine if an ast_value can be constant folded. */
+#define fold_can_1(X)                                                                            \
+    (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
+     ((ast_expression*)(X))->vtype != TYPE_FUNCTION)
+
+#define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
+
 #define fold_immvalue_float(E)  ((E)->constval.vfloat)
 #define fold_immvalue_vector(E) ((E)->constval.vvec)
 #define fold_immvalue_string(E) ((E)->constval.vstring)
@@ -186,6 +232,7 @@ fold_t *fold_init(parser_t *parser) {
     (void)fold_constgen_float (fold, -1.0f);
 
     (void)fold_constgen_vector(fold, vec3_create(0.0f, 0.0f, 0.0f));
+    (void)fold_constgen_vector(fold, vec3_create(-1.0f, -1.0f, -1.0f));
 
     return fold;
 }
@@ -232,7 +279,7 @@ ast_expression *fold_constgen_float(fold_t *fold, qcfloat_t value) {
     size_t i;
 
     for (i = 0; i < vec_size(fold->imm_float); i++) {
-        if (fold->imm_float[i]->constval.vfloat == value)
+        if (!memcmp(&fold->imm_float[i]->constval.vfloat, &value, sizeof(qcfloat_t)))
             return (ast_expression*)fold->imm_float[i];
     }
 
@@ -297,19 +344,19 @@ static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, as
 /*
  * vector-component constant folding works by matching the component sets
  * to eliminate expensive operations on whole-vectors (3 components at runtime).
- * to achieve this effect in a clean manner this function generalizes the 
+ * to achieve this effect in a clean manner this function generalizes the
  * values through the use of a set parameter, which is used as an indexing method
  * for creating the elided ast binary expression.
 *
 * Consider 'n 0 0' where y and z need to be tested for 0, and x is
- * used as the value in a binary operation generating an INSTR_MUL instruction
+ * used as the value in a binary operation generating an INSTR_MUL instruction,
 * to accomplish the indexing of the correct component value we use set[0], set[1], set[2]
 * as x, y, z, where the values of those operations return 'x', 'y', 'z'. Because
 * of how ASCII works we can easily delineate:
 * vec.z is the same as set[2]-'x' for when set[2] is 'z'; 'z'-'x' results in a
 * literal value of 2, using this 2, we know that taking the address of vec->x (float)
 * and indexing it with this literal will yield the immediate address of that component.
- * 
+ *
 * Of course more work needs to be done to generate the correct index for the ast_member_new
 * call, which is no problem: set[0]-'x' suffices that job.
 */
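/*
 * Editorial sketch (not part of this commit): how the ASCII set trick maps a
 * component letter to both an ast_member index and a float offset. Assuming a
 * constant vector '0 0 n', only z is live, so the set carries 'z' in slot 2:
 *
 *     const char *set = "??z";
 *     int idx = set[2] - 'x';        // 'z' - 'x' == 2
 *     qcfloat_t z = (&vec.x)[idx];   // same storage as vec.z
 */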
@@ -323,7 +370,7 @@ static GMQCC_INLINE ast_expression *fold_op_mul_vec(fold_t *fold, vec3_t vec, as
         out = (ast_expression*)ast_member_new(fold_ctx(fold), (ast_expression*)sel, set[0]-'x', NULL);
         out->node.keep = false;
         ((ast_member*)out)->rvalue = true;
-        if (!x != -1)
+        if (x != -1.0f)
             return (ast_expression*)ast_binary_new(fold_ctx(fold), INSTR_MUL_F, fold_constgen_float(fold, x), out);
     }
     return NULL;
@@ -383,13 +430,17 @@ static GMQCC_INLINE ast_expression *fold_op_sub(fold_t *fold, ast_value *a, ast_
 
 static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
-        if (isfloat(b) && fold_can_2(a, b))
-            return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
-        else if (fold_can_2(a, b))
-            return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b));
+        if (isvector(b)) {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
+        } else {
+            if (fold_can_2(a, b))
+                return fold_constgen_float(fold, fold_immvalue_float(a) * fold_immvalue_float(b));
+        }
     } else if (isvector(a)) {
-        if (isfloat(b) && fold_can_2(a, b)) {
-            return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+        if (isfloat(b)) {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
         } else {
             if (fold_can_2(a, b)) {
                 return fold_constgen_float(fold, vec3_mulvv(fold_immvalue_vector(a), fold_immvalue_vector(b)));
@@ -411,32 +462,73 @@ static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_
 
 static GMQCC_INLINE ast_expression *fold_op_div(fold_t *fold, ast_value *a, ast_value *b) {
     if (isfloat(a)) {
-        if (fold_can_2(a, b))
+        if (fold_can_2(a, b)) {
             return fold_constgen_float(fold, fold_immvalue_float(a) / fold_immvalue_float(b));
+        } else if (fold_can_1(b)) {
+            return (ast_expression*)ast_binary_new(
+                fold_ctx(fold),
+                INSTR_MUL_F,
+                (ast_expression*)a,
+                fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
+            );
+        }
     } else if (isvector(a)) {
-        if (fold_can_2(a, b))
+        if (fold_can_2(a, b)) {
             return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(a), 1.0f / fold_immvalue_float(b)));
-        else if (fold_can_1(b))
-            return fold_constgen_float (fold, 1.0f / fold_immvalue_float(b));
+        } else {
+            return (ast_expression*)ast_binary_new(
+                fold_ctx(fold),
+                INSTR_MUL_VF,
+                (ast_expression*)a,
+                (fold_can_1(b))
+                    ? (ast_expression*)fold_constgen_float(fold, 1.0f / fold_immvalue_float(b))
+                    : (ast_expression*)ast_binary_new(
+                          fold_ctx(fold),
+                          INSTR_DIV_F,
+                          (ast_expression*)fold->imm_float[1],
+                          (ast_expression*)b
+                      )
+            );
+        }
     }
     return NULL;
 }
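/*
 * Editorial sketch (not part of this commit): when only the divisor is a
 * known constant, the fold above strength-reduces division to multiplication,
 * since the reciprocal can be computed at compile time:
 *
 *     x / 8                =>  x * 0.125       (INSTR_MUL_F)
 *     v / 8                =>  v * 0.125       (INSTR_MUL_VF)
 *     v / d, d not const   =>  v * (1.0 / d)   (INSTR_DIV_F feeding INSTR_MUL_VF,
 *                                               reusing the preallocated 1.0f
 *                                               immediate fold->imm_float[1])
 */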
 
 static GMQCC_INLINE ast_expression *fold_op_mod(fold_t *fold, ast_value *a, ast_value *b) {
-    if (fold_can_2(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) % ((qcint_t)fold_immvalue_float(b))));
-    return NULL;
+    return (fold_can_2(a, b))
+            ? fold_constgen_float(fold, fmod(fold_immvalue_float(a), fold_immvalue_float(b)))
+            : NULL;
 }
 
 static GMQCC_INLINE ast_expression *fold_op_bor(fold_t *fold, ast_value *a, ast_value *b) {
-    if (fold_can_2(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+    if (isfloat(a)) {
+        if (fold_can_2(a, b))
+            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) | ((qcint_t)fold_immvalue_float(b))));
+    } else {
+        if (isvector(b)) {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_or(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+        } else {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_orvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+        }
+    }
     return NULL;
 }
 
 static GMQCC_INLINE ast_expression *fold_op_band(fold_t *fold, ast_value *a, ast_value *b) {
-    if (fold_can_2(a, b))
-        return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+    if (isfloat(a)) {
+        if (fold_can_2(a, b))
+            return fold_constgen_float(fold, (qcfloat_t)(((qcint_t)fold_immvalue_float(a)) & ((qcint_t)fold_immvalue_float(b))));
+    } else {
+        if (isvector(b)) {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_and(fold_immvalue_vector(a), fold_immvalue_vector(b)));
+        } else {
+            if (fold_can_2(a, b))
+                return fold_constgen_vector(fold, vec3_andvf(fold_immvalue_vector(a), fold_immvalue_float(b)));
+        }
+    }
     return NULL;
 }
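/*
 * Editorial sketch (not part of this commit): QuakeC has no integer type at
 * this level, so the bitwise folds round-trip each float through qcint_t,
 * truncating any fractional part before the operation:
 *
 *     qcfloat_t a = 6.9f, b = 3.0f;
 *     qcfloat_t r = (qcfloat_t)(((qcint_t)a) | ((qcint_t)b));   // 6 | 3 == 7.0f
 *
 * vec3_or, vec3_and and vec3_not apply the same float->int->float dance
 * per component.
 */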
@@ -468,18 +560,18 @@ static GMQCC_INLINE ast_expression *fold_op_rshift(fold_t *fold, ast_value *a, a
     return NULL;
 }
 
-static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float or) {
+static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
     if (fold_can_2(a, b)) {
         if (OPTS_FLAG(PERL_LOGIC)) {
             if (fold_immediate_true(fold, a))
                 return (ast_expression*)b;
         } else {
             return fold_constgen_float (
-                fold, 
-                ((or) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
-                      : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
-                    ? 1.0f
-                    : 0.0f
+                fold,
+                ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
+                        : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
+                    ? 1
+                    : 0
             );
         }
     }
@@ -522,15 +614,29 @@ static GMQCC_INLINE ast_expression *fold_op_cmp(fold_t *fold, ast_value *a, ast_
 }
 
 static GMQCC_INLINE ast_expression *fold_op_bnot(fold_t *fold, ast_value *a) {
-    if (fold_can_1(a))
-        return fold_constgen_float(fold, ~((qcint_t)fold_immvalue_float(a)));
+    if (isfloat(a)) {
+        if (fold_can_1(a))
+            return fold_constgen_float(fold, ~((qcint_t)fold_immvalue_float(a)));
+    } else {
+        if (isvector(a)) {
+            if (fold_can_1(a))
+                return fold_constgen_vector(fold, vec3_not(fold_immvalue_vector(a)));
+        }
+    }
+    return NULL;
+}
+
+static GMQCC_INLINE ast_expression *fold_op_cross(fold_t *fold, ast_value *a, ast_value *b) {
+    if (fold_can_2(a, b))
+        return fold_constgen_vector(fold, vec3_cross(fold_immvalue_vector(a), fold_immvalue_vector(b)));
     return NULL;
 }
 
 ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
-    ast_value *a = (ast_value*)opexprs[0];
-    ast_value *b = (ast_value*)opexprs[1];
-    ast_value *c = (ast_value*)opexprs[2];
+    ast_value      *a = (ast_value*)opexprs[0];
+    ast_value      *b = (ast_value*)opexprs[1];
+    ast_value      *c = (ast_value*)opexprs[2];
+    ast_expression *e = NULL;
 
     /* can a fold operation be applied to this operator usage? */
     if (!info->folds)
@@ -541,32 +647,234 @@ ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **op
         case 2: if(!b) return NULL;
         case 1:
         if(!a) {
-            compile_error(fold_ctx(fold), "interal error: fold_op no operands to fold\n");
+            compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
             return NULL;
         }
     }
 
+    /*
+     * we could use a boolean and a default case, but ironically gcc produces
+     * invalid, broken assembly from that construct. clang/tcc get it right,
+     * but interestingly refuse to compile this to a jump-table when I do that.
+     * this happens to be the most efficient method, since you have per-case
+     * granularity: the pointer check happens only in the case that needs it,
+     * as opposed to the default method, which would involve a boolean and a
+     * pointer check afterwards.
+     */
+    #define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD)    \
+        case opid##ARGS ARGS_OPID:                          \
+            if ((e = fold_op_##OP ARGS_FOLD)) {             \
+                ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
+            }                                               \
+            return e
+
     switch(info->id) {
-        case opid2('-','P'):     return fold_op_neg    (fold, a);
-        case opid2('!','P'):     return fold_op_not    (fold, a);
-        case opid1('+'):         return fold_op_add    (fold, a, b);
-        case opid1('-'):         return fold_op_sub    (fold, a, b);
-        case opid1('*'):         return fold_op_mul    (fold, a, b);
-        case opid1('/'):         return fold_op_div    (fold, a, b);
-        case opid1('%'):         return fold_op_mod    (fold, a, b);
-        case opid1('|'):         return fold_op_bor    (fold, a, b);
-        case opid1('&'):         return fold_op_band   (fold, a, b);
-        case opid1('^'):         return fold_op_xor    (fold, a, b);
-        case opid2('<','<'):     return fold_op_lshift (fold, a, b);
-        case opid2('>','>'):     return fold_op_rshift (fold, a, b);
-        case opid2('|','|'):     return fold_op_andor  (fold, a, b, true);
-        case opid2('&','&'):     return fold_op_andor  (fold, a, b, false);
-        case opid2('?',':'):     return fold_op_tern   (fold, a, b, c);
-        case opid2('*','*'):     return fold_op_exp    (fold, a, b);
-        case opid3('<','=','>'): return fold_op_lteqgt (fold, a, b);
-        case opid2('!','='):     return fold_op_cmp    (fold, a, b, true);
-        case opid2('=','='):     return fold_op_cmp    (fold, a, b, false);
-        case opid2('~','P'):     return fold_op_bnot   (fold, a);
+        fold_op_case(2, ('-', 'P'),    neg,    (fold, a));
+        fold_op_case(2, ('!', 'P'),    not,    (fold, a));
+        fold_op_case(1, ('+'),         add,    (fold, a, b));
+        fold_op_case(1, ('-'),         sub,    (fold, a, b));
+        fold_op_case(1, ('*'),         mul,    (fold, a, b));
+        fold_op_case(1, ('/'),         div,    (fold, a, b));
+        fold_op_case(1, ('%'),         mod,    (fold, a, b));
+        fold_op_case(1, ('|'),         bor,    (fold, a, b));
+        fold_op_case(1, ('&'),         band,   (fold, a, b));
+        fold_op_case(1, ('^'),         xor,    (fold, a, b));
+        fold_op_case(2, ('<', '<'),    lshift, (fold, a, b));
+        fold_op_case(2, ('>', '>'),    rshift, (fold, a, b));
+        fold_op_case(2, ('|', '|'),    andor,  (fold, a, b, true));
+        fold_op_case(2, ('&', '&'),    andor,  (fold, a, b, false));
+        fold_op_case(2, ('?', ':'),    tern,   (fold, a, b, c));
+        fold_op_case(2, ('*', '*'),    exp,    (fold, a, b));
+        fold_op_case(3, ('<','=','>'), lteqgt, (fold, a, b));
+        fold_op_case(2, ('!', '='),    cmp,    (fold, a, b, true));
+        fold_op_case(2, ('=', '='),    cmp,    (fold, a, b, false));
+        fold_op_case(2, ('~', 'P'),    bnot,   (fold, a));
+        fold_op_case(2, ('>', '<'),    cross,  (fold, a, b));
     }
+    #undef fold_op_case
+    compile_error(fold_ctx(fold), "internal error: attempted to constant-fold for unsupported operator");
     return NULL;
 }
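/*
 * Editorial sketch (not part of this commit): one fold_op_case expansion,
 * written out by hand. fold_op_case(1, ('+'), add, (fold, a, b)); becomes:
 *
 *     case opid1 ('+'):
 *         if ((e = fold_op_add (fold, a, b))) {
 *             ++opts_optimizationcount[OPTIM_CONST_FOLD];
 *         }
 *         return e;
 *
 * i.e. the optimization counter is bumped only when a fold actually produced
 * an expression, while the switch stays a plain jump-table.
 */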
+
+/*
+ * Constant folding for compiler intrinsics; a similar approach to operator
+ * folding: individual functions for each intrinsic to fold, and a generic
+ * selection function.
+ */
+static GMQCC_INLINE ast_expression *fold_intrin_mod(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+    return fold_constgen_float(
+        fold,
+        fmodf(
+            fold_immvalue_float(lhs),
+            fold_immvalue_float(rhs)
+        )
+    );
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_pow(fold_t *fold, ast_value *lhs, ast_value *rhs) {
+    return fold_constgen_float(
+        fold,
+        powf(
+            fold_immvalue_float(lhs),
+            fold_immvalue_float(rhs)
+        )
+    );
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_exp(fold_t *fold, ast_value *value) {
+    return fold_constgen_float(fold, exp(fold_immvalue_float(value)));
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_isnan(fold_t *fold, ast_value *value) {
+    return fold_constgen_float(fold, isnan(fold_immvalue_float(value)) != 0.0f);
+}
+
+static GMQCC_INLINE ast_expression *fold_intrin_fabs(fold_t *fold, ast_value *value) {
+    return fold_constgen_float(fold, fabs(fold_immvalue_float(value)));
+}
+
+ast_expression *fold_intrin(fold_t *fold, const char *intrin, ast_expression **arg) {
+    ast_expression *ret = NULL;
+
+    if (!strcmp(intrin, "mod"))   ret = fold_intrin_mod  (fold, (ast_value*)arg[0], (ast_value*)arg[1]);
+    if (!strcmp(intrin, "pow"))   ret = fold_intrin_pow  (fold, (ast_value*)arg[0], (ast_value*)arg[1]);
+    if (!strcmp(intrin, "exp"))   ret = fold_intrin_exp  (fold, (ast_value*)arg[0]);
+    if (!strcmp(intrin, "isnan")) ret = fold_intrin_isnan(fold, (ast_value*)arg[0]);
+    if (!strcmp(intrin, "fabs"))  ret = fold_intrin_fabs (fold, (ast_value*)arg[0]);
+
+    if (ret)
+        ++opts_optimizationcount[OPTIM_CONST_FOLD];
+
+    return ret;
+}
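/*
 * Editorial sketch (not part of this commit): assuming the parser hands a
 * constant-argument intrinsic call to this selector, something like a
 * source-level pow(2, 4) reaches fold_intrin(fold, "pow", args) and is
 * replaced by the immediate 16.0f via fold_constgen_float, so no call
 * survives to runtime.
 */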
+
+/*
+ * These are all the actual constant folding methods that happen between
+ * the AST and IR stages of the compiler, i.e. eliminating branches for
+ * constant expressions, which is the only thing supported so far. We
+ * undefine the testing macros here because an ir_value is different from
+ * an ast_value.
+ */
+#undef expect
+#undef isfloat
+#undef isstring
+#undef isvector
+#undef fold_immvalue_float
+#undef fold_immvalue_string
+#undef fold_immvalue_vector
+#undef fold_can_1
+#undef fold_can_2
+
+#define isfloat(X)              ((X)->vtype == TYPE_FLOAT)
+/*#define isstring(X)             ((X)->vtype == TYPE_STRING)*/
+/*#define isvector(X)             ((X)->vtype == TYPE_VECTOR)*/
+#define fold_immvalue_float(X)  ((X)->constval.vfloat)
+#define fold_immvalue_vector(X) ((X)->constval.vvec)
+/*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
+#define fold_can_1(X)           ((X)->hasvalue && (X)->cvq == CV_CONST)
+/*#define fold_can_2(X,Y)         (fold_can_1(X) && fold_can_1(Y))*/
+
+static ast_expression *fold_superfluous(ast_expression *left, ast_expression *right, int op) {
+    ast_expression *swapped = NULL; /* using this as bool */
+    ast_value      *load;
+
+    if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right))) {
+        swapped = left;
+        left    = right;
+        right   = swapped;
+    }
+
+    if (!ast_istype(right, ast_value) || !fold_can_1((load = (ast_value*)right)))
+        return NULL;
+
+    switch (op) {
+        case INSTR_DIV_F:
+            if (swapped)
+                return NULL;
+        case INSTR_MUL_F:
+            if (fold_immvalue_float(load) == 1.0f) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                ast_unref(right);
+                return left;
+            }
+            break;
+
+        case INSTR_ADD_F:
+        case INSTR_SUB_F:
+            if (fold_immvalue_float(load) == 0.0f) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                ast_unref(right);
+                return left;
+            }
+            break;
+
+        case INSTR_MUL_V:
+            if (vec3_cmp(fold_immvalue_vector(load), vec3_create(1, 1, 1))) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                ast_unref(right);
+                return left;
+            }
+            break;
+
+        case INSTR_ADD_V:
+        case INSTR_SUB_V:
+            if (vec3_cmp(fold_immvalue_vector(load), vec3_create(0, 0, 0))) {
+                ++opts_optimizationcount[OPTIM_PEEPHOLE];
+                ast_unref(right);
+                return left;
+            }
+            break;
+    }
+
+    return NULL;
+}
+
+ast_expression *fold_binary(lex_ctx_t ctx, int op, ast_expression *left, ast_expression *right) {
+    ast_expression *ret = fold_superfluous(left, right, op);
+    if (ret)
+        return ret;
+    return (ast_expression*)ast_binary_new(ctx, op, left, right);
+}
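/*
 * Editorial sketch (not part of this commit): fold_binary lets the parser
 * build binaries without caring about identity operands; fold_superfluous
 * strips them when one side is a suitable constant:
 *
 *     x * 1   ->  x      (INSTR_MUL_F)
 *     x + 0   ->  x      (INSTR_ADD_F)
 *     1 / x   stays      (division is not commutative, hence the swapped
 *                         check bailing out for INSTR_DIV_F)
 */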
+ */ + func->curblock = elide; + ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE]; + return true; + } + return -1; /* nothing done */ +} + +int fold_cond_ternary(ir_value *condval, ast_function *func, ast_ternary *branch) { + return fold_cond(condval, func, (ast_ifthen*)branch); +} + +int fold_cond_ifthen(ir_value *condval, ast_function *func, ast_ifthen *branch) { + return fold_cond(condval, func, branch); +}