return (a.x && a.y && a.z);
}
-static GMQCC_INLINE bool fold_can_1(const ast_value *val) {
- return (ast_istype(((ast_expression*)(val)), ast_value) && val->hasvalue && (val->cvq == CV_CONST) &&
- ((ast_expression*)(val))->vtype != TYPE_FUNCTION);
-}
-
-static GMQCC_INLINE bool fold_can_2(const ast_value *v1, const ast_value *v2) {
- return fold_can_1(v1) && fold_can_1(v2);
-}
-
static lex_ctx_t fold_ctx(fold_t *fold) {
lex_ctx_t ctx;
if (fold->parser->lex)
return !!v->constval.vfunc;
}
+/* Handy macros to determine if an ast_value can be constant folded. */
+#define fold_can_1(X) \
+ (ast_istype(((ast_expression*)(X)), ast_value) && (X)->hasvalue && ((X)->cvq == CV_CONST) && \
+ ((ast_expression*)(X))->vtype != TYPE_FUNCTION)
+
+#define fold_can_2(X, Y) (fold_can_1(X) && fold_can_1(Y))
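+
+/*
+ * Illustrative only (not part of this change): a typical fold helper
+ * guards on these macros before reading immediate values, e.g.
+ *
+ *   if (fold_can_2(a, b))
+ *       return fold_constgen_float(fold, fold_immvalue_float(a) + fold_immvalue_float(b));
+ *
+ * fold_constgen_float and fold_immvalue_float are the existing
+ * constant-generation and immediate-access helpers used below.
+ */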
+
#define fold_immvalue_float(E) ((E)->constval.vfloat)
#define fold_immvalue_vector(E) ((E)->constval.vvec)
#define fold_immvalue_string(E) ((E)->constval.vstring)
* for creating the elided ast binary expression.
*
 * Consider 'n 0 0' where y and z need to be tested for 0, and x is
- * used as the value in a binary operation generating an INSTR_MUL instruction
+ * used as the value in a binary operation generating an INSTR_MUL instruction.
 * To accomplish the indexing of the correct component value we use set[0], set[1], set[2]
 * as x, y, z, where the values of those operations return 'x', 'y', 'z'. Because
 * of how ASCII works we can easily delineate:
static GMQCC_INLINE ast_expression *fold_op_mul(fold_t *fold, ast_value *a, ast_value *b) {
if (isfloat(a)) {
- if (isfloat(b)) {
+ if (isvector(b)) {
if (fold_can_2(a, b))
return fold_constgen_vector(fold, vec3_mulvf(fold_immvalue_vector(b), fold_immvalue_float(a)));
} else {
return NULL;
}
-static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float or) {
+static GMQCC_INLINE ast_expression *fold_op_andor(fold_t *fold, ast_value *a, ast_value *b, float expr) {
if (fold_can_2(a, b)) {
if (OPTS_FLAG(PERL_LOGIC)) {
if (fold_immediate_true(fold, a))
} else {
return fold_constgen_float (
fold,
- ((or) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
- : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
+ ((expr) ? (fold_immediate_true(fold, a) || fold_immediate_true(fold, b))
+ : (fold_immediate_true(fold, a) && fold_immediate_true(fold, b)))
? 1
: 0
);
}
ast_expression *fold_op(fold_t *fold, const oper_info *info, ast_expression **opexprs) {
- ast_value *a = (ast_value*)opexprs[0];
- ast_value *b = (ast_value*)opexprs[1];
- ast_value *c = (ast_value*)opexprs[2];
+ ast_value *a = (ast_value*)opexprs[0];
+ ast_value *b = (ast_value*)opexprs[1];
+ ast_value *c = (ast_value*)opexprs[2];
+ ast_expression *e = NULL;
/* can a fold operation be applied to this operator usage? */
if (!info->folds)
case 2: if(!b) return NULL;
case 1:
if(!a) {
- compile_error(fold_ctx(fold), "interal error: fold_op no operands to fold\n");
+ compile_error(fold_ctx(fold), "internal error: fold_op no operands to fold\n");
return NULL;
}
}
+    /*
+     * we could use a boolean and a default case instead, but ironically gcc
+     * produces broken assembly from that construct. clang/tcc get it right,
+     * but interestingly no longer compile this to a jump-table when I do
+     * that. This happens to be the most efficient method, since you have
+     * per-case granularity: the pointer check happens only for the case you
+     * check it in, as opposed to the default method, which would involve a
+     * boolean and a pointer check afterwards.
+     */
+ #define fold_op_case(ARGS, ARGS_OPID, OP, ARGS_FOLD) \
+ case opid##ARGS ARGS_OPID: \
+ if ((e = fold_op_##OP ARGS_FOLD)) { \
+ ++opts_optimizationcount[OPTIM_CONST_FOLD]; \
+ } \
+ return e
+
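+    /*
+     * For reference, each fold_op_case invocation below expands to an
+     * ordinary labelled case; e.g. fold_op_case(2, ('!', 'P'), not, (fold, a))
+     * becomes:
+     *
+     *   case opid2('!', 'P'):
+     *       if ((e = fold_op_not(fold, a))) {
+     *           ++opts_optimizationcount[OPTIM_CONST_FOLD];
+     *       }
+     *       return e;
+     */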
switch(info->id) {
- case opid2('-','P'): return fold_op_neg (fold, a);
- case opid2('!','P'): return fold_op_not (fold, a);
- case opid1('+'): return fold_op_add (fold, a, b);
- case opid1('-'): return fold_op_sub (fold, a, b);
- case opid1('*'): return fold_op_mul (fold, a, b);
- case opid1('/'): return fold_op_div (fold, a, b);
- case opid1('%'): return fold_op_mod (fold, a, b);
- case opid1('|'): return fold_op_bor (fold, a, b);
- case opid1('&'): return fold_op_band (fold, a, b);
- case opid1('^'): return fold_op_xor (fold, a, b);
- case opid2('<','<'): return fold_op_lshift (fold, a, b);
- case opid2('>','>'): return fold_op_rshift (fold, a, b);
- case opid2('|','|'): return fold_op_andor (fold, a, b, true);
- case opid2('&','&'): return fold_op_andor (fold, a, b, false);
- case opid2('?',':'): return fold_op_tern (fold, a, b, c);
- case opid2('*','*'): return fold_op_exp (fold, a, b);
- case opid3('<','=','>'): return fold_op_lteqgt (fold, a, b);
- case opid2('!','='): return fold_op_cmp (fold, a, b, true);
- case opid2('=','='): return fold_op_cmp (fold, a, b, false);
- case opid2('~','P'): return fold_op_bnot (fold, a);
+ fold_op_case(2, ('-', 'P'), neg, (fold, a));
+ fold_op_case(2, ('!', 'P'), not, (fold, a));
+ fold_op_case(1, ('+'), add, (fold, a, b));
+ fold_op_case(1, ('-'), sub, (fold, a, b));
+ fold_op_case(1, ('*'), mul, (fold, a, b));
+ fold_op_case(1, ('/'), div, (fold, a, b));
+ fold_op_case(1, ('%'), mod, (fold, a, b));
+ fold_op_case(1, ('|'), bor, (fold, a, b));
+ fold_op_case(1, ('&'), band, (fold, a, b));
+ fold_op_case(1, ('^'), xor, (fold, a, b));
+ fold_op_case(2, ('<', '<'), lshift, (fold, a, b));
+ fold_op_case(2, ('>', '>'), rshift, (fold, a, b));
+ fold_op_case(2, ('|', '|'), andor, (fold, a, b, true));
+ fold_op_case(2, ('&', '&'), andor, (fold, a, b, false));
+ fold_op_case(2, ('?', ':'), tern, (fold, a, b, c));
+ fold_op_case(2, ('*', '*'), exp, (fold, a, b));
+        fold_op_case(3, ('<', '=', '>'), lteqgt, (fold, a, b));
+ fold_op_case(2, ('!', '='), cmp, (fold, a, b, true));
+ fold_op_case(2, ('=', '='), cmp, (fold, a, b, false));
+ fold_op_case(2, ('~', 'P'), bnot, (fold, a));
}
+ #undef fold_op_case
+    compile_error(fold_ctx(fold), "internal error: attempted to constant-fold an unsupported operator");
return NULL;
}
+
+/*
+ * These are all the actual constant folding methods that happen in between
+ * the AST and IR stages of the compiler, i.e. eliminating branches on
+ * constant expressions, which is the only thing supported so far. We
+ * undefine the testing macros here because an ir_value is different from
+ * an ast_value.
+ */
+#undef isfloat
+#undef isstring
+#undef isvector
+#undef fold_immvalue_float
+#undef fold_immvalue_string
+#undef fold_immvalue_vector
+#undef fold_can_1
+#undef fold_can_2
+
+#define isfloat(X) ((X)->vtype == TYPE_FLOAT)
+/*#define isstring(X) ((X)->vtype == TYPE_STRING)*/
+/*#define isvector(X) ((X)->vtype == TYPE_VECTOR)*/
+#define fold_immvalue_float(X) ((X)->constval.vfloat)
+/*#define fold_immvalue_vector(X) ((X)->constval.vvec)*/
+/*#define fold_immvalue_string(X) ((X)->constval.vstring)*/
+#define fold_can_1(X) ((X)->hasvalue && (X)->cvq == CV_CONST)
+/*#define fold_can_2(X,Y) (fold_can_1(X) && fold_can_1(Y))*/
+
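+/*
+ * A sketch of the effect (hypothetical QuakeC input): given
+ *
+ *   if (1) ontrue_body(); else onfalse_body();
+ *
+ * fold_cond emits code for the on_true path only and continues codegen in
+ * a fresh 'elide' block; the conditional branch and the dead on_false
+ * path are never generated.
+ */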
+int fold_cond(ir_value *condval, ast_function *func, ast_ifthen *branch) {
+ if (isfloat(condval) && fold_can_1(condval) && OPTS_OPTIMIZATION(OPTIM_CONST_FOLD_DCE)) {
+ ast_expression_codegen *cgen;
+ ir_block *elide;
+ ir_value *dummy;
+ bool istrue = (fold_immvalue_float(condval) == 1.0f && branch->on_true);
+ bool isfalse = (fold_immvalue_float(condval) == 0.0f && branch->on_false);
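+        /*
+         * Select the surviving path: the on_true body when the condition is
+         * a constant 1.0f, the on_false body when it is a constant 0.0f.
+         */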
+ ast_expression *path = (istrue) ? branch->on_true :
+ (isfalse) ? branch->on_false : NULL;
+ if (!path)
+ return false;
+ if (!(elide = ir_function_create_block(ast_ctx(branch), func->ir_func, ast_function_label(func, ((istrue) ? "ontrue" : "onfalse")))))
+ return false;
+ if (!(*(cgen = path->codegen))((ast_expression*)path, func, false, &dummy))
+ return false;
+ if (!ir_block_create_jump(func->curblock, ast_ctx(branch), elide))
+ return false;
+ /*
+ * now the branch has been eliminated and the correct block for the constant evaluation
+ * is expanded into the current block for the function.
+ */
+ func->curblock = elide;
+ ++opts_optimizationcount[OPTIM_CONST_FOLD_DCE];
+ return true;
+ }
+ return -1; /* nothing done */
+}