@@ -964,24 +964,25 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
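
The two helpers split what finish_folding previously did inline at block
boundaries: finish_bb() drops only the memory-barrier tracking, while
finish_ebb() additionally forgets all known temp values and tracked memory
copies. As a sketch (not part of the patch), the deleted flag-based test is
equivalent to dispatching on the same TCGOpDef flags with the new helpers:

    /* Illustrative only: the removed TCG_OPF_BB_END logic, re-expressed
     * with the new helpers.  A conditional branch ends just the basic
     * block; any other block-ending opcode ends the extended BB.
     */
    if (def->flags & TCG_OPF_BB_END) {
        if (def->flags & TCG_OPF_COND_BRANCH) {
            finish_bb(ctx);
        } else {
            finish_ebb(ctx);
        }
        return;
    }

The rest of the patch moves this decision out of finish_folding and into
the fold functions for branches and the main opcode switch.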
@@ -1351,8 +1352,11 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
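
With the helpers in place, fold_brcond finishes its own block: a comparison
folded to always-true becomes an unconditional br that ends the extended
basic block, while an unfolded conditional branch ends only the basic block
(the fall-through edge keeps the temp data live). Returning true instead of
false tells the caller that no generic cleanup is needed; a sketch of that
contract, assuming the usual shape of the main loop in tcg_optimize:

    /* Sketch: a fold handler that returns true has already done any
     * required block-end bookkeeping, so the generic path is skipped.
     */
    done = fold_brcond(&ctx, op);
    if (!done) {
        finish_folding(&ctx, op);
    }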
@@ -1443,9 +1447,12 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
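
fold_brcond2 gets the same treatment. Recall that brcond2 compares a 64-bit
value carried as two 32-bit halves; conceptually (a sketch of the semantics,
not QEMU code):

    /* if (cond((uint64_t)ah << 32 | al, (uint64_t)bh << 32 | bl))
     *     goto label;
     */

Previously the always-taken case rewrote the op to br and fell through a
break to return false, leaving the block-end reset to finish_folding. Now
each exit finishes its own block: a branch proven taken ends the extended
basic block, and an unfolded branch ends only the basic block. (The
known-not-taken case elsewhere in the function removes the op outright, so
no block boundary remains and no reset is needed.)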
@@ -3037,6 +3044,14 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
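
The explicit case list replaces the deleted TCG_OPF_BB_END test for the
opcodes that are not conditional branches: set_label, br, exit_tb, goto_tb
and goto_ptr all terminate an extended basic block, so each resets the full
EBB state. set_label is the subtle one: it transfers no control itself, but
the label may be reached from other branches, so facts proven on the
fall-through path cannot survive it. A hypothetical op stream illustrating
this:

    /* mov_i32 t0, $0x0      -- t0 known zero on this path
     * set_label $L1         -- $L1 is also a branch target: on other
     *                          incoming edges t0 may hold anything, so
     *                          its known value must be dropped here
     */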