--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -42,6 +42,7 @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(divs, 1, 2, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
@@ -68,7 +69,6 @@ DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
-DEF(div_i32, 1, 2, 0, 0)
DEF(divu_i32, 1, 2, 0, 0)
DEF(rem_i32, 1, 2, 0, 0)
DEF(remu_i32, 1, 2, 0, 0)
@@ -116,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* arith */
-DEF(div_i64, 1, 2, 0, 0)
DEF(divu_i64, 1, 2, 0, 0)
DEF(rem_i64, 1, 2, 0, 0)
DEF(remu_i64, 1, 2, 0, 0)
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -544,13 +544,15 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
muls64(&l64, &h64, x, y);
return h64;
- case INDEX_op_div_i32:
+ case INDEX_op_divs:
/* Avoid crashing on divide by zero, otherwise undefined. */
- return (int32_t)x / ((int32_t)y ? : 1);
+ if (type == TCG_TYPE_I32) {
+ return (int32_t)x / ((int32_t)y ? : 1);
+ }
+ return (int64_t)x / ((int64_t)y ? : 1);
+
case INDEX_op_divu_i32:
return (uint32_t)x / ((uint32_t)y ? : 1);
- case INDEX_op_div_i64:
- return (int64_t)x / ((int64_t)y ? : 1);
case INDEX_op_divu_i64:
return (uint64_t)x / ((uint64_t)y ? : 1);
@@ -2893,7 +2895,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(deposit):
done = fold_deposit(&ctx, op);
break;
- CASE_OP_32_64(div):
+ case INDEX_op_divs:
CASE_OP_32_64(divu):
done = fold_divide(&ctx, op);
break;
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -601,8 +601,8 @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_sari_i32(t0, arg1, 31);
@@ -617,9 +617,9 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
if (TCG_TARGET_HAS_rem_i32) {
tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
- } else if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
+ } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
tcg_gen_mul_i32(t0, t0, arg2);
tcg_gen_sub_i32(ret, arg1, t0);
tcg_temp_free_i32(t0);
@@ -1969,8 +1969,8 @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_sari_i64(t0, arg1, 63);
@@ -1985,9 +1985,9 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_HAS_rem_i64) {
tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
- } else if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
+ } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
tcg_gen_mul_i64(t0, t0, arg2);
tcg_gen_sub_i64(ret, arg1, t0);
tcg_temp_free_i64(t0);
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1020,8 +1020,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
- OUTOP(INDEX_op_div_i32, TCGOutOpBinary, outop_divs),
- OUTOP(INDEX_op_div_i64, TCGOutOpBinary, outop_divs),
+ OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -5411,8 +5410,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
+ case INDEX_op_divs:
case INDEX_op_eqv:
case INDEX_op_mul:
case INDEX_op_mulsh:
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -720,7 +720,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
/* Arithmetic operations (64 bit). */
- case INDEX_op_div_i64:
+ case INDEX_op_divs:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
break;
@@ -1071,6 +1071,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
+ case INDEX_op_divs:
case INDEX_op_eqv:
case INDEX_op_mul:
case INDEX_op_nand:
@@ -1079,8 +1080,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_orc:
case INDEX_op_sub:
case INDEX_op_xor:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
case INDEX_op_rem_i32:
case INDEX_op_rem_i64:
case INDEX_op_divu_i32:
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -277,7 +277,7 @@ Arithmetic
- | *t0* = *t1* * *t2*
- * - div_i32/i64 *t0*, *t1*, *t2*
+ * - divs *t0*, *t1*, *t2*
- | *t0* = *t1* / *t2* (signed)
| Undefined behavior if division by zero or overflow.
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -651,7 +651,7 @@ static void tgen_divs(TCGContext *s, TCGType type,
{
TCGOpcode opc = (type == TCG_TYPE_I32
? INDEX_op_tci_divs32
- : INDEX_op_div_i64);
+ : INDEX_op_divs);
tcg_out_op_rrr(s, opc, a0, a1, a2);
}
Rename to INDEX_op_divs to emphasize signed inputs, mirroring
INDEX_op_divu_*.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 12 +++++++-----
 tcg/tcg-op.c             | 16 ++++++++--------
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                |  5 ++---
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 22 insertions(+), 24 deletions(-)
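As a side note, a minimal standalone sketch (not code from the tree; the helper
name and the is_32bit flag are illustrative only) of the divide-by-zero guard
that do_constant_folding_2 applies to the new INDEX_op_divs case: a zero
divisor is swapped for 1 because the guest-visible result is undefined anyway,
and the only hard requirement is that the host does not trap while folding.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative only: mirrors the guard used when folding INDEX_op_divs.
     * A zero divisor is replaced by 1 so the host division cannot fault;
     * the guest result is undefined in that case anyway. */
    static int64_t fold_divs_sketch(uint64_t x, uint64_t y, bool is_32bit)
    {
        if (is_32bit) {
            return (int32_t)x / ((int32_t)y ? (int32_t)y : 1);
        }
        return (int64_t)x / ((int64_t)y ? (int64_t)y : 1);
    }

Similarly, the tcg_gen_rem_i32/_i64 fallback in this patch relies on the
identity rem = a - (a / b) * b when the backend implements divs but not rem;
a plain-C sketch of that identity (function name illustrative):

    /* Remainder computed from signed division, as in the divs-based
     * fallback: q = a / b (INDEX_op_divs), then rem = a - q * b (mul, sub). */
    static int32_t rem_via_divs32(int32_t a, int32_t b)
    {
        int32_t q = a / b;
        return a - q * b;
    }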