@@ -43,6 +43,7 @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
 DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(nand, 1, 2, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
@@ -94,7 +95,6 @@ DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
 DEF(neg_i32, 1, 1, 0, 0)
-DEF(nand_i32, 1, 2, 0, 0)
 DEF(nor_i32, 1, 2, 0, 0)
 DEF(clz_i32, 1, 2, 0, 0)
 DEF(ctz_i32, 1, 2, 0, 0)
@@ -147,7 +147,6 @@ DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
 DEF(not_i64, 1, 1, 0, 0)
 DEF(neg_i64, 1, 1, 0, 0)
-DEF(nand_i64, 1, 2, 0, 0)
 DEF(nor_i64, 1, 2, 0, 0)
 DEF(clz_i64, 1, 2, 0, 0)
 DEF(ctz_i64, 1, 2, 0, 0)
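
The three hunks above drop the per-width nand_i32/nand_i64 rows in favor of a
single width-independent opcode, flagged TCG_OPF_INT like its neighbors. For
orientation, each DEF(name, oargs, iargs, cargs, flags) row is expanded by the
file that includes tcg-opc.h; a minimal sketch of the opcode-enum expansion,
modeled on include/tcg/tcg.h, looks like this:

/* Each DEF() row becomes one enumerator, so the single DEF(nand, ...)
 * above yields one INDEX_op_nand usable at both TCG_TYPE_I32 and
 * TCG_TYPE_I64, by virtue of the TCG_OPF_INT flag. */
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags)  INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;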
@@ -481,7 +481,8 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_eqv_vec:
         return ~(x ^ y);
 
-    CASE_OP_32_64_VEC(nand):
+    case INDEX_op_nand:
+    case INDEX_op_nand_vec:
         return ~(x & y);
 
     CASE_OP_32_64_VEC(nor):
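
With nand a single integer opcode, the CASE_OP_32_64_VEC() macro (which pastes
the _i32/_i64/_vec suffixes onto a name) no longer applies, so the integer and
vector cases are written out. The fold itself stays width-agnostic: it computes
in 64 bits and the caller truncates the result for 32-bit operations. A
standalone illustration of that contract (fold_nand_value is hypothetical, not
QEMU code):

#include <assert.h>
#include <stdint.h>

/* Evaluate nand at full width, then narrow if the op is 32-bit,
 * mirroring how a 64-bit fold result is masked for TCG_TYPE_I32. */
static uint64_t fold_nand_value(uint64_t x, uint64_t y, int is_32bit)
{
    uint64_t r = ~(x & y);
    return is_32bit ? (uint32_t)r : r;
}

int main(void)
{
    assert(fold_nand_value(0xff00ff00ff00ff00ull, ~0ull, 1) == 0x00ff00ffull);
    assert(fold_nand_value(0, 0, 0) == ~0ull);
    return 0;
}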
@@ -2980,7 +2981,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mulu2):
             done = fold_multiply2(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(nand):
+        case INDEX_op_nand:
+        case INDEX_op_nand_vec:
             done = fold_nand(&ctx, op);
             break;
         CASE_OP_32_64(neg):
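
The dispatch to fold_nand changes only in its case labels. For context, the
algebraic simplifications such a fold can rely on are plain Boolean identities;
a standalone check (illustration only, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 0x123456789abcdef0ull, y = 0x0ff0f00f0f0ff0f0ull;
    assert(~(x & ~0ull) == ~x);     /* nand(x, -1) == not(x) */
    assert(~(x & y) == ~(y & x));   /* nand is commutative */
    return 0;
}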
@@ -690,8 +690,8 @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_nand_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_nand, ret, arg1, arg2);
     } else {
         tcg_gen_and_i32(ret, arg1, arg2);
         tcg_gen_not_i32(ret, ret);
@@ -2292,8 +2292,8 @@ void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-    } else if (tcg_op_supported(INDEX_op_nand_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_nand, ret, arg1, arg2);
     } else {
         tcg_gen_and_i64(ret, arg1, arg2);
         tcg_gen_not_i64(ret, ret);
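
At the tcg-op.c layer only the queried and emitted opcode name changes; the
expansion strategy is untouched: split into two 32-bit nands on a 32-bit host,
emit the opcode directly when the backend supports it, and otherwise fall back
to and followed by not. From a frontend's point of view nothing moves; a
hypothetical call site (gen_example is not a QEMU function, and this assumes
QEMU's tcg headers):

/* Callers are unaffected by the opcode rename. */
static void gen_example(TCGv_i64 dst, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_nand_i64(dst, a, b);    /* dst = ~(a & b), however expanded */
}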
@@ -1008,8 +1008,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
-    OUTOP(INDEX_op_nand_i32, TCGOutOpBinary, outop_nand),
-    OUTOP(INDEX_op_nand_i64, TCGOutOpBinary, outop_nand),
+    OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -5428,8 +5427,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_eqv:
-    case INDEX_op_nand_i32:
-    case INDEX_op_nand_i64:
+    case INDEX_op_nand:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
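
Both tables previously keyed two enum values to the same outop_nand emitter;
one INDEX_op_nand entry now suffices, and the constraint switch in
tcg_reg_alloc_op shrinks to match. For orientation, TCGOutOpBinary in this
series is roughly the following shape (a sketch from memory; the field set may
differ, consult tcg/tcg.c for the authoritative definition):

typedef struct TCGOutOpBinary {
    TCGOutOp base;                    /* shared constraint information */
    void (*out_rrr)(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2);
    void (*out_rri)(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, tcg_target_long a2);
} TCGOutOpBinary;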
@@ -559,7 +559,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = ~(regs[r1] ^ regs[r2]);
             break;
-        CASE_32_64(nand)
+        case INDEX_op_nand:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = ~(regs[r1] & regs[r2]);
             break;
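
CASE_32_64() is tci.c's local helper that expands one name into both the _i32
and _i64 case labels; with a single opcode it no longer applies here. Its
definition is approximately the following (from memory; see tcg/tci.c):

#define CASE_32_64(x)                          \
    case glue(glue(INDEX_op_, x), _i64):       \
    case glue(glue(INDEX_op_, x), _i32):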
@@ -1078,6 +1078,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_eqv:
+    case INDEX_op_nand:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
@@ -1085,8 +1086,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
-    case INDEX_op_nand_i32:
-    case INDEX_op_nand_i64:
     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
     case INDEX_op_div_i32:
@@ -327,7 +327,7 @@ Logical
 
      - | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*
 
-   * - nand_i32/i64 *t0*, *t1*, *t2*
+   * - nand *t0*, *t1*, *t2*
 
      - | *t0* = ~(*t1* & *t2*)
 
@@ -672,7 +672,7 @@ static const TCGOutOpBinary outop_eqv = {
 static void tgen_nand(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_nand_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
 }
 
 static const TCGOutOpBinary outop_nand = {
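
The TCI backend previously had to paste the host register width onto the
opcode name: glue() is QEMU's token-pasting helper from
include/qemu/compiler.h, so glue(INDEX_op_nand_i, TCG_TARGET_REG_BITS)
resolved to INDEX_op_nand_i32 or INDEX_op_nand_i64 at compile time. With a
width-independent opcode the paste disappears. For reference:

#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)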