--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -54,6 +54,7 @@ DEF(divs2, 2, 3, 0, TCG_OPF_INT)
DEF(divu, 1, 2, 0, TCG_OPF_INT)
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(extract, 1, 1, 2, TCG_OPF_INT)
DEF(movcond, 1, 4, 1, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
@@ -89,7 +90,6 @@ DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i32, 1, 2, 2, 0)
-DEF(extract_i32, 1, 1, 2, 0)
DEF(sextract_i32, 1, 1, 2, 0)
DEF(extract2_i32, 1, 2, 1, 0)
@@ -112,7 +112,6 @@ DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i64, 1, 2, 2, 0)
-DEF(extract_i64, 1, 1, 2, 0)
DEF(sextract_i64, 1, 1, 2, 0)
DEF(extract2_i64, 1, 2, 1, 0)
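
The DEF() arguments are (name, outputs, inputs, constant args, flags), so the
merged opcode keeps the same shape as the two it replaces: one output, one
register input, and two constant operands (pos, len), now flagged TCG_OPF_INT
so a single definition covers both integer types. As a minimal sketch of what
the opcode computes (illustrative helper, not the in-tree implementation):

    /* extract: zero-extended bitfield read; len is in [1, width]. */
    static uint64_t extract_semantics(uint64_t src, unsigned pos, unsigned len)
    {
        uint64_t mask = (len == 64) ? ~0ULL : (1ULL << len) - 1;
        return (src >> pos) & mask;
    }
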
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2305,7 +2305,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode uext_opc = 0, sext_opc = 0;
+ TCGOpcode sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
TCGOp *op2;
@@ -2326,17 +2326,11 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
switch (ctx->type) {
case TCG_TYPE_I32:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
- uext_opc = INDEX_op_extract_i32;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
sext_opc = INDEX_op_sextract_i32;
}
break;
case TCG_TYPE_I64:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
- uext_opc = INDEX_op_extract_i64;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
sext_opc = INDEX_op_sextract_i64;
}
@@ -2355,8 +2349,8 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
op->args[2] = sh;
op->args[3] = 1;
return;
- } else if (sh && uext_opc) {
- op->opc = uext_opc;
+ } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_extract;
op->args[1] = src1;
op->args[2] = sh;
op->args[3] = 1;
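
With a single extract opcode there is no need to pre-select a per-type
uext_opc; the TCG_TARGET_extract_valid() check moves to the point of use and
the emitted opcode is always INDEX_op_extract. The rewrite itself rests on a
simple identity, sketched here with hypothetical names:

    /* "setcond TSTNE x, (1 << sh)" asks whether bit sh is set, which is
     * exactly a one-bit unsigned extract (illustrative, not in-tree code). */
    static bool tst_pow2_identity(uint64_t x, unsigned sh)
    {
        uint64_t as_setcond = (x & (1ULL << sh)) != 0;
        uint64_t as_extract = (x >> sh) & 1;
        return as_setcond == as_extract;   /* holds for all x, sh */
    }
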
@@ -2885,7 +2879,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_eqv_vec:
done = fold_eqv(&ctx, op);
break;
- CASE_OP_32_64(extract):
+ case INDEX_op_extract:
done = fold_extract(&ctx, op);
break;
CASE_OP_32_64(extract2):
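
The dispatch no longer needs the CASE_OP_32_64() convenience macro for
extract. That macro (paraphrased from tcg/optimize.c; check the tree for the
exact definition) simply pastes the two width-specific labels, so the change
is a strict narrowing to one label:

    /* Paraphrase of the optimizer's helper macro: */
    #define CASE_OP_32_64(x)                     \
            glue(glue(case INDEX_op_, x), _i32): \
            glue(glue(case INDEX_op_, x), _i64)
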
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -998,7 +998,7 @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
}
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
@@ -1008,7 +1008,7 @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i32(ret, ret, ofs);
return;
}
@@ -2670,7 +2670,7 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
}
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
@@ -2680,7 +2680,7 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i64(ret, ret, ofs);
return;
}
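
Both the i32 and i64 expanders keep the same fallback strategy, only the
emitted opcode changes: if the backend cannot extract at (ofs, len) but can
zero-extend to ofs + len bits, emit the wider extract at offset 0 and shift
right. The identity behind it, sketched for the 32-bit case with illustrative
names:

    /* extract(x, ofs, len) == extract(x, 0, ofs + len) >> ofs:
     * masking above bit ofs+len-1 first, then shifting, leaves exactly
     * the len field bits (assumes ofs + len <= 32). */
    static uint32_t extract_via_zext(uint32_t x, unsigned ofs, unsigned len)
    {
        uint32_t zext = x & (uint32_t)(((uint64_t)1 << (ofs + len)) - 1);
        return zext >> ofs;
    }
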
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1091,8 +1091,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
- OUTOP(INDEX_op_extract_i32, TCGOutOpExtract, outop_extract),
- OUTOP(INDEX_op_extract_i64, TCGOutOpExtract, outop_extract),
+ OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -2326,6 +2325,7 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_brcond:
+ case INDEX_op_extract:
case INDEX_op_mov:
case INDEX_op_movcond:
case INDEX_op_negsetcond:
@@ -2342,7 +2342,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_deposit_i32:
return true;
@@ -2371,7 +2370,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
case INDEX_op_deposit_i64:
return TCG_TARGET_REG_BITS == 64;
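
INDEX_op_extract thus joins the group of generically supported integer
opcodes, with the operand width carried by the TCGType argument instead of
the opcode name. An illustrative query (signature as in the hunk header
above):

    /* One opcode, two types, replacing the two per-width queries: */
    bool ok32 = tcg_op_supported(INDEX_op_extract, TCG_TYPE_I32, 0);
    bool ok64 = tcg_op_supported(INDEX_op_extract, TCG_TYPE_I64, 0);
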
@@ -5507,8 +5505,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
{
const TCGOutOpExtract *out =
container_of(all_outop[op->opc], TCGOutOpExtract, base);
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -27,6 +27,7 @@
#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+#define extract_tr glue(extract, TCG_TARGET_REG_BITS)
/*
* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
@@ -656,9 +657,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i32:
+ case INDEX_op_extract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract32(regs[r1], pos, len);
+ regs[r0] = extract_tr(regs[r1], pos, len);
break;
case INDEX_op_sextract_i32:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
@@ -772,10 +773,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract64(regs[r1], pos, len);
- break;
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
regs[r0] = sextract64(regs[r1], pos, len);
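
As with ctpop_tr just above it, the new extract_tr macro glues the operation
name to the host register width, so the single interpreter case picks
extract32() on 32-bit hosts and extract64() on 64-bit hosts. For reference,
those helpers behave like this (paraphrased from qemu/bitops.h, which
additionally asserts the range of start and length):

    static inline uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    static inline uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }
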
@@ -1057,8 +1054,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
break;
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -456,7 +456,7 @@ Misc
|
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
- * - extract_i32/i64 *dest*, *t1*, *pos*, *len*
+ * - extract *dest*, *t1*, *pos*, *len*
sextract_i32/i64 *dest*, *t1*, *pos*, *len*
@@ -467,12 +467,12 @@ Misc
to the left with zeros; for sextract_*, the result will be extended
to the left with copies of the bitfield sign bit at *pos* + *len* - 1.
|
- | For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
+ | For example, "sextract dest, t1, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* << 20) >> 28
|
- | (using an arithmetic right shift).
+ | (using an arithmetic right shift) on TCG_TYPE_I32.
* - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
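
The reworded example is exact for TCG_TYPE_I32: a 4-bit field at bit 8 is
isolated by shifting left 32 - (8 + 4) = 20 bits, then arithmetically right
32 - 4 = 28 bits. A quick illustrative check, assuming the arithmetic right
shift the text already presumes:

    uint32_t t1 = 0x00000a00;                  /* bits 8..11 = 0b1010 = -6 */
    int32_t dest = (int32_t)(t1 << 20) >> 28;  /* sextract(t1, 8, 4) */
    assert(dest == -6);
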
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -436,10 +436,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
TCGReg rs, unsigned pos, unsigned len)
{
- TCGOpcode opc = type == TCG_TYPE_I32 ?
- INDEX_op_extract_i32 :
- INDEX_op_extract_i64;
- tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+ tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}
static const TCGOutOpExtract outop_extract = {
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 14 ++++----------
 tcg/tcg-op.c             |  8 ++++----
 tcg/tcg.c                |  9 +++------
 tcg/tci.c                | 12 ++++--------
 docs/devel/tcg-ops.rst   |  6 +++---
 tcg/tci/tcg-target.c.inc |  5 +----
 7 files changed, 20 insertions(+), 37 deletions(-)