@@ -560,6 +560,11 @@ struct TCGContext {
int nb_ops;
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
+#ifdef CONFIG_SOFTMMU
+ int page_mask;
+ uint8_t page_bits;
+#endif
+
TCGRegSet reserved_regs;
intptr_t current_frame_offset;
intptr_t frame_start;
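
The two new fields are per-translation copies of TARGET_PAGE_BITS and TARGET_PAGE_MASK, which lets the backend hunks below drop their compile-time dependency on the target's page size. Note that page_mask is a signed int on purpose: TARGET_PAGE_MASK is sign-extended, and the widening casts used further down rely on that. A minimal, self-contained sketch (plain C, struct name hypothetical) of why the (uint64_t) casts below stay correct:

    #include <assert.h>
    #include <stdint.h>

    struct ctx { int page_mask; uint8_t page_bits; };

    int main(void)
    {
        struct ctx s = { .page_mask = -(1 << 12), .page_bits = 12 };
        unsigned a_mask = 7;  /* alignment mask for an 8-byte access */

        /* int -> uint64_t conversion is value-preserving modulo 2^64,
         * so a negative page_mask keeps all of its high bits set. */
        uint64_t compare_mask = (uint64_t)s.page_mask | a_mask;
        assert(compare_mask == 0xfffffffffffff007ull);
        return 0;
    }
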
@@ -357,6 +357,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_set_page_addr1(tb, -1);
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TCG_TYPE_TL;
+#ifdef CONFIG_SOFTMMU
+ tcg_ctx->page_bits = TARGET_PAGE_BITS;
+ tcg_ctx->page_mask = TARGET_PAGE_MASK;
+#endif
tb_overflow:
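
Both fields are refreshed at the start of every translation, next to addr_type, so they always match the target currently being translated. Given the usual definition TARGET_PAGE_MASK == ((target_long)-1 << TARGET_PAGE_BITS), the assignments establish a simple invariant; a sketch, not part of the patch:

    /* The mask is the sign-extended complement of the low page bits. */
    tcg_debug_assert(tcg_ctx->page_mask == -(1 << tcg_ctx->page_bits));
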
@@ -1685,7 +1685,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
- mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
+ mask_type = (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32
? TCG_TYPE_I64 : TCG_TYPE_I32);
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */
@@ -1699,7 +1699,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the TLB index from the address into X0. */
tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
TCG_REG_X0, TCG_REG_X0, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
/* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */
tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0);
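
Rendered as C, the AND_LSR plus ADD above compute the CPUTLBEntry address like this (a sketch; 'fast' stands for env_tlb(env)->f[mmu_idx], whose mask field is pre-shifted left by CPU_TLB_ENTRY_BITS):

    /* Shifting by page_bits - CPU_TLB_ENTRY_BITS leaves the page index
     * scaled by sizeof(CPUTLBEntry); masking clamps it to the current
     * dynamic TLB size, yielding a byte offset into the table. */
    uintptr_t ofs = (addr >> (s->page_bits - CPU_TLB_ENTRY_BITS)) & fast->mask;
    CPUTLBEntry *entry = (CPUTLBEntry *)((uintptr_t)fast->table + ofs);
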
@@ -1723,7 +1723,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TCG_REG_X3, addr_reg, s_mask - a_mask);
x3 = TCG_REG_X3;
}
- compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
+ compare_mask = (uint64_t)s->page_mask | a_mask;
/* Store the page mask part of the address into X3. */
tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_X3, x3, compare_mask);
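
The guard fed by that ANDI, again as C (a sketch; x3_value and addr_read stand for the register contents and whichever comparator matches the access type). Because compare_mask clears everything below the page except the alignment bits, one comparison checks "right page" and "sufficiently aligned" at once; when a_mask < s_mask, X3 already holds the last-byte-adjusted address, so page-straddling accesses miss as well:

    uint64_t cmp = x3_value & ((uint64_t)s->page_mask | a_mask);
    if (cmp != entry->addr_read) {
        /* TLB miss or misaligned access: take the slow path. */
    }
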
@@ -1424,7 +1424,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the tlb index from the address into R0. */
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
- SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+ SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
/*
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
@@ -1468,8 +1468,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
addrlo, s_mask - a_mask);
}
- if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
+ if (use_armv7_instructions && s->page_bits <= 16) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
t_addr, TCG_REG_TMP, 0);
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
@@ -1479,10 +1479,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
}
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
- SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ SHIFT_IMM_LSR(s->page_bits));
tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
0, TCG_REG_R2, TCG_REG_TMP,
- SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ SHIFT_IMM_LSL(s->page_bits));
}
if (s->addr_type != TCG_TYPE_I32) {
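
On the armv7 path, ~(page_mask | a_mask) is (1 << page_bits) - 1 with the alignment bits cleared, which fits a single movw only while page_bits <= 16; hence the guard on that branch. The fallback avoids materializing a mask at all: alignment was already tested via TST, and the LSR/LSL pair compares page numbers through shifts. In C (a sketch):

    /* CMP R2, TMP, LSL page_bits checks the comparator against
     * (t_addr >> page_bits) << page_bits, i.e. t_addr & page_mask,
     * without needing the mask constant in a register. */
    bool hit = (comparator == ((t_addr >> s->page_bits) << s->page_bits));
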
@@ -1979,7 +1979,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
if (TCG_TYPE_PTR == TCG_TYPE_I64) {
hrexw = P_REXW;
- if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) {
+ if (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32) {
tlbtype = TCG_TYPE_I64;
tlbrexw = P_REXW;
}
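
The tlbtype/tlbrexw widening above exists because the TLB index is taken from address bits [page_bits, page_bits + tlb_dyn_bits): once that range reaches past bit 31, the shift and the mask load must run in 64-bit precision. A worked example (the 22 is illustrative of CPU_TLB_DYN_MAX_BITS on 64-bit hosts):

    /* 64 KiB guest pages: page_bits = 16.
     * Top index bit = 16 + 22 - 1 = 37 > 31, so the shift needs REX.W
     * and the mask/table loads use TCG_TYPE_I64. */
    bool wide = s->page_bits + CPU_TLB_DYN_MAX_BITS > 32;
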
@@ -1988,7 +1988,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
TLB_MASK_TABLE_OFS(mem_index) +
@@ -2009,7 +2009,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
addrlo, s_mask - a_mask);
}
- tlb_mask = TARGET_PAGE_MASK | a_mask;
+ tlb_mask = s->page_mask | a_mask;
tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
/* cmp 0(TCG_REG_L0), TCG_REG_L1 */
@@ -870,7 +870,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -894,7 +894,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
}
tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
- a_bits, TARGET_PAGE_BITS - 1);
+ a_bits, s->page_bits - 1);
/* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
@@ -1189,10 +1189,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the TLB index from the address into TMP3. */
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
tcg_out_dsrl(s, TCG_TMP3, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
@@ -1214,7 +1214,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* For unaligned accesses, compare against the end of the access to
* verify that it does not cross a page boundary.
*/
- tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask);
+ tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
if (a_mask < s_mask) {
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
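
The comment above names the trick every backend in this patch leans on: when the alignment requirement (a_mask) is weaker than the access size (s_mask), the masked compare is applied to the address of the last byte of the access, so an access straddling a page boundary fails the compare and takes the slow path. As C (a sketch; 'comparator' is the value loaded from the CPUTLBEntry):

    /* addr + (s_mask - a_mask) only stays on the same page as addr
     * if the whole access fits on that page. */
    target_ulong last = addrlo + (s_mask - a_mask);
    bool hit = ((last & (s->page_mask | a_mask)) == comparator);
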
@@ -2090,10 +2090,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 32) {
tcg_out_shri32(s, TCG_REG_R0, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
tcg_out_shri64(s, TCG_REG_R0, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
@@ -2132,7 +2132,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
a_bits = s_bits;
}
tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ (32 - a_bits) & 31, 31 - s->page_bits);
} else {
TCGReg t = addrlo;
@@ -2153,13 +2153,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Mask the address for the requested alignment. */
if (TARGET_LONG_BITS == 32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ (32 - a_bits) & 31, 31 - s->page_bits);
} else if (a_bits == 0) {
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
} else {
tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
- 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+ 64 - s->page_bits, s->page_bits - a_bits);
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
}
}
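
The RLDICL pair in the 64-bit unaligned case is the same compare-mask computation expressed as rotates, so no mask constant has to be loaded: the first instruction rotates the address right by page_bits and clears offset bits [a_bits, page_bits); the second rotates everything back into place. Net effect in C (a sketch):

    /* Equivalent to the compare_mask form used by the other backends. */
    uint64_t r0 = t & ((uint64_t)s->page_mask | a_mask);
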
@@ -937,7 +937,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -952,7 +952,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
addr_adj, addr_reg, s_mask - a_mask);
}
- compare_mask = TARGET_PAGE_MASK | a_mask;
+ compare_mask = s->page_mask | a_mask;
if (compare_mask == sextreg(compare_mask, 0, 12)) {
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
} else {
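
OPC_ANDI takes a 12-bit sign-extended immediate, so the combined mask can be encoded inline only when bits 11..63 of compare_mask are a sign extension of bit 11; for larger masks it is materialized into TCG_REG_TMP1 first. The fit test, spelled out (a sketch; sextreg is the backend's alias for sextract64):

    /* The mask fits ANDI exactly when sign-extending its low 12 bits
     * is lossless. */
    bool fits_andi = (compare_mask == sextreg(compare_mask, 0, 12));
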
@@ -1768,7 +1768,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->addrlo_reg = addr_reg;
tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
@@ -1781,7 +1781,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* cross pages using the address of the last byte of the access.
*/
a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
- tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
+ tlb_mask = (uint64_t)s->page_mask | a_mask;
if (a_off == 0) {
tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
} else {
@@ -1056,7 +1056,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Extract the page index, shifted into place for tlb index. */
tcg_out_arithi(s, TCG_REG_T1, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+ s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
/* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
@@ -1068,7 +1068,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
h->base = TCG_REG_T1;
/* Mask out the page offset, except for the required alignment. */
- compare_mask = TARGET_PAGE_MASK | a_mask;
+ compare_mask = s->page_mask | a_mask;
if (check_fit_tl(compare_mask, 13)) {
tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
} else {