@@ -879,6 +879,7 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
* Crn, Crm, opc1, opc2 fields
* 32 or 64 bit register (ie is it accessed via MRC/MCR
* or via MRRC/MCRR?)
+ * non-secure/secure bank (AArch32 only)
* We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
* (In this case crn and opc2 should be zero.)
* For AArch64, there is no 32/64 bit size distinction;
@@ -896,9 +897,16 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
-#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \
- (((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \
- ((crm) << 7) | ((opc1) << 3) | (opc2))
+/* To enable banking of coprocessor registers depending on the NS-bit, we
+ * add a bit to the hash key to distinguish between secure and non-secure
+ * cpregs in the hashtable.
+ */
+#define CP_REG_NS_SHIFT 29
+#define CP_REG_NS_MASK(nsbit) ((nsbit) << CP_REG_NS_SHIFT)
+
+#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2, ns) \
+ (CP_REG_NS_MASK(ns) | ((cp) << 16) | ((is64) << 15) | \
+ ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
(CP_REG_AA64_MASK | \
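
(For illustration only, not part of the patch: a minimal standalone sketch of
how the NS bit lands in the hash key. The register numbers are made up, and
the macros are copied from the definitions above with the bank bit passed as
a plain 0/1.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Copies of the macros above, so the demo compiles on its own. */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK(nsbit) ((nsbit) << CP_REG_NS_SHIFT)
#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2, ns)   \
    (CP_REG_NS_MASK(ns) | ((cp) << 16) | ((is64) << 15) |   \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

int main(void)
{
    /* Hypothetical banked cp15 register: crn=1, crm=0, opc1=0, opc2=0. */
    uint32_t s_key  = ENCODE_CP_REG(15, 0, 1, 0, 0, 0, 0); /* secure bank */
    uint32_t ns_key = ENCODE_CP_REG(15, 0, 1, 0, 0, 0, 1); /* non-secure bank */

    /* The two keys differ only in bit 29, so both banks can live in the
     * same cpregs hash table without colliding. */
    printf("secure key:     0x%08" PRIx32 "\n", s_key);  /* 0x000f0800 */
    printf("non-secure key: 0x%08" PRIx32 "\n", ns_key); /* 0x200f0800 */
    return 0;
}
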
@@ -917,8 +925,15 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
uint32_t cpregid = kvmid;
if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
cpregid |= CP_REG_AA64_MASK;
- } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
- cpregid |= (1 << 15);
+ } else {
+ if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ cpregid |= (1 << 15);
+ }
+
+ /* KVM is always non-secure so add the NS flag on AArch32 register
+ * entries.
+ */
+ cpregid |= CP_REG_NS_MASK(SCR_NS);
}
return cpregid;
}
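
(Again for illustration only: any consumer of these 32-bit cpreg IDs can
recover the bank from bit 29. A short sketch assuming the macros above;
cpreg_bank_name() is a made-up helper, not an existing QEMU function.)

#include <stdint.h>

#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK(nsbit) ((nsbit) << CP_REG_NS_SHIFT)

/* Hypothetical helper: report which bank a 32-bit cpregid refers to.
 * IDs produced by kvm_to_cpreg_id() always report "non-secure". */
static inline const char *cpreg_bank_name(uint32_t cpregid)
{
    return (cpregid & CP_REG_NS_MASK(1)) ? "non-secure" : "secure";
}
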
@@ -3288,7 +3288,7 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
void *opaque, int state,
- int crm, int opc1, int opc2)
+ int crm, int opc1, int opc2, int nsbit)
{
/* Private utility function for define_one_arm_cp_reg_with_opaque():
* add a single reginfo struct to the hash table.
@@ -3327,7 +3327,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
r2->opc0, opc1, opc2);
} else {
- *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
+ *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2, nsbit);
}
if (opaque) {
r2->opaque = opaque;
@@ -3477,7 +3477,7 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
continue;
}
add_cpreg_to_hashtable(cpu, r, opaque, state,
- crm, opc1, opc2);
+ crm, opc1, opc2, SCR_NS);
}
}
}
@@ -712,7 +712,7 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
/*
* generate a conditional branch based on ARM condition code cc.
- * This is common between ARM and Aarch64 targets.
+ * This is common between ARM and AArch64 targets.
*/
void arm_gen_test_cc(int cc, int label)
{
@@ -7074,7 +7074,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
rt = (insn >> 12) & 0xf;
ri = get_arm_cp_reginfo(s->cp_regs,
- ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2, s->ns));
if (ri) {
/* Check access permissions */
if (!cp_access_ok(s->current_el, ri, isread)) {
@@ -7264,12 +7264,16 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
*/
if (is64) {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "64 bit system register cp:%d opc1: %d crm:%d\n",
- isread ? "read" : "write", cpnum, opc1, crm);
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
} else {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
- isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
}
return 1;
Added an additional NS-bit to the CPREG hash encoding. Updated hash lookup
locations to pass the bank bit, currently always set to non-secure.

Signed-off-by: Greg Bellows <greg.bellows@linaro.org>

---
v5 -> v6
- Globally replace Aarch# with AArch#
---
 target-arm/cpu.h       | 25 ++++++++++++++++++++-----
 target-arm/helper.c    |  6 +++---
 target-arm/translate.c | 16 ++++++++++------
 3 files changed, 33 insertions(+), 14 deletions(-)