diff --git a/target/arm/cpu.h b/target/arm/cpu.h
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4103,6 +4103,39 @@ static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
}

+static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
+{
+ return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+}
+
+static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
+{
+ return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
+{
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
+}
+
+static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
+{
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
+}
+
+static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
+{
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+ return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
+}
+
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10289,20 +10289,113 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
}
}

+typedef enum GranuleSize {
+ /* Same order as TG0 encoding */
+ Gran4K,
+ Gran64K,
+ Gran16K,
+ GranInvalid,
+} GranuleSize;
+
+static GranuleSize tg0_to_gran_size(int tg)
+{
+ switch (tg) {
+ case 0:
+ return Gran4K;
+ case 1:
+ return Gran64K;
+ case 2:
+ return Gran16K;
+ default:
+ return GranInvalid;
+ }
+}
+
+static GranuleSize tg1_to_gran_size(int tg)
+{
+ switch (tg) {
+ case 1:
+ return Gran16K;
+ case 2:
+ return Gran4K;
+ case 3:
+ return Gran64K;
+ default:
+ return GranInvalid;
+ }
+}
+
+static inline bool have4k(ARMCPU *cpu, bool stage2)
+{
+ return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
+ : cpu_isar_feature(aa64_tgran4, cpu);
+}
+
+static inline bool have16k(ARMCPU *cpu, bool stage2)
+{
+ return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
+ : cpu_isar_feature(aa64_tgran16, cpu);
+}
+
+static inline bool have64k(ARMCPU *cpu, bool stage2)
+{
+ return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
+ : cpu_isar_feature(aa64_tgran64, cpu);
+}
+
+static GranuleSize sanitize_gran_size(ARMCPU *cpu, GranuleSize gran,
+ bool stage2)
+{
+ switch (gran) {
+ case Gran4K:
+ if (have4k(cpu, stage2)) {
+ return gran;
+ }
+ break;
+ case Gran16K:
+ if (have16k(cpu, stage2)) {
+ return gran;
+ }
+ break;
+ case Gran64K:
+ if (have64k(cpu, stage2)) {
+ return gran;
+ }
+ break;
+ case GranInvalid:
+ break;
+ }
+ /*
+ * If the guest selects a granule size that isn't implemented,
+ * the architecture requires that we behave as if it selected one
+ * that is (with an IMPDEF choice of which one to pick). We choose
+ * to implement the smallest supported granule size.
+ */
+ if (have4k(cpu, stage2)) {
+ return Gran4K;
+ }
+ if (have16k(cpu, stage2)) {
+ return Gran16K;
+ }
+ assert(have64k(cpu, stage2));
+ return Gran64K;
+}
+
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
uint64_t tcr = regime_tcr(env, mmu_idx);
bool epd, hpd, using16k, using64k, tsz_oob, ds;
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+ GranuleSize gran;
ARMCPU *cpu = env_archcpu(env);
+ bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;

if (!regime_has_2_ranges(mmu_idx)) {
select = 0;
tsz = extract32(tcr, 0, 6);
- using64k = extract32(tcr, 14, 1);
- using16k = extract32(tcr, 15, 1);
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ gran = tg0_to_gran_size(extract32(tcr, 14, 2));
+ if (stage2) {
/* VTCR_EL2 */
hpd = false;
} else {
@@ -10320,16 +10413,13 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
select = extract64(va, 55, 1);
if (!select) {
tsz = extract32(tcr, 0, 6);
+ gran = tg0_to_gran_size(extract32(tcr, 14, 2));
epd = extract32(tcr, 7, 1);
sh = extract32(tcr, 12, 2);
- using64k = extract32(tcr, 14, 1);
- using16k = extract32(tcr, 15, 1);
hpd = extract64(tcr, 41, 1);
} else {
- int tg = extract32(tcr, 30, 2);
- using16k = tg == 1;
- using64k = tg == 3;
tsz = extract32(tcr, 16, 6);
+ gran = tg1_to_gran_size(extract32(tcr, 30, 2));
epd = extract32(tcr, 23, 1);
sh = extract32(tcr, 28, 2);
hpd = extract64(tcr, 42, 1);
@@ -10338,6 +10428,10 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ds = extract64(tcr, 59, 1);
}

+ gran = sanitize_gran_size(cpu, gran, stage2);
+ using64k = gran == Gran64K;
+ using16k = gran == Gran16K;
+
if (cpu_isar_feature(aa64_st, cpu)) {
max_tsz = 48 - using64k;
} else {
Arm CPUs support some subset of the granule (page) sizes 4K, 16K and
64K. The guest selects the one it wants using bits in the TCR_ELx
registers. If it tries to program these registers with a value that
is either reserved or which requests a size that the CPU does not
implement, the architecture requires that the CPU behaves as if the
field was programmed to some size that has been implemented.
Currently we don't implement this, and instead let the guest use any
granule size, even if the CPU ID register fields say it isn't
present.

Make aa64_va_parameters() check against the supported granule size
and force use of a different one if it is not implemented.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
Unusually, the architecture would allow us to do this sanitizing when
the TCR_ELx register is written, because it permits the value read
back from the register to be one corresponding to the IMPDEF chosen
size rather than having to be the value written. But I opted to do
the handling in aa64_va_parameters() anyway, on the assumption that
this isn't critically in the fast path.
---
 target/arm/cpu.h    |  33 +++++++++++++
 target/arm/helper.c | 110 ++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 135 insertions(+), 8 deletions(-)
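
For readers who want to poke at the behaviour outside QEMU, below is a
minimal stand-alone sketch of the scheme the patch implements. It is
illustrative only: the granule_implemented() helper and the main()
harness are invented for this example (in the patch, that information
comes from the ID_AA64MMFR0 TGRANx and TGRANx_2 fields via the
isar_feature_aa64_tgran* predicates), but the TG0/TG1 encoding tables
and the fallback rule mirror the patch's tg0_to_gran_size(),
tg1_to_gran_size() and sanitize_gran_size().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the patch's GranuleSize enum; value order follows TG0. */
typedef enum { Gran4K, Gran64K, Gran16K, GranInvalid } GranuleSize;

/* TG0 (TCR_ELx bits [15:14]): 0b00=4K, 0b01=64K, 0b10=16K, 0b11 reserved */
static GranuleSize tg0_to_gran_size(int tg)
{
    switch (tg) {
    case 0: return Gran4K;
    case 1: return Gran64K;
    case 2: return Gran16K;
    default: return GranInvalid;
    }
}

/* TG1 (TCR_ELx bits [31:30]): 0b01=16K, 0b10=4K, 0b11=64K, 0b00 reserved */
static GranuleSize tg1_to_gran_size(int tg)
{
    switch (tg) {
    case 1: return Gran16K;
    case 2: return Gran4K;
    case 3: return Gran64K;
    default: return GranInvalid;
    }
}

/*
 * Invented for this example: pretend the CPU implements only the 4K
 * and 64K granules. The patch instead queries the CPU ID registers.
 */
static bool granule_implemented(GranuleSize g)
{
    return g == Gran4K || g == Gran64K;
}

/*
 * Same shape as the patch's sanitize_gran_size(): if the selected
 * granule is reserved or unimplemented, fall back to the smallest
 * implemented one (the IMPDEF choice the patch makes).
 */
static GranuleSize sanitize(GranuleSize g)
{
    if (g != GranInvalid && granule_implemented(g)) {
        return g;
    }
    if (granule_implemented(Gran4K)) {
        return Gran4K;
    }
    if (granule_implemented(Gran16K)) {
        return Gran16K;
    }
    return Gran64K;
}

int main(void)
{
    /* A guest writing TG0 = 0b10 asks for 16K, which this CPU lacks,
     * so translation behaves as if an implemented size was chosen: */
    assert(sanitize(tg0_to_gran_size(2)) == Gran4K);

    /* TG1 = 0b00 is a reserved encoding and is likewise sanitized: */
    assert(sanitize(tg1_to_gran_size(0)) == Gran4K);

    /* An implemented selection is left alone: */
    assert(sanitize(tg1_to_gran_size(3)) == Gran64K);

    printf("granule size sanitizing behaves as expected\n");
    return 0;
}

Note the asymmetry the sketch makes visible: TG0 and TG1 encode the
same three sizes differently, and TG1's 0b00 is reserved, which is why
the patch needs two mapping functions feeding one sanitize step.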