@@ -153,18 +153,16 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* If we have VHE then the Hyp code will reset CPACR_EL1 to
* CPACR_EL1_DEFAULT and we need to reenable SME.
*/
- if (has_vhe()) {
- if (system_supports_sme()) {
- /* Also restore EL0 state seen on entry */
- if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
- sysreg_clear_set(CPACR_EL1, 0,
- CPACR_EL1_SMEN_EL0EN |
- CPACR_EL1_SMEN_EL1EN);
- else
- sysreg_clear_set(CPACR_EL1,
- CPACR_EL1_SMEN_EL0EN,
- CPACR_EL1_SMEN_EL1EN);
- }
+ if (has_vhe() && system_supports_sme()) {
+ /* Also restore EL0 state seen on entry */
+ if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+ sysreg_clear_set(CPACR_EL1, 0,
+ CPACR_EL1_SMEN_EL0EN |
+ CPACR_EL1_SMEN_EL1EN);
+ else
+ sysreg_clear_set(CPACR_EL1,
+ CPACR_EL1_SMEN_EL0EN,
+ CPACR_EL1_SMEN_EL1EN);
}

if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
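
The only difference between the two sysreg_clear_set() calls above is whether CPACR_EL1_SMEN_EL0EN is restored: if the host had SME enabled for EL0 on entry, both SMEN bits end up set; otherwise the EL0 bit is cleared and only the EL1 bit is set. For readability of the clear/set arguments, sysreg_clear_set() is roughly the following read-modify-write helper (a sketch of the asm/sysreg.h macro, not a verbatim copy):

	#define sysreg_clear_set(sysreg, clear, set) do {		\
		u64 __scs_val = read_sysreg(sysreg);			\
		u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
									\
		/* skip the write if nothing actually changed */	\
		if (__scs_new != __scs_val)				\
			write_sysreg(__scs_new, sysreg);		\
	} while (0)
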
@@ -47,21 +47,20 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ if (cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
- cpus_have_final_cap(ARM64_HAS_FGT)) {
+ if (cpus_have_final_cap(ARM64_SME)) {
val = read_sysreg_s(SYS_HFGRTR_EL2);
- val &= ~(HFGxTR_EL2_nTPIDR_EL0_MASK |
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
HFGxTR_EL2_nSMPRI_EL1_MASK);
write_sysreg_s(val, SYS_HFGRTR_EL2);
val = read_sysreg_s(SYS_HFGWTR_EL2);
- val &= ~(HFGxTR_EL2_nTPIDR_EL0_MASK |
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
HFGxTR_EL2_nSMPRI_EL1_MASK);
write_sysreg_s(val, SYS_HFGWTR_EL2);
}
@@ -109,23 +108,24 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
- cpus_have_final_cap(ARM64_HAS_FGT)) {
+ if (cpus_have_final_cap(ARM64_SME)) {
u64 val;

val = read_sysreg_s(SYS_HFGRTR_EL2);
- val |= HFGxTR_EL2_nTPIDR_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
write_sysreg_s(val, SYS_HFGRTR_EL2);
val = read_sysreg_s(SYS_HFGWTR_EL2);
- val |= HFGxTR_EL2_nTPIDR_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
write_sysreg_s(val, SYS_HFGWTR_EL2);
}

cptr = CPTR_EL2_DEFAULT;
if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
cptr |= CPTR_EL2_TZ;
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ if (cpus_have_final_cap(ARM64_SME))
cptr &= ~CPTR_EL2_TSM;
write_sysreg(cptr, cptr_el2);
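
The nTPIDR2_EL0 and nSMPRI_EL1 fields in HFGRTR_EL2/HFGWTR_EL2 are active-low fine-grained trap disables: while a bit is 0 the corresponding accesses to TPIDR2_EL0 and SMPRI_EL1 trap to EL2, and setting it back to 1 stops the trapping. __activate_traps() therefore clears both bits before entering the guest and __deactivate_traps() sets them again for the host; dropping the separate ARM64_HAS_FGT check presumably relies on FEAT_SME implying FEAT_FGT. A minimal sketch of that pattern, assuming a hypothetical helper name and that sysreg_clear_set_s() (the _s variant of the helper shown earlier) is usable here:

	/* Hypothetical helper, not part of the patch */
	static void __sme_fgt_traps(bool trap_guest)
	{
		u64 bits = HFGxTR_EL2_nTPIDR2_EL0_MASK |
			   HFGxTR_EL2_nSMPRI_EL1_MASK;

		if (trap_guest) {
			/* bits cleared: accesses trap to EL2 */
			sysreg_clear_set_s(SYS_HFGRTR_EL2, bits, 0);
			sysreg_clear_set_s(SYS_HFGWTR_EL2, bits, 0);
		} else {
			/* bits set: traps disabled for the host */
			sysreg_clear_set_s(SYS_HFGRTR_EL2, 0, bits);
			sysreg_clear_set_s(SYS_HFGWTR_EL2, 0, bits);
		}
	}
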
@@ -60,7 +60,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ if (cpus_have_final_cap(ARM64_SME))
write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
sctlr_el2);
@@ -85,7 +85,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
- if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ if (cpus_have_final_cap(ARM64_SME))
write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
sctlr_el2);
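
In the VHE paths above the gate is SCTLR_EL2.EnTP2 (SCTLR_ELx_ENTP2), which under VHE controls EL0 access to TPIDR2_EL0: __activate_traps() clears it while the guest runs and __deactivate_traps() sets it again so the host's EL0 regains access. The same read-modify-write could also be written with sysreg_clear_set(); a sketch only, equivalent to the lines in the two hunks:

	/* __activate_traps(): clear EnTP2 while the guest runs */
	if (cpus_have_final_cap(ARM64_SME))
		sysreg_clear_set(sctlr_el2, SCTLR_ELx_ENTP2, 0);

	/* __deactivate_traps(): set EnTP2 again for the host's EL0 */
	if (cpus_have_final_cap(ARM64_SME))
		sysreg_clear_set(sctlr_el2, 0, SCTLR_ELx_ENTP2);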