Message ID | 20180605152303.14450-7-julien.grall@arm.com |
---|---|
State | Superseded |
Series | xen/arm: SSBD (aka Spectre-v4) mitigation (XSA-263) |
On Tue, 5 Jun 2018, Julien Grall wrote:
> In order to offer ARCH_WORKAROUND_2 support to guests, we need to track the
> state of the workaround per-vCPU. The field 'pad' in cpu_info is now
> repurposed to store flags easily accessible in assembly.
>
> As the hypervisor will always run with the workaround enabled, we may
> need to enable (on guest exit) or disable (on guest entry) the
> workaround.
>
> A follow-up patch will add a fast path for the workaround for arm64 guests.
>
> Note that check_workaround_ssbd() is used instead of get_ssbd_state()
> because the former is implemented using an alternative. Therefore the code
> will be shortcut on affected platforms.
>
> This is part of XSA-263.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 5a2a9a6b83..4baecc2447 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -21,6 +21,7 @@
 #include <xen/wait.h>

 #include <asm/alternative.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/current.h>
 #include <asm/event.h>
@@ -572,6 +573,13 @@ int vcpu_initialise(struct vcpu *v)
     if ( (rc = vcpu_vtimer_init(v)) != 0 )
         goto fail;

+    /*
+     * The workaround 2 (i.e SSBD mitigation) is enabled by default if
+     * supported.
+     */
+    if ( get_ssbd_state() == ARM_SSBD_RUNTIME )
+        v->arch.cpu_info->flags |= CPUINFO_WORKAROUND_2_FLAG;
+
     return rc;

 fail:
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 5c18e918b0..315fc61f77 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2011,10 +2011,23 @@ inject_abt:
         inject_iabt_exception(regs, gva, hsr.len);
 }

+static inline bool needs_ssbd_flip(struct vcpu *v)
+{
+    if ( !check_workaround_ssbd() )
+        return false;
+
+    return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) &&
+             cpu_require_ssbd_mitigation();
+}
+
 static void enter_hypervisor_head(struct cpu_user_regs *regs)
 {
     if ( guest_mode(regs) )
     {
+        /* If the guest has disabled the workaround, bring it back on. */
+        if ( needs_ssbd_flip(current) )
+            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+
         /*
          * If we pended a virtual abort, preserve it until it gets cleared.
          * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
@@ -2260,6 +2273,13 @@ void leave_hypervisor_tail(void)
              */
             SYNCHRONIZE_SERROR(SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT);

+            /*
+             * The hypervisor runs with the workaround always present.
+             * If the guest wants it disabled, so be it...
+             */
+            if ( needs_ssbd_flip(current) )
+                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
+
             return;
         }
         local_irq_enable();
diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c
index 40a80d5760..c4ccae6030 100644
--- a/xen/arch/arm/vsmc.c
+++ b/xen/arch/arm/vsmc.c
@@ -18,6 +18,7 @@
 #include <xen/lib.h>
 #include <xen/types.h>
 #include <public/arch-arm/smccc.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/monitor.h>
 #include <asm/regs.h>
@@ -104,6 +105,23 @@ static bool handle_arch(struct cpu_user_regs *regs)
             if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) )
                 ret = 0;
             break;
+        case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
+            switch ( get_ssbd_state() )
+            {
+            case ARM_SSBD_UNKNOWN:
+            case ARM_SSBD_FORCE_DISABLE:
+                break;
+
+            case ARM_SSBD_RUNTIME:
+                ret = ARM_SMCCC_SUCCESS;
+                break;
+
+            case ARM_SSBD_FORCE_ENABLE:
+            case ARM_SSBD_MITIGATED:
+                ret = ARM_SMCCC_NOT_REQUIRED;
+                break;
+            }
+            break;
         }

         set_user_reg(regs, 0, ret);
@@ -114,6 +132,25 @@ static bool handle_arch(struct cpu_user_regs *regs)
     case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
         /* No return value */
         return true;
+
+    case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
+    {
+        bool enable = (uint32_t)get_user_reg(regs, 1);
+
+        /*
+         * ARM_WORKAROUND_2_FID should only be called when mitigation
+         * state can be changed at runtime.
+         */
+        if ( unlikely(get_ssbd_state() != ARM_SSBD_RUNTIME) )
+            return true;
+
+        if ( enable )
+            get_cpu_info()->flags |= CPUINFO_WORKAROUND_2_FLAG;
+        else
+            get_cpu_info()->flags &= ~CPUINFO_WORKAROUND_2_FLAG;
+
+        return true;
+    }
     }

     return false;
diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h
index 7a0971fdea..f9819b34fc 100644
--- a/xen/include/asm-arm/current.h
+++ b/xen/include/asm-arm/current.h
@@ -7,6 +7,10 @@
 #include <asm/percpu.h>
 #include <asm/processor.h>

+/* Tell whether the guest vCPU enabled Workaround 2 (i.e variant 4) */
+#define CPUINFO_WORKAROUND_2_FLAG_SHIFT   0
+#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT)
+
 #ifndef __ASSEMBLY__

 struct vcpu;
@@ -21,7 +25,7 @@ DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned long elr;
-    unsigned int pad;
+    uint32_t flags;
 };

 static inline struct cpu_info *get_cpu_info(void)
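
For context on the vsmc.c hunk above: it implements the hypervisor side of the SMCCC 1.1 ARCH_WORKAROUND_2 call, which a guest is expected to discover through ARCH_FEATURES and then issue per CPU. The sketch below is purely illustrative and assumes an arm64 guest using the HVC conduit; the function IDs and return codes are the standard SMCCC 1.1 values mirrored by handle_arch(), but smccc_1_1_hvc(), ssbd_runtime_controllable() and ssbd_set_mitigation() are hypothetical names, not Xen or Linux code.

/*
 * Illustrative guest-side sketch (not part of this patch): probe for
 * ARCH_WORKAROUND_2 via ARCH_FEATURES, then toggle the mitigation on the
 * current CPU. Assumes an arm64 guest with GCC/Clang extended asm and the
 * HVC conduit.
 */
#include <stdbool.h>
#include <stdint.h>

#define SMCCC_ARCH_FEATURES_FID      0x80000001U
#define SMCCC_ARCH_WORKAROUND_2_FID  0x80007fffU
#define SMCCC_RET_SUCCESS            0    /* runtime-toggleable */
#define SMCCC_RET_NOT_SUPPORTED      (-1) /* call must not be issued */
#define SMCCC_RET_NOT_REQUIRED       (-2) /* unaffected or always mitigated */

/* Hypothetical conduit wrapper: x0 = function ID, x1 = argument. */
static long smccc_1_1_hvc(uint32_t fid, unsigned long arg)
{
    register unsigned long x0 asm("x0") = fid;
    register unsigned long x1 asm("x1") = arg;

    asm volatile ( "hvc #0"
                   : "+r" (x0), "+r" (x1)
                   :
                   : "x2", "x3", "memory" );

    return (long)x0;
}

/* True if the hypervisor lets the guest flip the mitigation at runtime. */
static bool ssbd_runtime_controllable(void)
{
    return smccc_1_1_hvc(SMCCC_ARCH_FEATURES_FID,
                         SMCCC_ARCH_WORKAROUND_2_FID) == SMCCC_RET_SUCCESS;
}

/* Per-CPU toggle; a real guest would cache the probe result. */
static void ssbd_set_mitigation(bool enable)
{
    if ( ssbd_runtime_controllable() )
        smccc_1_1_hvc(SMCCC_ARCH_WORKAROUND_2_FID, enable ? 1 : 0);
}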
In order to offer ARCH_WORKAROUND_2 support to guests, we need to track the
state of the workaround per-vCPU. The field 'pad' in cpu_info is now
repurposed to store flags easily accessible in assembly.

As the hypervisor will always run with the workaround enabled, we may
need to enable (on guest exit) or disable (on guest entry) the
workaround.

A follow-up patch will add a fast path for the workaround for arm64 guests.

Note that check_workaround_ssbd() is used instead of get_ssbd_state()
because the former is implemented using an alternative. Therefore the code
will be shortcut on affected platforms.

This is part of XSA-263.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
Changes in v2:
    - Fix the condition in need_ssbd_flip()
---
 xen/arch/arm/domain.c         |  8 ++++++++
 xen/arch/arm/traps.c          | 20 ++++++++++++++++++++
 xen/arch/arm/vsmc.c           | 37 +++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/current.h |  6 +++++-
 4 files changed, 70 insertions(+), 1 deletion(-)
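
On the check_workaround_ssbd() versus get_ssbd_state() note above: the former is backed by Xen's alternatives framework, so on CPUs without the ARM_SSBD capability the check is patched down to a single immediate move and needs_ssbd_flip() returns early without loading any mitigation state. The helper below is a rough sketch of that pattern, written against Xen's ALTERNATIVE() macro and ARM_SSBD capability bit; it approximates the macro-generated helper in xen/include/asm-arm/cpuerrata.h rather than quoting it.

/*
 * Rough sketch of an alternative-based capability check (assumption:
 * approximates, does not quote, the helper generated in cpuerrata.h).
 *
 * At boot the alternatives framework replaces "mov %0, #0" with
 * "mov %0, #1" on CPUs that have the ARM_SSBD capability, so the check
 * costs one patched instruction instead of a load of the SSBD state.
 */
static inline bool check_workaround_ssbd(void)
{
    if ( !IS_ENABLED(CONFIG_ARM_SSBD) )
        return false;
    else
    {
        register_t ret;

        asm volatile ( ALTERNATIVE("mov %0, #0",
                                   "mov %0, #1",
                                   ARM_SSBD)
                       : "=r" (ret) );

        return unlikely(ret);
    }
}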