Message ID | 20250513203803.2636561-2-sohil.mehta@intel.com
---|---
State | New
Series | [v6,1/9] x86/fred, KVM: VMX: Pass event data to the FRED entry point from KVM
On Tue, May 13, 2025, Sohil Mehta wrote:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 5c5766467a61..1d43d4a2f6b6 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7079,7 +7079,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
> 
>  	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
>  	if (cpu_feature_enabled(X86_FEATURE_FRED))
> -		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
> +		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector, 0);
>  	else
>  		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
>  	kvm_after_interrupt(vcpu);
> @@ -7393,7 +7393,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
>  	    is_nmi(vmx_get_intr_info(vcpu))) {
>  		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
>  		if (cpu_feature_enabled(X86_FEATURE_FRED))
> -			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
> +			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR,
> +					    vmx_get_exit_qual(vcpu));
>  		else
>  			vmx_do_nmi_irqoff();
>  		kvm_after_interrupt(vcpu);

As a prep patch, what if we provide separate wrappers for IRQ vs. NMI?  That
way KVM doesn't need to shove a '0' literal for the IRQ case.  There isn't
that much code that's actually shared between the two, once you account for
KVM having to hardcode the NMI information.

Compile tested only...

--
From: Sean Christopherson <seanjc@google.com>
Date: Wed, 14 May 2025 07:07:55 -0700
Subject: [PATCH] x86/fred: Provide separate IRQ vs. NMI wrappers for "entry"
 from KVM

Provide separate wrappers for forwarding IRQs vs NMIs from KVM in
anticipation of adding support for NMI source reporting, which will add
an NMI-only parameter, i.e. will further pollute the current API with a
param that is hardcoded for one of the two call sites.

Opportunistically tag the non-FRED NMI wrapper __always_inline, as the
compiler could theoretically generate a function call and trigger a
(completely benign) "leaving noinstr" warning.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/fred.h | 24 +++++++++++++++++++-----
 arch/x86/kvm/vmx/vmx.c      |  4 ++--
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index 2a29e5216881..dfb4f5e6a37a 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -10,6 +10,7 @@
 
 #include <asm/asm.h>
 #include <asm/trapnr.h>
+#include <asm/irq_vectors.h>
 
 /*
  * FRED event return instruction opcodes for ERET{S,U}; supported in
@@ -70,14 +71,26 @@ __visible void fred_entry_from_user(struct pt_regs *regs);
 __visible void fred_entry_from_kernel(struct pt_regs *regs);
 __visible void __fred_entry_from_kvm(struct pt_regs *regs);
 
-/* Can be called from noinstr code, thus __always_inline */
-static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector)
+/* Must be called from noinstr code, thus __always_inline */
+static __always_inline void fred_nmi_from_kvm(void)
 {
 	struct fred_ss ss = {
 		.ss     =__KERNEL_DS,
-		.type   = type,
+		.type   = EVENT_TYPE_NMI,
+		.vector = NMI_VECTOR,
+		.nmi    = true,
+		.lm     = 1,
+	};
+
+	asm_fred_entry_from_kvm(ss);
+}
+
+static inline void fred_irq_from_kvm(unsigned int vector)
+{
+	struct fred_ss ss = {
+		.ss     =__KERNEL_DS,
+		.type   = EVENT_TYPE_EXTINT,
 		.vector = vector,
-		.nmi    = type == EVENT_TYPE_NMI,
 		.lm     = 1,
 	};
 
@@ -109,7 +122,8 @@ static __always_inline unsigned long fred_event_data(struct pt_regs *regs) { ret
 static inline void cpu_init_fred_exceptions(void) { }
 static inline void cpu_init_fred_rsps(void) { }
 static inline void fred_complete_exception_setup(void) { }
-static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { }
+static __always_inline void fred_nmi_from_kvm(void) { }
+static inline void fred_irq_from_kvm(unsigned int vector) { }
 static inline void fred_sync_rsp0(unsigned long rsp0) { }
 static inline void fred_update_rsp0(void) { }
 #endif /* CONFIG_X86_FRED */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c5766467a61..271f92fee76b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7079,7 +7079,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
 
 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
 	if (cpu_feature_enabled(X86_FEATURE_FRED))
-		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
+		fred_irq_from_kvm(vector);
 	else
 		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
 	kvm_after_interrupt(vcpu);
@@ -7393,7 +7393,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	    is_nmi(vmx_get_intr_info(vcpu))) {
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 		if (cpu_feature_enabled(X86_FEATURE_FRED))
-			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+			fred_nmi_from_kvm();
 		else
 			vmx_do_nmi_irqoff();
 		kvm_after_interrupt(vcpu);

base-commit: 9f35e33144ae5377d6a8de86dd3bd4d995c6ac65
--
On 5/14/2025 7:15 AM, Sean Christopherson wrote:
> On Tue, May 13, 2025, Sohil Mehta wrote:
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index 5c5766467a61..1d43d4a2f6b6 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -7079,7 +7079,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
>> 
>>  	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
>>  	if (cpu_feature_enabled(X86_FEATURE_FRED))
>> -		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
>> +		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector, 0);
>>  	else
>>  		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
>>  	kvm_after_interrupt(vcpu);
>> @@ -7393,7 +7393,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
>>  	    is_nmi(vmx_get_intr_info(vcpu))) {
>>  		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
>>  		if (cpu_feature_enabled(X86_FEATURE_FRED))
>> -			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
>> +			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR,
>> +					    vmx_get_exit_qual(vcpu));
>>  		else
>>  			vmx_do_nmi_irqoff();
>>  		kvm_after_interrupt(vcpu);
> 
> As a prep patch, what if we provide separate wrappers for IRQ vs. NMI?  That
> way KVM doesn't need to shove a '0' literal for the IRQ case.  There isn't
> that much code that's actually shared between the two, once you account for
> KVM having to hardcode the NMI information.
> 

Seems reasonable to me. I don't see the IRQ side using the Event data
anytime soon (or ever).

We still need to pass 0 to asm_fred_entry_from_kvm() for the IRQ case but
that would be contained in asm/fred.h and would not affect KVM.

> Compile tested only...
> 

No worries. I'll test it out. I am assuming you want this patch to go as
part of this series.

> --
> From: Sean Christopherson <seanjc@google.com>
> Date: Wed, 14 May 2025 07:07:55 -0700
> Subject: [PATCH] x86/fred: Provide separate IRQ vs. NMI wrappers for "entry"
>  from KVM
> 
> Provide separate wrappers for forwarding IRQs vs NMIs from KVM in
> anticipation of adding support for NMI source reporting, which will add
> an NMI-only parameter, i.e. will further pollute the current API with a
> param that is hardcoded for one of the two call sites.
> 
> Opportunistically tag the non-FRED NMI wrapper __always_inline, as the
> compiler could theoretically generate a function call and trigger a
> (completely benign) "leaving noinstr" warning.
> 

If this is really a concern, wouldn't there be similar semantics in other
places as well?

> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/include/asm/fred.h | 24 +++++++++++++++++++-----
>  arch/x86/kvm/vmx/vmx.c      |  4 ++--
>  2 files changed, 21 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
> index 2a29e5216881..dfb4f5e6a37a 100644
> --- a/arch/x86/include/asm/fred.h
> +++ b/arch/x86/include/asm/fred.h
> @@ -10,6 +10,7 @@
> 
>  #include <asm/asm.h>
>  #include <asm/trapnr.h>
> +#include <asm/irq_vectors.h>
> 

I'll move the header insertion to keep it alphabetically ordered.
>  /*
>   * FRED event return instruction opcodes for ERET{S,U}; supported in
> @@ -70,14 +71,26 @@ __visible void fred_entry_from_user(struct pt_regs *regs);
>  __visible void fred_entry_from_kernel(struct pt_regs *regs);
>  __visible void __fred_entry_from_kvm(struct pt_regs *regs);
> 
> -/* Can be called from noinstr code, thus __always_inline */
> -static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector)
> +/* Must be called from noinstr code, thus __always_inline */
> +static __always_inline void fred_nmi_from_kvm(void)
>  {
>  	struct fred_ss ss = {
>  		.ss     =__KERNEL_DS,
> -		.type   = type,
> +		.type   = EVENT_TYPE_NMI,
> +		.vector = NMI_VECTOR,
> +		.nmi    = true,
> +		.lm     = 1,
> +	};
> +
> +	asm_fred_entry_from_kvm(ss);
> +}
> +

The original code uses spaces for alignment. Since we are modifying it, I am
thinking of changing it to tabs.

> +static inline void fred_irq_from_kvm(unsigned int vector)
> +{
> +	struct fred_ss ss = {
> +		.ss     =__KERNEL_DS,
> +		.type   = EVENT_TYPE_EXTINT,
>  		.vector = vector,
> -		.nmi    = type == EVENT_TYPE_NMI,
>  		.lm     = 1,
>  	};
> 

Thanks,
Sohil
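
(Putting the two pieces of this thread together for illustration: if Sean's
prep patch is combined with the two-argument asm_fred_entry_from_kvm() from
the event-data patch at the bottom of this page, the IRQ wrapper can hide
the hardcoded zero entirely inside asm/fred.h, roughly as sketched below.
This is only a sketch of the likely merged result, not an applied patch.)

/*
 * Sketch: fred_irq_from_kvm() from the prep patch, adapted to the
 * two-argument asm_fred_entry_from_kvm() prototype from the event-data
 * patch.  IRQs carry no event data, so the '0' stays in asm/fred.h and
 * never shows up in KVM.
 */
static inline void fred_irq_from_kvm(unsigned int vector)
{
	struct fred_ss ss = {
		.ss     =__KERNEL_DS,
		.type   = EVENT_TYPE_EXTINT,
		.vector = vector,
		.lm     = 1,
	};

	asm_fred_entry_from_kvm(ss, 0);
}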
On Wed, May 14, 2025, Sohil Mehta wrote:
> On 5/14/2025 7:15 AM, Sean Christopherson wrote:
> > Compile tested only...
> > 
> 
> No worries. I'll test it out. I am assuming you want this patch to go as
> part of this series.

Yes please.  I can also post it separately, but that seems unnecessary.

> > --
> > From: Sean Christopherson <seanjc@google.com>
> > Date: Wed, 14 May 2025 07:07:55 -0700
> > Subject: [PATCH] x86/fred: Provide separate IRQ vs. NMI wrappers for "entry"
> >  from KVM
> > 
> > Provide separate wrappers for forwarding IRQs vs NMIs from KVM in
> > anticipation of adding support for NMI source reporting, which will add
> > an NMI-only parameter, i.e. will further pollute the current API with a
> > param that is hardcoded for one of the two call sites.
> > 
> > Opportunistically tag the non-FRED NMI wrapper __always_inline, as the
> > compiler could theoretically generate a function call and trigger a
> > (completely benign) "leaving noinstr" warning.
> > 
> 
> If this is really a concern, wouldn't there be similar semantics in other
> places as well?

There are, e.g. the stubs in include/linux/context_tracking_state.h and many
other places.  It looks ridiculous, but the compiler will generate RET+CALL
for literal nops if the right sanitizers are enabled.  E.g. see commit
432727f1cb6e ("KVM: VMX: Always inline to_vmx() and to_kvm_vmx()").

> > @@ -70,14 +71,26 @@ __visible void fred_entry_from_user(struct pt_regs *regs);
> >  __visible void fred_entry_from_kernel(struct pt_regs *regs);
> >  __visible void __fred_entry_from_kvm(struct pt_regs *regs);
> > 
> > -/* Can be called from noinstr code, thus __always_inline */
> > -static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector)
> > +/* Must be called from noinstr code, thus __always_inline */
> > +static __always_inline void fred_nmi_from_kvm(void)
> >  {
> >  	struct fred_ss ss = {
> >  		.ss     =__KERNEL_DS,
> > -		.type   = type,
> > +		.type   = EVENT_TYPE_NMI,
> > +		.vector = NMI_VECTOR,
> > +		.nmi    = true,
> > +		.lm     = 1,
> > +	};
> > +
> > +	asm_fred_entry_from_kvm(ss);
> > +}
> > +
> 
> The original code uses spaces for alignment. Since we are modifying it, I am
> thinking of changing it to tabs.

Oof, yeah, definitely do that.
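
(For context on the "leaving noinstr" warning discussed above: a noinstr
function must not call instrumentable code, and a plain "static inline" stub
is only a hint, so an instrumentation-heavy build may emit it out of line
and call it.  The toy example below uses made-up names and only mirrors the
shape of the kernel case; __always_inline removes the possibility of such a
call.)

/* Hypothetical illustration of the noinstr vs. static inline interaction. */
static inline void plain_stub(void) { }			/* may be emitted out of line */
static __always_inline void forced_stub(void) { }	/* guaranteed to be inlined */

static noinstr void entry_like_path(void)
{
	/*
	 * With certain sanitizers enabled, the compiler can emit an
	 * out-of-line copy of plain_stub() and generate a real CALL to it
	 * here, which objtool flags as leaving the .noinstr.text section.
	 */
	plain_stub();

	/* The empty body is forced inline: no call, no warning. */
	forced_stub();
}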
diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S
index 29c5c32c16c3..a61256be9703 100644
--- a/arch/x86/entry/entry_64_fred.S
+++ b/arch/x86/entry/entry_64_fred.S
@@ -93,7 +93,7 @@ SYM_FUNC_START(asm_fred_entry_from_kvm)
 	 * +--------+-----------------+
 	 */
 	push $0				/* Reserved, must be 0 */
-	push $0				/* Event data, 0 for IRQ/NMI */
+	push %rsi			/* Event data for IRQ/NMI */
 	push %rdi			/* fred_ss handed in by the caller */
 	push %rbp
 	pushf
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index 2a29e5216881..a4de57e578c4 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -64,14 +64,15 @@ static __always_inline unsigned long fred_event_data(struct pt_regs *regs)
 
 void asm_fred_entrypoint_user(void);
 void asm_fred_entrypoint_kernel(void);
-void asm_fred_entry_from_kvm(struct fred_ss);
+void asm_fred_entry_from_kvm(struct fred_ss ss, unsigned long edata);
 
 __visible void fred_entry_from_user(struct pt_regs *regs);
 __visible void fred_entry_from_kernel(struct pt_regs *regs);
 __visible void __fred_entry_from_kvm(struct pt_regs *regs);
 
 /* Can be called from noinstr code, thus __always_inline */
-static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector)
+static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector,
+						unsigned long edata)
 {
 	struct fred_ss ss = {
 		.ss     =__KERNEL_DS,
@@ -81,7 +82,7 @@ static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int
 		.lm     = 1,
 	};
 
-	asm_fred_entry_from_kvm(ss);
+	asm_fred_entry_from_kvm(ss, edata);
 }
 
 void cpu_init_fred_exceptions(void);
@@ -109,7 +110,7 @@ static __always_inline unsigned long fred_event_data(struct pt_regs *regs) { ret
 static inline void cpu_init_fred_exceptions(void) { }
 static inline void cpu_init_fred_rsps(void) { }
 static inline void fred_complete_exception_setup(void) { }
-static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { }
+static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector, unsigned long edata) { }
 static inline void fred_sync_rsp0(unsigned long rsp0) { }
 static inline void fred_update_rsp0(void) { }
 #endif /* CONFIG_X86_FRED */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c5766467a61..1d43d4a2f6b6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7079,7 +7079,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
 
 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
 	if (cpu_feature_enabled(X86_FEATURE_FRED))
-		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
+		fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector, 0);
 	else
 		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
 	kvm_after_interrupt(vcpu);
@@ -7393,7 +7393,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	    is_nmi(vmx_get_intr_info(vcpu))) {
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 		if (cpu_feature_enabled(X86_FEATURE_FRED))
-			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+			fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR,
+					    vmx_get_exit_qual(vcpu));
 		else
 			vmx_do_nmi_irqoff();
 		kvm_after_interrupt(vcpu);
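
(If the prep patch is folded into this series, the NMI side of the diff
above would presumably collapse into the dedicated wrapper, with the VM-exit
qualification passed straight through as event data.  A rough sketch of that
combination follows; the final naming and placement are up to the series.)

/* Sketch: the dedicated NMI wrapper extended with the event-data argument. */
static __always_inline void fred_nmi_from_kvm(unsigned long edata)
{
	struct fred_ss ss = {
		.ss     =__KERNEL_DS,
		.type   = EVENT_TYPE_NMI,
		.vector = NMI_VECTOR,
		.nmi    = true,
		.lm     = 1,
	};

	asm_fred_entry_from_kvm(ss, edata);
}

/*
 * Corresponding KVM call site in vmx_vcpu_enter_exit():
 *
 *	if (cpu_feature_enabled(X86_FEATURE_FRED))
 *		fred_nmi_from_kvm(vmx_get_exit_qual(vcpu));
 *	else
 *		vmx_do_nmi_irqoff();
 */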