diff mbox series

[2/7] xen/events: don't unmask an event channel when an eoi is pending

Message ID 20210206104932.29064-3-jgross@suse.com
State New
Headers show
Series xen/events: bug fixes and some diagnostic aids | expand

Commit Message

Juergen Gross Feb. 6, 2021, 10:49 a.m. UTC
An event channel should be kept masked when an eoi is pending for it.
When being migrated to another cpu, it might be unmasked, though.

In order to avoid this, keep two different flags for each event channel
to be able to distinguish "normal" masking/unmasking from eoi related
masking/unmasking. The event channel should only be able to generate
an interrupt if both flags are cleared.

Cc: stable@vger.kernel.org
Fixes: 54c9de89895e0a36047 ("xen/events: add a new late EOI evtchn framework")
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/xen/events/events_base.c | 63 +++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 10 deletions(-)

Comments

Jan Beulich Feb. 8, 2021, 10:06 a.m. UTC | #1
On 06.02.2021 11:49, Juergen Gross wrote:
> @@ -1798,6 +1818,29 @@ static void mask_ack_dynirq(struct irq_data *data)

>  	ack_dynirq(data);

>  }

>  

> +static void lateeoi_ack_dynirq(struct irq_data *data)

> +{

> +	struct irq_info *info = info_for_irq(data->irq);

> +	evtchn_port_t evtchn = info ? info->evtchn : 0;

> +

> +	if (VALID_EVTCHN(evtchn)) {

> +		info->eoi_pending = true;

> +		mask_evtchn(evtchn);

> +	}

> +}

> +

> +static void lateeoi_mask_ack_dynirq(struct irq_data *data)

> +{

> +	struct irq_info *info = info_for_irq(data->irq);

> +	evtchn_port_t evtchn = info ? info->evtchn : 0;

> +

> +	if (VALID_EVTCHN(evtchn)) {

> +		info->masked = true;

> +		info->eoi_pending = true;

> +		mask_evtchn(evtchn);

> +	}

> +}

> +

>  static int retrigger_dynirq(struct irq_data *data)

>  {

>  	evtchn_port_t evtchn = evtchn_from_irq(data->irq);

> @@ -2023,8 +2066,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {

>  	.irq_mask		= disable_dynirq,

>  	.irq_unmask		= enable_dynirq,

>  

> -	.irq_ack		= mask_ack_dynirq,

> -	.irq_mask_ack		= mask_ack_dynirq,

> +	.irq_ack		= lateeoi_ack_dynirq,

> +	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

>  

>  	.irq_set_affinity	= set_affinity_irq,

>  	.irq_retrigger		= retrigger_dynirq,

> 


Unlike the prior handler the two new ones don't call ack_dynirq()
anymore, and the description doesn't give a hint towards this
difference. As a consequence, clear_evtchn() also doesn't get
called anymore - patch 3 adds the calls, but claims an older
commit to have been at fault. _If_ ack_dynirq() indeed isn't to
be called here, shouldn't the clear_evtchn() calls get added
right here?

Jan
Juergen Gross Feb. 8, 2021, 10:21 a.m. UTC | #2
On 08.02.21 11:06, Jan Beulich wrote:
> On 06.02.2021 11:49, Juergen Gross wrote:

>> @@ -1798,6 +1818,29 @@ static void mask_ack_dynirq(struct irq_data *data)

>>   	ack_dynirq(data);

>>   }

>>   

>> +static void lateeoi_ack_dynirq(struct irq_data *data)

>> +{

>> +	struct irq_info *info = info_for_irq(data->irq);

>> +	evtchn_port_t evtchn = info ? info->evtchn : 0;

>> +

>> +	if (VALID_EVTCHN(evtchn)) {

>> +		info->eoi_pending = true;

>> +		mask_evtchn(evtchn);

>> +	}

>> +}

>> +

>> +static void lateeoi_mask_ack_dynirq(struct irq_data *data)

>> +{

>> +	struct irq_info *info = info_for_irq(data->irq);

>> +	evtchn_port_t evtchn = info ? info->evtchn : 0;

>> +

>> +	if (VALID_EVTCHN(evtchn)) {

>> +		info->masked = true;

>> +		info->eoi_pending = true;

>> +		mask_evtchn(evtchn);

>> +	}

>> +}

>> +

>>   static int retrigger_dynirq(struct irq_data *data)

>>   {

>>   	evtchn_port_t evtchn = evtchn_from_irq(data->irq);

>> @@ -2023,8 +2066,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {

>>   	.irq_mask		= disable_dynirq,

>>   	.irq_unmask		= enable_dynirq,

>>   

>> -	.irq_ack		= mask_ack_dynirq,

>> -	.irq_mask_ack		= mask_ack_dynirq,

>> +	.irq_ack		= lateeoi_ack_dynirq,

>> +	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

>>   

>>   	.irq_set_affinity	= set_affinity_irq,

>>   	.irq_retrigger		= retrigger_dynirq,

>>

> 

> Unlike the prior handler the two new ones don't call ack_dynirq()

> anymore, and the description doesn't give a hint towards this

> difference. As a consequence, clear_evtchn() also doesn't get

> called anymore - patch 3 adds the calls, but claims an older

> commit to have been at fault. _If_ ack_dynirq() indeed isn't to

> be called here, shouldn't the clear_evtchn() calls get added

> right here?


There was clearly too much time between writing this patch and looking
at its performance impact. :-(

Somehow I managed to overlook that I just introduced the bug here. This
OTOH explains why there are not tons of complaints with the current
implementation. :-)

Will merge patch 3 into this one.


Juergen
diff mbox series

Patch

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index e850f79351cb..6a836d131e73 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -97,7 +97,9 @@  struct irq_info {
 	short refcnt;
 	u8 spurious_cnt;
 	u8 is_accounted;
-	enum xen_irq_type type; /* type */
+	short type;		/* type: IRQT_* */
+	bool masked;		/* Is event explicitly masked? */
+	bool eoi_pending;	/* Is EOI pending? */
 	unsigned irq;
 	evtchn_port_t evtchn;   /* event channel */
 	unsigned short cpu;     /* cpu bound */
@@ -302,6 +304,8 @@  static int xen_irq_info_common_setup(struct irq_info *info,
 	info->irq = irq;
 	info->evtchn = evtchn;
 	info->cpu = cpu;
+	info->masked = true;
+	info->eoi_pending = false;
 
 	ret = set_evtchn_to_irq(evtchn, irq);
 	if (ret < 0)
@@ -585,7 +589,10 @@  static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 	}
 
 	info->eoi_time = 0;
-	unmask_evtchn(evtchn);
+	info->eoi_pending = false;
+
+	if (!info->masked)
+		unmask_evtchn(evtchn);
 }
 
 static void xen_irq_lateeoi_worker(struct work_struct *work)
@@ -830,7 +837,11 @@  static unsigned int __startup_pirq(unsigned int irq)
 		goto err;
 
 out:
-	unmask_evtchn(evtchn);
+	info->masked = false;
+
+	if (!info->eoi_pending)
+		unmask_evtchn(evtchn);
+
 	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
@@ -857,6 +868,7 @@  static void shutdown_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
+	info->masked = true;
 	mask_evtchn(evtchn);
 	xen_evtchn_close(evtchn);
 	xen_irq_info_cleanup(info);
@@ -1768,18 +1780,26 @@  static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
 
 static void enable_dynirq(struct irq_data *data)
 {
-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 
-	if (VALID_EVTCHN(evtchn))
-		unmask_evtchn(evtchn);
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = false;
+
+		if (!info->eoi_pending)
+			unmask_evtchn(evtchn);
+	}
 }
 
 static void disable_dynirq(struct irq_data *data)
 {
-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 
-	if (VALID_EVTCHN(evtchn))
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = true;
 		mask_evtchn(evtchn);
+	}
 }
 
 static void ack_dynirq(struct irq_data *data)
@@ -1798,6 +1818,29 @@  static void mask_ack_dynirq(struct irq_data *data)
 	ack_dynirq(data);
 }
 
+static void lateeoi_ack_dynirq(struct irq_data *data)
+{
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+	if (VALID_EVTCHN(evtchn)) {
+		info->eoi_pending = true;
+		mask_evtchn(evtchn);
+	}
+}
+
+static void lateeoi_mask_ack_dynirq(struct irq_data *data)
+{
+	struct irq_info *info = info_for_irq(data->irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+	if (VALID_EVTCHN(evtchn)) {
+		info->masked = true;
+		info->eoi_pending = true;
+		mask_evtchn(evtchn);
+	}
+}
+
 static int retrigger_dynirq(struct irq_data *data)
 {
 	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
@@ -2023,8 +2066,8 @@  static struct irq_chip xen_lateeoi_chip __read_mostly = {
 	.irq_mask		= disable_dynirq,
 	.irq_unmask		= enable_dynirq,
 
-	.irq_ack		= mask_ack_dynirq,
-	.irq_mask_ack		= mask_ack_dynirq,
+	.irq_ack		= lateeoi_ack_dynirq,
+	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
 
 	.irq_set_affinity	= set_affinity_irq,
 	.irq_retrigger		= retrigger_dynirq,