| Message ID | 20180321163235.12529-13-andre.przywara@linaro.org |
|---|---|
| State | New |
| Series | New VGIC(-v2) implementation |
On Wed, 21 Mar 2018, Andre Przywara wrote:
> Adds the sorting function to cover the case where you have more IRQs
> to consider than you have LRs. We consider their priorities.
> This uses the new list_sort() implementation imported from Linux.
>
> This is based on Linux commit 8e4447457965, written by Christoffer Dall.
>
> Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
> Reviewed-by: Julien Grall <julien.grall@arm.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
>  xen/arch/arm/vgic/vgic.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 59 insertions(+)
>
> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
> index f7dfd01c1d..ee0de8d2e0 100644
> --- a/xen/arch/arm/vgic/vgic.c
> +++ b/xen/arch/arm/vgic/vgic.c
> @@ -15,6 +15,7 @@
>   * along with this program. If not, see <http://www.gnu.org/licenses/>.
>   */
> 
> +#include <xen/list_sort.h>
>  #include <xen/sched.h>
>  #include <asm/bug.h>
>  #include <asm/event.h>
> @@ -193,6 +194,64 @@ static struct vcpu *vgic_target_oracle(struct vgic_irq *irq)
>      return NULL;
>  }
> 
> +/*
> + * The order of items in the ap_lists defines how we'll pack things in LRs as
> + * well, the first items in the list being the first things populated in the
> + * LRs.
> + *
> + * A hard rule is that active interrupts can never be pushed out of the LRs
> + * (and therefore take priority) since we cannot reliably trap on deactivation
> + * of IRQs and therefore they have to be present in the LRs.
> + *
> + * Otherwise things should be sorted by the priority field and the GIC
> + * hardware support will take care of preemption of priority groups etc.
> + *
> + * Return negative if "a" sorts before "b", 0 to preserve order, and positive
> + * to sort "b" before "a".
> + */
> +static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
> +{
> +    struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
> +    struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
> +    bool penda, pendb;
> +    int ret;
> +
> +    spin_lock(&irqa->irq_lock);
> +    spin_lock(&irqb->irq_lock);
> +
> +    if ( irqa->active || irqb->active )
> +    {
> +        ret = (int)irqb->active - (int)irqa->active;
> +        goto out;
> +    }
> +
> +    penda = irqa->enabled && irq_is_pending(irqa);
> +    pendb = irqb->enabled && irq_is_pending(irqb);
> +
> +    if ( !penda || !pendb )
> +    {
> +        ret = (int)pendb - (int)penda;
> +        goto out;
> +    }
> +
> +    /* Both pending and enabled, sort by priority */
> +    ret = irqa->priority - irqb->priority;
> +out:
> +    spin_unlock(&irqb->irq_lock);
> +    spin_unlock(&irqa->irq_lock);
> +    return ret;
> +}
> +
> +/* Must be called with the ap_list_lock held */
> +static void vgic_sort_ap_list(struct vcpu *vcpu)
> +{
> +    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic;
> +
> +    ASSERT(spin_is_locked(&vgic_cpu->ap_list_lock));
> +
> +    list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
> +}
> +
>  /*
>   * Only valid injection if changing level for level-triggered IRQs or for a
>   * rising edge.
> --
> 2.14.1
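For illustration, the three ordering rules implemented by vgic_irq_cmp() (active interrupts first, then interrupts that are both enabled and pending, then ascending priority value, since on the GIC a lower priority number means a higher priority) can be modelled in stand-alone user-space C. The sketch below uses a hypothetical toy_irq structure and qsort(); it is not the Xen code, only a self-contained demonstration of the same comparison logic.

/* Toy model of the ap_list ordering rules -- not the actual Xen structures. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct toy_irq {
    unsigned int intid;
    bool active;
    bool enabled;
    bool pending;
    unsigned char priority;   /* lower value == higher priority, as on the GIC */
};

/*
 * Same three rules as vgic_irq_cmp(): active first, then enabled+pending,
 * then ascending priority value.
 */
static int toy_irq_cmp(const void *pa, const void *pb)
{
    const struct toy_irq *a = pa, *b = pb;
    bool penda, pendb;

    if ( a->active || b->active )
        return (int)b->active - (int)a->active;

    penda = a->enabled && a->pending;
    pendb = b->enabled && b->pending;
    if ( !penda || !pendb )
        return (int)pendb - (int)penda;

    return a->priority - b->priority;
}

int main(void)
{
    struct toy_irq irqs[] = {
        { .intid = 27, .enabled = true,  .pending = true,  .priority = 0xa0 },
        { .intid = 33, .active  = true,                    .priority = 0xf0 },
        { .intid = 40, .enabled = true,  .pending = true,  .priority = 0x20 },
        { .intid = 52, .enabled = false, .pending = true,  .priority = 0x00 },
    };

    qsort(irqs, sizeof(irqs) / sizeof(irqs[0]), sizeof(irqs[0]), toy_irq_cmp);

    /* Prints 33 (active), 40 (prio 0x20), 27 (prio 0xa0), 52 (not enabled). */
    for ( unsigned int i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++ )
        printf("INTID %u\n", irqs[i].intid);

    return 0;
}

Note that the active interrupt wins even though its priority value (0xf0) is the numerically largest, and the disabled interrupt sorts last despite having the best priority, which matches the "hard rule" described in the comment block of the patch.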
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index f7dfd01c1d..ee0de8d2e0 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -15,6 +15,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/list_sort.h>
 #include <xen/sched.h>
 #include <asm/bug.h>
 #include <asm/event.h>
@@ -193,6 +194,64 @@ static struct vcpu *vgic_target_oracle(struct vgic_irq *irq)
     return NULL;
 }
 
+/*
+ * The order of items in the ap_lists defines how we'll pack things in LRs as
+ * well, the first items in the list being the first things populated in the
+ * LRs.
+ *
+ * A hard rule is that active interrupts can never be pushed out of the LRs
+ * (and therefore take priority) since we cannot reliably trap on deactivation
+ * of IRQs and therefore they have to be present in the LRs.
+ *
+ * Otherwise things should be sorted by the priority field and the GIC
+ * hardware support will take care of preemption of priority groups etc.
+ *
+ * Return negative if "a" sorts before "b", 0 to preserve order, and positive
+ * to sort "b" before "a".
+ */
+static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+    struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
+    struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
+    bool penda, pendb;
+    int ret;
+
+    spin_lock(&irqa->irq_lock);
+    spin_lock(&irqb->irq_lock);
+
+    if ( irqa->active || irqb->active )
+    {
+        ret = (int)irqb->active - (int)irqa->active;
+        goto out;
+    }
+
+    penda = irqa->enabled && irq_is_pending(irqa);
+    pendb = irqb->enabled && irq_is_pending(irqb);
+
+    if ( !penda || !pendb )
+    {
+        ret = (int)pendb - (int)penda;
+        goto out;
+    }
+
+    /* Both pending and enabled, sort by priority */
+    ret = irqa->priority - irqb->priority;
+out:
+    spin_unlock(&irqb->irq_lock);
+    spin_unlock(&irqa->irq_lock);
+    return ret;
+}
+
+/* Must be called with the ap_list_lock held */
+static void vgic_sort_ap_list(struct vcpu *vcpu)
+{
+    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic;
+
+    ASSERT(spin_is_locked(&vgic_cpu->ap_list_lock));
+
+    list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
+}
+
 /*
  * Only valid injection if changing level for level-triggered IRQs or for a
  * rising edge.
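To show where the sorting fits in, here is a hypothetical sketch of a flush path that consumes the sorted ap_list when there are more interrupts to consider than list registers. It is not part of this series; vgic_ap_list_count(), gic_get_nr_lrs() and vgic_populate_lr() are assumed helper names used only for illustration.

/*
 * Hypothetical sketch only: how a caller could use vgic_sort_ap_list() when
 * packing interrupts into the LRs. vgic_ap_list_count(), gic_get_nr_lrs()
 * and vgic_populate_lr() are assumed helpers, not functions from this patch.
 */
static void vgic_flush_lr_state_sketch(struct vcpu *vcpu)
{
    struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic;
    unsigned int lr = 0, nr_lrs = gic_get_nr_lrs();
    struct vgic_irq *irq;

    spin_lock(&vgic_cpu->ap_list_lock);

    /*
     * Sorting only matters when the list does not fit into the LRs;
     * otherwise every entry gets an LR anyway.
     */
    if ( vgic_ap_list_count(vcpu) > nr_lrs )
        vgic_sort_ap_list(vcpu);

    /* The first entries of the sorted list are the first to get an LR. */
    list_for_each_entry ( irq, &vgic_cpu->ap_list_head, ap_list )
    {
        if ( lr == nr_lrs )
            break;

        vgic_populate_lr(vcpu, irq, lr++);
    }

    spin_unlock(&vgic_cpu->ap_list_lock);
}

Since vgic_sort_ap_list() asserts that ap_list_lock is held, any such caller has to hold the lock around both the sort and the subsequent walk of the list.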