--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -55,6 +55,7 @@ static struct {
static irq_desc_t irq_desc[NR_IRQS];
static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
static DEFINE_PER_CPU(uint64_t, lr_mask);
+static DEFINE_PER_CPU(unsigned long, lr_clear_all);
static unsigned nr_lrs;
@@ -67,7 +68,7 @@ static DEFINE_PER_CPU(u8, gic_cpu_id);
/* Maximum cpu interface per GIC */
#define NR_GIC_CPU_IF 8
-static void gic_clear_lrs(struct vcpu *v);
+static void gic_clear_lrs(struct vcpu *v, bool_t all);
static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
{
@@ -109,6 +110,7 @@ void gic_save_state(struct vcpu *v)
v->arch.gic_lr[i] = GICH[GICH_LR + i];
v->arch.lr_mask = this_cpu(lr_mask);
v->arch.gic_apr = GICH[GICH_APR];
+    this_cpu(lr_clear_all) = 0;
/* Disable until next VCPU scheduled */
GICH[GICH_HCR] = 0;
isb();
@@ -122,13 +124,14 @@ void gic_restore_state(struct vcpu *v)
return;
this_cpu(lr_mask) = v->arch.lr_mask;
+    this_cpu(lr_clear_all) = 0;
for ( i=0; i<nr_lrs; i++)
GICH[GICH_LR + i] = v->arch.gic_lr[i];
GICH[GICH_APR] = v->arch.gic_apr;
GICH[GICH_HCR] = GICH_HCR_EN;
isb();
-    gic_clear_lrs(v);
+    gic_clear_lrs(v, 1);
gic_restore_pending_irqs(v);
}
@@ -372,6 +375,7 @@ static void __cpuinit gic_hyp_init(void)
GICH[GICH_MISR] = GICH_MISR_EOI;
this_cpu(lr_mask) = 0ULL;
+    this_cpu(lr_clear_all) = 0;
}
static void __cpuinit gic_hyp_disable(void)
@@ -726,11 +730,19 @@ static void _gic_clear_lr(struct vcpu *v, int i, int vgic_locked)
}
}
-static void gic_clear_lrs(struct vcpu *v)
+static void gic_clear_lrs(struct vcpu *v, bool_t all)
{
int i = 0;
+    uint64_t elsr;
+
+    if ( !all )
+    {
+        elsr = GICH[GICH_ELSR0] | (((uint64_t) GICH[GICH_ELSR1]) << 32);
+        elsr &= this_cpu(lr_mask);
+    } else
+        elsr = this_cpu(lr_mask);
-    while ((i = find_next_bit((const long unsigned int *) &this_cpu(lr_mask),
+    while ((i = find_next_bit((const long unsigned int *) &elsr,
                               nr_lrs, i)) < nr_lrs) {
_gic_clear_lr(v, i, 0);
@@ -743,6 +755,11 @@ void gic_set_clear_lr(struct vcpu *v, struct pending_irq *p)
_gic_clear_lr(v, p->lr, 1);
}
+void gic_set_clear_lrs_other(struct vcpu *v)
+{
+    set_bit(0, &per_cpu(lr_clear_all, v->processor));
+}
+
static void gic_restore_pending_irqs(struct vcpu *v)
{
int i;
@@ -796,7 +813,7 @@ int gic_events_need_delivery(void)
void gic_inject(void)
{
-    gic_clear_lrs(current);
+    gic_clear_lrs(current, test_and_clear_bit(0, &this_cpu(lr_clear_all)));
gic_restore_pending_irqs(current);
if (!gic_events_need_delivery())
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -716,6 +716,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
return;
} else {
set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
+        gic_set_clear_lrs_other(v);
goto out;
}
}
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -185,6 +185,7 @@ extern int gic_route_irq_to_guest(struct domain *d,
const struct dt_irq *irq,
const char * devname);
extern void gic_set_clear_lr(struct vcpu *v, struct pending_irq *p);
+extern void gic_set_clear_lrs_other(struct vcpu *v);
/* Accept an interrupt from the GIC and dispatch its handler */
extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq);
Read GICH_ELSR0 and GICH_ELSR1 to figure out which GICH_LR registers do
not contain valid interrupts, and only call _gic_clear_lr on those.

If a cpu is trying to inject an interrupt that is already inflight on
another cpu, it sets GIC_IRQ_GUEST_PENDING and sends an SGI to it. The
target cpu is going to be interrupted and _gic_clear_lr, called by
gic_clear_lrs, will take care of setting GICH_LR_PENDING if the irq is
still active. To make sure that _gic_clear_lr is called for this irq,
avoid filtering lr_mask with GICH_ELSR[01] in this case, so that
_gic_clear_lr is called on all the GICH_LRs. A simple per-cpu bit,
lr_clear_all, set by the sender cpu and cleared by the receiver cpu,
tells us whether we need to evaluate all the GICH_LRs or we can filter
them.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/gic.c        | 27 ++++++++++++++++++++++-----
 xen/arch/arm/vgic.c       |  1 +
 xen/include/asm-arm/gic.h |  1 +
 3 files changed, 24 insertions(+), 5 deletions(-)
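
Not part of the patch: the sender/receiver protocol above can be modelled
outside of Xen. The sketch below is a minimal standalone program, not
hypervisor code; NR_LRS, fake_elsr and the plain bool flag are illustrative
stand-ins for the hardware GICH_ELSR[01] registers, the per-cpu lr_mask and
the new per-cpu lr_clear_all bit, and ordinary assignments stand in for the
atomic bitops and the SGI.

/*
 * Standalone model of the LR-scan filtering in gic_clear_lrs().
 * All names here are illustrative, not Xen definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LRS 4                    /* pretend the GIC has 4 list registers */

static uint64_t lr_mask = 0x0b;     /* LRs 0, 1 and 3 are in use */
static uint64_t fake_elsr = 0x09;   /* hardware reports LRs 0 and 3 empty */
static bool lr_clear_all;           /* stand-in for the per-cpu bit */

/* Sender cpu: request a full rescan on the target cpu. */
static void set_clear_lrs_other(void)
{
    lr_clear_all = true;            /* Xen: set_bit() followed by an SGI */
}

/* Receiver cpu: scan all in-use LRs, or only the ones reported empty. */
static void clear_lrs(bool all)
{
    uint64_t elsr = all ? lr_mask : (fake_elsr & lr_mask);
    int i;

    for ( i = 0; i < NR_LRS; i++ )
        if ( elsr & (1ULL << i) )
            printf("would call _gic_clear_lr on LR%d\n", i);
}

int main(void)
{
    bool all;

    clear_lrs(false);               /* fast path: visits LR0 and LR3 only */

    set_clear_lrs_other();
    all = lr_clear_all;             /* Xen: test_and_clear_bit() */
    lr_clear_all = false;
    clear_lrs(all);                 /* full scan: LR0, LR1 and LR3 */
    return 0;
}

A single bit appears to be enough here because the receiver rescans every
allocated LR anyway when the bit is set; a finer-grained per-LR mask would
save a few reads at the cost of more sender/receiver ordering to get right.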