@@ -675,6 +675,23 @@ static inline void init_irq_proc(void)
}
#endif
+#ifdef CONFIG_IRQ_TIMINGS
+
+#define IRQ_TIMINGS_SHIFT 3
+#define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT)
+#define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1)
+
+struct irq_timings {
+ u64 values[IRQ_TIMINGS_SIZE]; /* our circular buffer */
+ u64 timestamp; /* latest timestamp */
+ unsigned int w_index; /* current buffer index */
+};
+
+struct irq_timings *irq_timings_get_next(int *irq);
+void irq_timings_enable(void);
+void irq_timings_disable(void);
+#endif
+
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
@@ -12,6 +12,7 @@ struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
+struct irq_timings;
struct pt_regs;
/**
@@ -51,6 +52,9 @@ struct irq_desc {
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
+#ifdef CONFIG_IRQ_TIMINGS
+ struct irq_timings __percpu *timings;
+#endif
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
#endif
@@ -81,6 +81,9 @@ config GENERIC_MSI_IRQ_DOMAIN
config HANDLE_DOMAIN_IRQ
bool
+config IRQ_TIMINGS
+ bool
+
config IRQ_DOMAIN_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs"
depends on IRQ_DOMAIN && DEBUG_FS
@@ -9,3 +9,4 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
obj-$(CONFIG_PM_SLEEP) += pm.o
obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+obj-$(CONFIG_IRQ_TIMINGS) += timings.o
@@ -138,6 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
unsigned int flags = 0, irq = desc->irq_data.irq;
struct irqaction *action;
+ handle_timings(desc);
+
for_each_action_of_desc(desc, action) {
irqreturn_t res;
@@ -57,6 +57,7 @@ enum {
IRQS_WAITING = 0x00000080,
IRQS_PENDING = 0x00000200,
IRQS_SUSPENDED = 0x00000800,
+ IRQS_TIMINGS = 0x00001000,
};
#include "debug.h"
@@ -223,3 +224,63 @@ irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
+
+#ifdef CONFIG_IRQ_TIMINGS
+static inline int alloc_timings(struct irq_desc *desc)
+{
+ desc->timings = alloc_percpu(struct irq_timings);
+ if (!desc->timings)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_timings(struct irq_desc *desc)
+{
+ free_percpu(desc->timings);
+}
+
+static inline void remove_timings(struct irq_desc *desc)
+{
+ desc->istate &= ~IRQS_TIMINGS;
+}
+
+static inline void setup_timings(struct irq_desc *desc, struct irqaction *act)
+{
+ /*
+ * We don't need the measurement because the idle code already
+ * knows the next expiry event.
+ */
+ if (act->flags & __IRQF_TIMER)
+ return;
+
+ desc->istate |= IRQS_TIMINGS;
+}
+
+extern struct static_key_false irq_timing_enabled;
+
+extern void __handle_timings(struct irq_desc *desc);
+
+/*
+ * The function handle_timings is only called in one place in the
+ * interrupt handler. We want this function always inline so the
+ * code inside is embedded in the function and the static key branching
+ * code can act at the higher level. Without the explicit __always_inline
+ * we can end up with a call to the 'handle_timings' function with a small
+ * overhead in the hotpath for nothing.
+ */
+static __always_inline void handle_timings(struct irq_desc *desc)
+{
+ if (static_key_enabled(&irq_timing_enabled)) {
+ if (desc->istate & IRQS_TIMINGS)
+ __handle_timings(desc);
+ }
+}
+#else
+static inline int alloc_timings(struct irq_desc *desc) { return 0; }
+static inline void free_timings(struct irq_desc *desc) {}
+static inline void handle_timings(struct irq_desc *desc) {}
+static inline void remove_timings(struct irq_desc *desc) {}
+static inline void setup_timings(struct irq_desc *desc,
+ struct irqaction *act) {}
+#endif
@@ -174,6 +174,9 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
if (alloc_masks(desc, gfp, node))
goto err_kstat;
+ if (alloc_timings(desc))
+ goto err_mask;
+
raw_spin_lock_init(&desc->lock);
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_rcu_head(&desc->rcu);
@@ -182,6 +185,8 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
return desc;
+err_mask:
+ free_masks(desc);
err_kstat:
free_percpu(desc->kstat_irqs);
err_desc:
@@ -193,6 +198,7 @@ static void delayed_free_desc(struct rcu_head *rhp)
{
struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+ free_timings(desc);
free_masks(desc);
free_percpu(desc->kstat_irqs);
kfree(desc);
@@ -1350,6 +1350,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
__enable_irq(desc);
}
+ setup_timings(desc, new);
+
raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
@@ -1480,6 +1482,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
irq_settings_clr_disable_unlazy(desc);
irq_shutdown(desc);
irq_release_resources(desc);
+ remove_timings(desc);
}
#ifdef CONFIG_SMP
new file mode 100644
@@ -0,0 +1,104 @@
+/*
+ * linux/kernel/irq/timings.c
+ *
+ * Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/percpu.h>
+#include <linux/static_key.h>
+
+#include "internals.h"
+
+DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
+
+void irq_timings_enable(void)
+{
+ static_branch_inc(&irq_timing_enabled);
+}
+
+void irq_timings_disable(void)
+{
+ static_branch_dec(&irq_timing_enabled);
+}
+
+/**
+ * __handle_timings - stores an irq timing when an interrupt occurs
+ *
+ * @desc: the irq descriptor
+ *
+ * For all interrupts with the IRQS_TIMINGS flag set, this function
+ * computes the time interval between two interrupt events and stores
+ * it in a circular buffer.
+ */
+void __handle_timings(struct irq_desc *desc)
+{
+ struct irq_timings *timings;
+ u64 prev, now, diff;
+
+ timings = this_cpu_ptr(desc->timings);
+ now = local_clock();
+ prev = timings->timestamp;
+ timings->timestamp = now;
+
+ /*
+ * In case it is the first time this function is called, the
+ * 'prev' variable will be zero which reflects the time origin
+ * when the system booted.
+ */
+ diff = now - prev;
+
+ /* The oldest value corresponds to the next index. */
+ timings->w_index = (timings->w_index + 1) & IRQ_TIMINGS_MASK;
+ timings->values[timings->w_index] = diff;
+}
+
+/**
+ * irq_timings_get_next - return the next irq timing
+ *
+ * @irq: a pointer to an integer representing the interrupt number
+ *
+ * This function allows to browse safely the interrupt descriptors in order
+ * to retrieve the interrupts timings. The parameter gives the interrupt
+ * number to begin with and will return the interrupt timings for the next
+ * allocated irq. This approach gives us the possibility to go through the
+ * different interrupts without having to handle the sparse irq.
+ *
+ * The function changes @irq to the next allocated irq + 1, it should be
+ * passed back again and again until NULL is returned. Usually this function
+ * is called the first time with @irq = 0.
+ *
+ * Returns a struct irq_timings, NULL if we reach the end of the interrupts
+ * list.
+ */
+struct irq_timings *irq_timings_get_next(int *irq)
+{
+ struct irq_desc *desc;
+ int next;
+
+again:
+ /* Do a racy lookup of the next allocated irq */
+ next = irq_get_next_irq(*irq);
+ if (next >= nr_irqs)
+ return NULL;
+
+ *irq = next + 1;
+
+ /*
+ * Now lookup the descriptor. It's RCU protected. This
+ * descriptor might belong to an uninteresting interrupt or
+ * one that is not measured. Look for the next interrupt in
+ * that case.
+ */
+ desc = irq_to_desc(next);
+ if (!desc || !(desc->istate & IRQS_TIMINGS))
+ goto again;
+
+ return this_cpu_ptr(desc->timings);
+}