@@ -741,14 +741,81 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
	}
}
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(int, int);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(int, int))
+{
+	iommu_ga_log_notifier = notifier;
+
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+	u32 head, tail, cnt = 0;
+
+	if (iommu->ga_log == NULL)
+		return;
+
+	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+	while (head != tail) {
+		volatile u64 *raw;
+		u64 log_entry;
+
+		raw = (u64 *)(iommu->ga_log + head);
+		cnt++;
+
+		/* Avoid memcpy function-call overhead */
+		log_entry = *raw;
+
+		/* Update head pointer of hardware ring-buffer */
+		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+		/* Handle GA entry */
+		switch (GA_REQ_TYPE(log_entry)) {
+		case GA_GUEST_NR: {
+			u32 ga_tag = GA_TAG(log_entry);
+
+			if (!iommu_ga_log_notifier)
+				break;
+
+			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
+				 __func__, GA_DEVID(log_entry), ga_tag);
+
+			if (iommu_ga_log_notifier(GATAG_TO_VMID(ga_tag),
+						  GATAG_TO_VCPUID(ga_tag)) != 0)
+				pr_err("AMD-Vi: GA log notifier failed.\n");
+			break;
+		}
+		default:
+			break;
+		}
+
+		/* Refresh ring-buffer information */
+		head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+		tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+	}
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define AMD_IOMMU_INT_MASK	\
+	(MMIO_STATUS_EVT_INT_MASK |	\
+	 MMIO_STATUS_PPR_INT_MASK |	\
+	 MMIO_STATUS_GALOG_INT_MASK)
+
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
-		/* Enable EVT and PPR interrupts again */
-		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+	while (status & AMD_IOMMU_INT_MASK) {
+		/* Enable EVT, PPR, and GA interrupts again */
+		writel(AMD_IOMMU_INT_MASK,
			iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & MMIO_STATUS_EVT_INT_MASK) {
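For reference, a minimal sketch of how a consumer such as KVM's AVIC code might hook into the API added above. Only amd_iommu_register_ga_log_notifier() and the (vmid, vcpu_id) callback signature come from this patch; the handler name and body below are hypothetical:

/* Illustrative consumer-side sketch; not part of this patch. */
static int example_ga_log_notifier(int vmid, int vcpu_id)
{
	/*
	 * Look up the vCPU identified by (vmid, vcpu_id) and kick it
	 * out of its halted state; returning non-zero makes the IOMMU
	 * driver print "GA log notifier failed.".
	 */
	return 0;
}

static int __init example_init(void)
{
	return amd_iommu_register_ga_log_notifier(example_ga_log_notifier);
}

Note that registration is a bare pointer store with no locking and no unregistration path, so the design appears to assume a single consumer that registers once at init time.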
@@ -761,6 +828,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
			iommu_poll_ppr_log(iommu);
		}
+#ifdef CONFIG_IRQ_REMAP
+		if (status & MMIO_STATUS_GALOG_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
+			iommu_poll_ga_log(iommu);
+		}
+#endif
+
		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling interrupt (by writing 1
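A note on the head update in iommu_poll_ga_log() above: head and tail are byte offsets into the ring, so "head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE" is plain modular arithmetic. Assuming, purely for illustration, an 8-byte GA_ENTRY_SIZE and a 4096-byte GA_LOG_SIZE (the constants are defined elsewhere in the series), a head of 4088 advances to (4088 + 8) % 4096 = 0 and wraps to the start of the buffer. Writing the new head back to MMIO_GA_HEAD_OFFSET after each entry releases that slot to the hardware, and the re-read of head and tail at the bottom of the loop picks up entries the IOMMU appended while the current one was being handled.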
@@ -765,6 +765,11 @@ static int iommu_init_ga(struct amd_iommu *iommu)
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+		hash_init(iommu->gatag_ir_hash);
+		spin_lock_init(&iommu->gatag_ir_hash_lock);
+	}
+
	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
+#include <linux/hashtable.h>
/*
 * Maximum number of IOMMUs supported
@@ -120,6 +121,14 @@
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)
+#define AMD_IOMMU_GA_HASH_BITS	16
+#define AMD_IOMMU_GA_HASH_MASK	((1U << AMD_IOMMU_GA_HASH_BITS) - 1)
+#define AMD_IOMMU_GATAG(x, y)	\
+	(((((x) & 0xFF) << 8) | ((y) & 0xFF)) & AMD_IOMMU_GA_HASH_MASK)
+
+#define GATAG_TO_VMID(x)	(((x) >> 8) & 0xFF)
+#define GATAG_TO_VCPUID(x)	((x) & 0xFF)
+
/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
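To make the tag layout concrete, a short worked example using the macros above (the IDs are arbitrary):

/* Illustrative only; 0x12 and 0x34 are arbitrary IDs. */
u32 tag  = AMD_IOMMU_GATAG(0x12, 0x34);  /* (0x12 << 8) | 0x34 = 0x1234 */
int vmid = GATAG_TO_VMID(tag);           /* (0x1234 >> 8) & 0xFF = 0x12 */
int vcpu = GATAG_TO_VCPUID(tag);         /* 0x1234 & 0xFF = 0x34 */

Since both fields are masked to 8 bits, a tag can never exceed 0xFFFF, so the final & AMD_IOMMU_GA_HASH_MASK is defensive rather than functional: every tag already fits the 16-bit hash key space.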
@@ -565,6 +574,16 @@ struct amd_iommu {
	struct irq_domain *msi_domain;
	struct amd_irte_ops *irte_ops;
+
+	/*
+	 * Hash table mapping a ga_tag to the struct amd_ir_data that
+	 * holds its interrupt remapping information (e.g. the cached
+	 * IRTE and a reference to the entry in the remapping table).
+	 * This avoids scanning the IRTEs whenever they must be
+	 * updated because a vCPU was scheduled to a particular CPU.
+	 */
+	DECLARE_HASHTABLE(gatag_ir_hash, AMD_IOMMU_GA_HASH_BITS);
+	spinlock_t gatag_ir_hash_lock;
#endif
};
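A minimal sketch of how this table might be driven. The helpers below are hypothetical; only gatag_ir_hash, gatag_ir_hash_lock, and the hnode member added to struct amd_ir_data in the next hunk come from this patch, and the lookup assumes the entry also caches its own tag in a ga_tag field that is not shown here:

/* Hypothetical helpers; not part of this patch. */
static void gatag_ir_insert(struct amd_iommu *iommu, u32 ga_tag,
			    struct amd_ir_data *ir_data)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->gatag_ir_hash_lock, flags);
	hash_add(iommu->gatag_ir_hash, &ir_data->hnode, ga_tag);
	spin_unlock_irqrestore(&iommu->gatag_ir_hash_lock, flags);
}

static struct amd_ir_data *gatag_ir_find(struct amd_iommu *iommu, u32 ga_tag)
{
	struct amd_ir_data *ir_data, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&iommu->gatag_ir_hash_lock, flags);
	hash_for_each_possible(iommu->gatag_ir_hash, ir_data, hnode, ga_tag) {
		if (ir_data->ga_tag == ga_tag) {	/* assumed cached tag */
			found = ir_data;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu->gatag_ir_hash_lock, flags);

	return found;
}

The key comparison inside the loop matters: hash_for_each_possible() only narrows the search to one bucket, and distinct tags can still hash to the same bucket, so the entry must carry enough state to be identified exactly.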
@@ -819,6 +838,7 @@ struct amd_irte_ops {
};
struct amd_ir_data {
+	struct hlist_node hnode;
	struct irq_2_irte irq_2_irte;
	void *entry;
	union {
@@ -168,11 +168,25 @@ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
					   amd_iommu_invalidate_ctx cb);
-
-#else
+#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
-#endif
+#endif /* CONFIG_AMD_IOMMU */
+
+#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP)
+
+/* IOMMU AVIC Function */
+extern int amd_iommu_register_ga_log_notifier(int (*notifier)(int, int));
+
+#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+
+static inline int
+amd_iommu_register_ga_log_notifier(int (*notifier)(int, int))
+{
+	return 0;
+}
+
+#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
#endif /* _ASM_X86_AMD_IOMMU_H */