Diffstat (limited to 'drivers/iommu/amd/init.c')
-rw-r--r-- | drivers/iommu/amd/init.c | 131
1 file changed, 94 insertions, 37 deletions
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index ea0f1ab94178..45efb7e5d725 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -483,6 +483,10 @@ static void iommu_disable(struct amd_iommu *iommu)
 	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
 	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
 
+	/* Disable IOMMU PPR logging */
+	iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
+	iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
+
 	/* Disable IOMMU hardware itself */
 	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 }
@@ -753,37 +757,61 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
 }
 
 /*
+ * Interrupt handler has processed all pending events and adjusted head
+ * and tail pointer. Reset overflow mask and restart logging again.
+ */
+static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+				  u8 cntrl_intr, u8 cntrl_log,
+				  u32 status_run_mask, u32 status_overflow_mask)
+{
+	u32 status;
+
+	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+	if (status & status_run_mask)
+		return;
+
+	pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
+
+	iommu_feature_disable(iommu, cntrl_log);
+	iommu_feature_disable(iommu, cntrl_intr);
+
+	writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+	iommu_feature_enable(iommu, cntrl_intr);
+	iommu_feature_enable(iommu, cntrl_log);
+}
+
+/*
  * This function restarts event logging in case the IOMMU experienced
  * an event log buffer overflow.
  */
 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
 {
-	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
-	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+	amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
+			      CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
+			      MMIO_STATUS_EVT_OVERFLOW_MASK);
 }
 
 /*
  * This function restarts event logging in case the IOMMU experienced
- * an GA log overflow.
+ * GA log overflow.
  */
 void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
 {
-	u32 status;
-
-	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-	if (status & MMIO_STATUS_GALOG_RUN_MASK)
-		return;
-
-	pr_info_ratelimited("IOMMU GA Log restarting\n");
-
-	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
-	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
-
-	writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
-	       iommu->mmio_base + MMIO_STATUS_OFFSET);
+	amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
+			      CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
+			      MMIO_STATUS_GALOG_OVERFLOW_MASK);
+}
 
-	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
-	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+/*
+ * This function restarts ppr logging in case the IOMMU experienced
+ * PPR log overflow.
+ */
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
+{
+	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
+			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
+			      MMIO_STATUS_PPR_OVERFLOW_MASK);
 }
 
 /*
@@ -906,6 +934,8 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 	if (iommu->ppr_log == NULL)
 		return;
 
+	iommu_feature_enable(iommu, CONTROL_PPR_EN);
+
 	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
 
 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
@@ -916,7 +946,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
 	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
-	iommu_feature_enable(iommu, CONTROL_PPR_EN);
+	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
 }
 
 static void __init free_ppr_log(struct amd_iommu *iommu)
@@ -2311,6 +2341,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq
 		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
 
 		irqd->chip = &intcapxt_controller;
+		irqd->hwirq = info->hwirq;
 		irqd->chip_data = info->data;
 		__irq_set_handler(i, handle_edge_irq, 0, "edge");
 	}
@@ -2337,22 +2368,14 @@ static void intcapxt_unmask_irq(struct irq_data *irqd)
 	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
 	xt.destid_24_31 = cfg->dest_apicid >> 24;
 
-	/**
-	 * Current IOMMU implementation uses the same IRQ for all
-	 * 3 IOMMU interrupts.
-	 */
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
-	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+	writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
 }
 
 static void intcapxt_mask_irq(struct irq_data *irqd)
 {
 	struct amd_iommu *iommu = irqd->chip_data;
 
-	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
-	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
-	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+	writeq(0, iommu->mmio_base + irqd->hwirq);
 }
 
 
@@ -2415,7 +2438,8 @@ static struct irq_domain *iommu_get_irqdomain(void)
 	return iommu_irqdomain;
 }
 
-static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
+				  int hwirq, irq_handler_t thread_fn)
 {
 	struct irq_domain *domain;
 	struct irq_alloc_info info;
@@ -2429,6 +2453,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
 	init_irq_alloc_info(&info, NULL);
 	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
 	info.data = iommu;
+	info.hwirq = hwirq;
 
 	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
 	if (irq < 0) {
@@ -2437,7 +2462,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
 	}
 
 	ret = request_threaded_irq(irq, amd_iommu_int_handler,
-				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
+				   thread_fn, 0, devname, iommu);
 	if (ret) {
 		irq_domain_free_irqs(irq, 1);
 		irq_domain_remove(domain);
@@ -2447,6 +2472,37 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
 	return 0;
 }
 
+static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+{
+	int ret;
+
+	snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
+		 "AMD-Vi%d-Evt", iommu->index);
+	ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
+				     MMIO_INTCAPXT_EVT_OFFSET,
+				     amd_iommu_int_thread_evtlog);
+	if (ret)
+		return ret;
+
+	snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
+		 "AMD-Vi%d-PPR", iommu->index);
+	ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
+				     MMIO_INTCAPXT_PPR_OFFSET,
+				     amd_iommu_int_thread_pprlog);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_IRQ_REMAP
+	snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
+		 "AMD-Vi%d-GA", iommu->index);
+	ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
+				     MMIO_INTCAPXT_GALOG_OFFSET,
+				     amd_iommu_int_thread_galog);
+#endif
+
+	return ret;
+}
+
 static int iommu_init_irq(struct amd_iommu *iommu)
 {
 	int ret;
@@ -2472,8 +2528,6 @@ enable_faults:
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
-	if (iommu->ppr_log != NULL)
-		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
 
 	return 0;
 }
 
@@ -2889,8 +2943,6 @@ static void enable_iommus_vapic(void)
 static void enable_iommus(void)
 {
 	early_enable_iommus();
-	enable_iommus_vapic();
-	enable_iommus_v2();
 }
 
 static void disable_iommus(void)
@@ -3154,6 +3206,13 @@ static int amd_iommu_enable_interrupts(void)
 			goto out;
 	}
 
+	/*
+	 * Interrupt handler is ready to process interrupts. Enable
+	 * PPR and GA log interrupt for all IOMMUs.
+	 */
+	enable_iommus_vapic();
+	enable_iommus_v2();
+
 out:
 	return ret;
 }
@@ -3233,8 +3292,6 @@ static int __init state_next(void)
 		register_syscore_ops(&amd_iommu_syscore_ops);
 		ret = amd_iommu_init_pci();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
-		enable_iommus_vapic();
-		enable_iommus_v2();
 		break;
 	case IOMMU_PCI_INIT:
 		ret = amd_iommu_enable_interrupts();
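Editor's note (not part of the patch): the new amd_iommu_restart_*_log() helpers are meant to be called from the per-log threaded handlers registered above (amd_iommu_int_thread_evtlog/_pprlog/_galog, defined in iommu.c) once a handler has drained its log, as described in the comment above amd_iommu_restart_log(). The outline below is an illustrative sketch of that flow only, not the in-tree handler; drain_ppr_log() is a hypothetical placeholder for the real PPR-entry processing loop.

/*
 * Illustrative sketch only -- not the in-tree amd_iommu_int_thread_pprlog().
 * Shows the intended order: drain the log, ack the interrupt bit, then
 * clear the overflow condition and re-enable logging via the new helper.
 */
static irqreturn_t example_ppr_log_thread(int irq, void *data)
{
	struct amd_iommu *iommu = data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (status & MMIO_STATUS_PPR_INT_MASK) {
		/* Consume pending PPR entries; this advances the head pointer. */
		drain_ppr_log(iommu);	/* hypothetical helper */

		/* Acknowledge the PPR interrupt bit in the status register. */
		writel(MMIO_STATUS_PPR_INT_MASK,
		       iommu->mmio_base + MMIO_STATUS_OFFSET);
	}

	/*
	 * If the log overflowed, clear the overflow mask and restart
	 * PPR logging through the helper added by this patch.
	 */
	if (status & MMIO_STATUS_PPR_OVERFLOW_MASK)
		amd_iommu_restart_ppr_log(iommu);

	return IRQ_HANDLED;
}

Because each log now has its own IRQ (hwirq doubles as the MMIO_INTCAPXT_* register offset), such a handler only ever touches the log it was registered for.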