author | Christoph Hellwig <hch@lst.de> | 2020-09-25 06:19:19 +0200
committer | Christoph Hellwig <hch@lst.de> | 2020-09-25 06:19:19 +0200
commit | 8c1c6c7588b2faf9dd45fb3cfb7497fd09bbfe7c (patch)
tree | 50a7ddcfd7e7b1a7e4fb656bb88a036dbcff4bd6 /drivers/iommu
parent | 38225f2ef2f4263fc65acae63fdd4e8489c9ffcf (diff)
parent | 171d4ff79f965c1f164705ef0aaea102a6ad238b (diff)
Merge branch 'master' of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into dma-mapping-for-next
Pull in the latest 5.9 tree for the commit to revert the
V4L2_FLAG_MEMORY_NON_CONSISTENT uapi addition.
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/amd/Kconfig | 2
-rw-r--r-- | drivers/iommu/amd/init.c | 21
-rw-r--r-- | drivers/iommu/amd/iommu.c | 32
-rw-r--r-- | drivers/iommu/amd/iommu_v2.c | 7
-rw-r--r-- | drivers/iommu/intel/iommu.c | 114
-rw-r--r-- | drivers/iommu/intel/irq_remapping.c | 10
6 files changed, 124 insertions, 62 deletions
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 1f061d91e0b8..626b97d0dd21 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,7 +10,7 @@ config AMD_IOMMU
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_DMA
-	depends on X86_64 && PCI && ACPI
+	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
 	  With this option you can enable support for AMD IOMMU hardware in
 	  your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c652f16eb702..445a08d23fed 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1511,7 +1511,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
 		else
 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
-		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+
+		/*
+		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
+		 * GAM also requires GA mode. Therefore, we need to
+		 * check cmpxchg16b support before enabling it.
+		 */
+		if (!boot_cpu_has(X86_FEATURE_CX16) ||
+		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
 		break;
 	case 0x11:
@@ -1520,8 +1527,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
 		else
 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
-		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+
+		/*
+		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports.
+		 * XT, GAM also requires GA mode. Therefore, we need to
+		 * check cmpxchg16b support before enabling them.
+		 */
+		if (!boot_cpu_has(X86_FEATURE_CX16) ||
+		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+			break;
+		}
+
 		/*
 		 * Note: Since iommu_update_intcapxt() leverages
 		 * the IOMMU MMIO access to MSI capability block registers
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ba9f3dbc5b94..10e4200d3552 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2659,7 +2659,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
 	if (!dev_data)
 		return 0;
 
-	if (dev_data->iommu_v2)
+	/*
+	 * Do not identity map IOMMUv2 capable devices when memory encryption is
+	 * active, because some of those devices (AMD GPUs) don't have the
+	 * encryption bit in their DMA-mask and require remapping.
+	 */
+	if (!mem_encrypt_active() && dev_data->iommu_v2)
 		return IOMMU_DOMAIN_IDENTITY;
 
 	return 0;
@@ -3292,6 +3297,7 @@ out:
 static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 			  struct amd_ir_data *data)
 {
+	bool ret;
 	struct irq_remap_table *table;
 	struct amd_iommu *iommu;
 	unsigned long flags;
@@ -3309,10 +3315,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 
 	entry = (struct irte_ga *)table->table;
 	entry = &entry[index];
-	entry->lo.fields_remap.valid = 0;
-	entry->hi.val = irte->hi.val;
-	entry->lo.val = irte->lo.val;
-	entry->lo.fields_remap.valid = 1;
+
+	ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
+			     entry->lo.val, entry->hi.val,
+			     irte->lo.val, irte->hi.val);
+	/*
+	 * We use cmpxchg16 to atomically update the 128-bit IRTE,
+	 * and it cannot be updated by the hardware or other processors
+	 * behind us, so the return value of cmpxchg16 should be the
+	 * same as the old value.
+	 */
+	WARN_ON(!ret);
+
 	if (data)
 		data->ref = entry;
 
@@ -3826,14 +3840,18 @@ int amd_iommu_activate_guest_mode(void *data)
 {
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+	u64 valid;
 
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
 	    !entry || entry->lo.fields_vapic.guest_mode)
 		return 0;
 
+	valid = entry->lo.fields_vapic.valid;
+
 	entry->lo.val = 0;
 	entry->hi.val = 0;
 
+	entry->lo.fields_vapic.valid = valid;
 	entry->lo.fields_vapic.guest_mode = 1;
 	entry->lo.fields_vapic.ga_log_intr = 1;
 	entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
@@ -3850,14 +3868,18 @@ int amd_iommu_deactivate_guest_mode(void *data)
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
 	struct irq_cfg *cfg = ir_data->cfg;
+	u64 valid;
 
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
 	    !entry || !entry->lo.fields_vapic.guest_mode)
 		return 0;
 
+	valid = entry->lo.fields_remap.valid;
+
 	entry->lo.val = 0;
 	entry->hi.val = 0;
 
+	entry->lo.fields_remap.valid = valid;
 	entry->lo.fields_remap.dm = apic->irq_dest_mode;
 	entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
 	entry->hi.fields.vector = cfg->vector;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index c259108ab6dd..0d175aed1d92 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
 	might_sleep();
 
+	/*
+	 * When memory encryption is active the device is likely not in a
+	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+	 */
+	if (mem_encrypt_active())
+		return -ENODEV;
+
 	if (!amd_iommu_v2_supported())
 		return -ENODEV;
 
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7983c13b9eef..ed5c57e96e8b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level)
 	return (level - 1) * LEVEL_STRIDE;
 }
 
-static inline int pfn_level_offset(unsigned long pfn, int level)
+static inline int pfn_level_offset(u64 pfn, int level)
 {
 	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
 }
 
-static inline unsigned long level_mask(int level)
+static inline u64 level_mask(int level)
 {
-	return -1UL << level_to_offset_bits(level);
+	return -1ULL << level_to_offset_bits(level);
 }
 
-static inline unsigned long level_size(int level)
+static inline u64 level_size(int level)
 {
-	return 1UL << level_to_offset_bits(level);
+	return 1ULL << level_to_offset_bits(level);
 }
 
-static inline unsigned long align_to_level(unsigned long pfn, int level)
+static inline u64 align_to_level(u64 pfn, int level)
 {
 	return (pfn + level_size(level) - 1) & level_mask(level);
 }
 
 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 {
-	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -364,7 +364,6 @@ static int iommu_skip_te_disable;
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
 struct device_domain_info *get_domain_info(struct device *dev)
 {
@@ -374,8 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
 		return NULL;
 
 	info = dev_iommu_priv_get(dev);
-	if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
-		     info == DEFER_DEVICE_DOMAIN_INFO))
+	if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
 		return NULL;
 
 	return info;
@@ -742,11 +740,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 	return &context[devfn];
 }
 
-static int iommu_dummy(struct device *dev)
-{
-	return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
 static bool attach_deferred(struct device *dev)
 {
 	return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
@@ -779,6 +772,53 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
 	return false;
 }
 
+static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	u32 vtbar;
+	int rc;
+
+	/* We know that this device on this chipset has its own IOMMU.
+	 * If we find it under a different IOMMU, then the BIOS is lying
+	 * to us. Hope that the IOMMU for this device is actually
+	 * disabled, and it needs no translation...
+	 */
+	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+	if (rc) {
+		/* "can't" happen */
+		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+		return false;
+	}
+	vtbar &= 0xffff0000;
+
+	/* we know that the this iommu should be at offset 0xa000 from vtbar */
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+		return true;
+	}
+
+	return false;
+}
+
+static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
+{
+	if (!iommu || iommu->drhd->ignored)
+		return true;
+
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+
+		if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		    pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
+		    quirk_ioat_snb_local_iommu(pdev))
+			return true;
+	}
+
+	return false;
+}
+
 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
@@ -788,7 +828,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 	u16 segment = 0;
 	int i;
 
-	if (!dev || iommu_dummy(dev))
+	if (!dev)
 		return NULL;
 
 	if (dev_is_pci(dev)) {
@@ -805,7 +845,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 		dev = &ACPI_COMPANION(dev)->dev;
 
 	rcu_read_lock();
-	for_each_active_iommu(iommu, drhd) {
+	for_each_iommu(iommu, drhd) {
 		if (pdev && segment != drhd->segment)
 			continue;
 
@@ -841,6 +881,9 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 	}
 	iommu = NULL;
 out:
+	if (iommu_is_dummy(iommu, dev))
+		iommu = NULL;
+
 	rcu_read_unlock();
 
 	return iommu;
@@ -2447,7 +2490,7 @@ struct dmar_domain *find_domain(struct device *dev)
 {
 	struct device_domain_info *info;
 
-	if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+	if (unlikely(attach_deferred(dev)))
 		return NULL;
 
 	/* No lock here, assumes no domain exit in normal case */
@@ -3989,35 +4032,6 @@ static void __init iommu_exit_mempool(void)
 	iova_cache_put();
 }
 
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
-	struct dmar_drhd_unit *drhd;
-	u32 vtbar;
-	int rc;
-
-	/* We know that this device on this chipset has its own IOMMU.
-	 * If we find it under a different IOMMU, then the BIOS is lying
-	 * to us. Hope that the IOMMU for this device is actually
-	 * disabled, and it needs no translation...
-	 */
-	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
-	if (rc) {
-		/* "can't" happen */
-		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
-		return;
-	}
-	vtbar &= 0xffff0000;
-
-	/* we know that the this iommu should be at offset 0xa000 from vtbar */
-	drhd = dmar_find_matched_drhd_unit(pdev);
-	if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
-		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
-		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-		dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
-	}
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -4049,12 +4063,8 @@ static void __init init_no_remapping_devices(void)
 		/* This IOMMU has *only* gfx devices. Either bypass it or
 		   set the gfx_mapped flag, as appropriate */
 		drhd->gfx_dedicated = 1;
-		if (!dmar_map_gfx) {
+		if (!dmar_map_gfx)
 			drhd->ignored = 1;
-			for_each_active_dev_scope(drhd->devices,
-						  drhd->devices_cnt, i, dev)
-				dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
-		}
 	}
 }
 
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 23583b0e66a5..8f4ce72570ce 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -508,12 +508,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
 
 	/* Enable interrupt-remapping */
 	iommu->gcmd |= DMA_GCMD_IRE;
-	iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRES), sts);
 
+	/* Block compatibility-format MSIs */
+	if (sts & DMA_GSTS_CFIS) {
+		iommu->gcmd &= ~DMA_GCMD_CFI;
+		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+			      readl, !(sts & DMA_GSTS_CFIS), sts);
+	}
+
 	/*
 	 * With CFI clear in the Global Command register, we should be
 	 * protected from dangerous (i.e. compatibility) interrupts