| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-06 11:48:46 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-06 11:48:46 -0700 |
| commit | 2732ea9e8528ec3d065e04ecd357f61eec4869a8 (patch) | |
| tree | 64ed2014fb448caf250ff6b09c894a42d7d6d6c0 /drivers | |
| parent | 75bcc84445bccf40a6215ff6fc6dd91978b1490e (diff) | |
| parent | c0981b863a31a1891aa2719957983f4297770f87 (diff) | |
Merge tag 'iommu-updates-v3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu into next
Pull IOMMU updates from Joerg Roedel:
"The changes include:
- a new IOMMU driver for ARM Renesas SoCs
- updates and fixes for the ARM Exynos driver to bring it closer to a
usable state again
- convert the AMD IOMMUv2 driver to use the mmu_notifier->release
call-back instead of the task_exit notifier
- random other fixes and minor improvements to a number of other
IOMMU drivers"
* tag 'iommu-updates-v3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (54 commits)
iommu/msm: Use devm_ioremap_resource to simplify code
iommu/amd: Fix recently introduced compile warnings
arm/ipmmu-vmsa: Fix compile error
iommu/exynos: Fix checkpatch warning
iommu/exynos: Fix trivial typo
iommu/exynos: Remove invalid symbol dependency
iommu: fsl_pamu.c: Fix for possible null pointer dereference
iommu/amd: Remove duplicate checking code
iommu/amd: Handle parallel invalidate_range_start/end calls correctly
iommu/amd: Remove IOMMUv2 pasid_state_list
iommu/amd: Implement mmu_notifier_release call-back
iommu/amd: Convert IOMMUv2 state_table into state_list
iommu/amd: Don't access IOMMUv2 state_table directly
iommu/ipmmu-vmsa: Support clearing mappings
iommu/ipmmu-vmsa: Remove stage 2 PTE bits definitions
iommu/ipmmu-vmsa: Support 2MB mappings
iommu/ipmmu-vmsa: Rewrite page table management
iommu/ipmmu-vmsa: PMD is never folded, PUD always is
iommu/ipmmu-vmsa: Set the PTE contiguous hint bit when possible
iommu/ipmmu-vmsa: Define driver-specific page directory sizes
...
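
The AMD IOMMUv2 work in this pull replaces the old task_exit profile notifier with the mmu_notifier ->release callback, which fires once when the bound address space is torn down and is allowed to sleep, so the driver can wait for pending PRI requests before unbinding the PASID. A minimal sketch of that pattern follows; every name here (my_pasid_ctx, my_bind_pasid, my_mn_release) is a hypothetical placeholder, not code from this merge:

```c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>

/* Hypothetical per-PASID context, loosely mirroring the driver's pasid_state. */
struct my_pasid_ctx {
	struct mmu_notifier	mn;
	struct mm_struct	*mm;
	int			pasid;
};

/*
 * Called once when the mm goes away (exit_mmap). Unlike the removed
 * task_exit notifier, this callback may sleep, so the driver could flush
 * its fault workqueue and stop device access to the PASID here.
 */
static void my_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_pasid_ctx *ctx = container_of(mn, struct my_pasid_ctx, mn);

	pr_info("releasing pasid %d\n", ctx->pasid);
	/* device-specific teardown would go here */
}

static struct mmu_notifier_ops my_mn_ops = {
	.release = my_mn_release,
};

/* Bind a task's mm to a PASID and arrange for release-time teardown. */
static int my_bind_pasid(struct task_struct *task, int pasid,
			 struct my_pasid_ctx *ctx)
{
	int ret;

	ctx->pasid = pasid;
	ctx->mn.ops = &my_mn_ops;

	ctx->mm = get_task_mm(task);	/* takes a reference on the mm */
	if (!ctx->mm)
		return -EINVAL;

	ret = mmu_notifier_register(&ctx->mn, ctx->mm);
	if (ret)
		mmput(ctx->mm);
	/* on success the mm reference is dropped at unbind time (not shown) */
	return ret;
}
```

Unbinding then becomes a call to mmu_notifier_unregister() on the same mm, which is exactly what the amd_iommu_unbind_pasid() hunk below does.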
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/iommu/Kconfig | 26
-rw-r--r-- | drivers/iommu/Makefile | 1
-rw-r--r-- | drivers/iommu/amd_iommu.c | 8
-rw-r--r-- | drivers/iommu/amd_iommu_v2.c | 184
-rw-r--r-- | drivers/iommu/arm-smmu.c | 4
-rw-r--r-- | drivers/iommu/exynos-iommu.c | 1033
-rw-r--r-- | drivers/iommu/fsl_pamu.c | 3
-rw-r--r-- | drivers/iommu/ipmmu-vmsa.c | 1255
-rw-r--r-- | drivers/iommu/msm_iommu_dev.c | 38
-rw-r--r-- | drivers/iommu/omap-iommu.c | 31
-rw-r--r-- | drivers/iommu/omap-iopgtable.h | 3
-rw-r--r-- | drivers/iommu/shmobile-ipmmu.c | 20
12 files changed, 1993 insertions, 613 deletions
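
Both the new ipmmu-vmsa driver and the reworked exynos-iommu driver in the diff below hook into the IOMMU core the same way: they fill a struct iommu_ops and register it for a bus with bus_set_iommu(). A minimal sketch of that v3.16-era registration pattern, with placeholder my_* names and stubbed-out page-table logic (an illustration of the API shape, not code from this merge):

```c
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int my_iommu_domain_init(struct iommu_domain *domain)
{
	/* allocate the driver's page-table root and stash it in domain->priv */
	return 0;
}

static void my_iommu_domain_destroy(struct iommu_domain *domain)
{
	/* free the page tables and domain->priv */
}

static int my_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	/* point the hardware context of this master at the domain's page table */
	return 0;
}

static void my_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	/* disable translation for this master */
}

static int my_iommu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	/* install a PTE; size is one of the sizes advertised in pgsize_bitmap */
	return 0;
}

static size_t my_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	/* clear the PTE, invalidate the TLB entry, return the bytes unmapped */
	return size;
}

static phys_addr_t my_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	/* walk the page table in software */
	return 0;
}

static struct iommu_ops my_iommu_ops = {
	.domain_init	= my_iommu_domain_init,
	.domain_destroy	= my_iommu_domain_destroy,
	.attach_dev	= my_iommu_attach_dev,
	.detach_dev	= my_iommu_detach_dev,
	.map		= my_iommu_map,
	.unmap		= my_iommu_unmap,
	.iova_to_phys	= my_iommu_iova_to_phys,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M,
};

static int __init my_iommu_init(void)
{
	/* make these ops the IOMMU callbacks for every platform-bus device */
	return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
}
subsys_initcall(my_iommu_init);
```

The exynos-iommu changes below additionally implement .add_device/.remove_device to place masters into IOMMU groups, which is the extra step a real driver needs once it participates in the generic IOMMU API.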
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index df56e4c74a7e..d260605e6d5f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -178,13 +178,13 @@ config TEGRA_IOMMU_SMMU config EXYNOS_IOMMU bool "Exynos IOMMU Support" - depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU + depends on ARCH_EXYNOS select IOMMU_API help - Support for the IOMMU(System MMU) of Samsung Exynos application - processor family. This enables H/W multimedia accellerators to see - non-linear physical memory chunks as a linear memory in their - address spaces + Support for the IOMMU (System MMU) of Samsung Exynos application + processor family. This enables H/W multimedia accelerators to see + non-linear physical memory chunks as linear memory in their + address space. If unsure, say N here. @@ -193,9 +193,9 @@ config EXYNOS_IOMMU_DEBUG depends on EXYNOS_IOMMU help Select this to see the detailed log message that shows what - happens in the IOMMU driver + happens in the IOMMU driver. - Say N unless you need kernel log message for IOMMU debugging + Say N unless you need kernel log message for IOMMU debugging. config SHMOBILE_IPMMU bool @@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB +config IPMMU_VMSA + bool "Renesas VMSA-compatible IPMMU" + depends on ARM_LPAE + depends on ARCH_SHMOBILE || COMPILE_TEST + select IOMMU_API + select ARM_DMA_USE_IOMMU + help + Support for the Renesas VMSA-compatible IPMMU Renesas found in the + R-Mobile APE6 and R-Car H2/M2 SoCs. + + If unsure, say N. + config SPAPR_TCE_IOMMU bool "sPAPR TCE IOMMU Support" depends on PPC_POWERNV || PPC_PSERIES diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 5d58bf16e9e3..8893bad048e0 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o +obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 57068e8035b5..4aec6a29e316 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void) { struct iommu_dev_data *dev_data; struct pci_dev *dev = NULL; - struct amd_iommu *iommu; - u16 devid; int ret; ret = alloc_passthrough_domain(); @@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void) dev_data = get_dev_data(&dev->dev); dev_data->passthrough = true; - devid = get_device_id(&dev->dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - continue; - attach_device(&dev->dev, pt_domain); } diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 203b2e6a91cf..d4daa05efe60 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -45,6 +45,8 @@ struct pri_queue { struct pasid_state { struct list_head list; /* For global state-list */ atomic_t count; /* Reference count */ + atomic_t mmu_notifier_count; /* Counting nested mmu_notifier + calls */ struct task_struct *task; /* Task bound to this PASID */ struct mm_struct *mm; /* mm_struct for the faults */ struct mmu_notifier mn; /* mmu_otifier handle */ @@ -56,6 +58,8 @@ struct pasid_state { }; struct device_state { + struct list_head list; + u16 devid; atomic_t 
count; struct pci_dev *pdev; struct pasid_state **states; @@ -81,13 +85,9 @@ struct fault { u16 flags; }; -static struct device_state **state_table; +static LIST_HEAD(state_list); static spinlock_t state_lock; -/* List and lock for all pasid_states */ -static LIST_HEAD(pasid_state_list); -static DEFINE_SPINLOCK(ps_lock); - static struct workqueue_struct *iommu_wq; /* @@ -99,7 +99,6 @@ static u64 *empty_page_table; static void free_pasid_states(struct device_state *dev_state); static void unbind_pasid(struct device_state *dev_state, int pasid); -static int task_exit(struct notifier_block *nb, unsigned long e, void *data); static u16 device_id(struct pci_dev *pdev) { @@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev) return devid; } +static struct device_state *__get_device_state(u16 devid) +{ + struct device_state *dev_state; + + list_for_each_entry(dev_state, &state_list, list) { + if (dev_state->devid == devid) + return dev_state; + } + + return NULL; +} + static struct device_state *get_device_state(u16 devid) { struct device_state *dev_state; unsigned long flags; spin_lock_irqsave(&state_lock, flags); - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state != NULL) atomic_inc(&dev_state->count); spin_unlock_irqrestore(&state_lock, flags); @@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state) free_device_state(dev_state); } -static struct notifier_block profile_nb = { - .notifier_call = task_exit, -}; - -static void link_pasid_state(struct pasid_state *pasid_state) -{ - spin_lock(&ps_lock); - list_add_tail(&pasid_state->list, &pasid_state_list); - spin_unlock(&ps_lock); -} - -static void __unlink_pasid_state(struct pasid_state *pasid_state) -{ - list_del(&pasid_state->list); -} - -static void unlink_pasid_state(struct pasid_state *pasid_state) -{ - spin_lock(&ps_lock); - __unlink_pasid_state(pasid_state); - spin_unlock(&ps_lock); -} - /* Must be called under dev_state->lock */ static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, int pasid, bool alloc) @@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid) if (pasid_state == NULL) return; - unlink_pasid_state(pasid_state); __unbind_pasid(pasid_state); put_pasid_state_wait(pasid_state); /* Reference taken in this function */ } @@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state) continue; put_pasid_state(pasid_state); - unbind_pasid(dev_state, i); + + /* + * This will call the mn_release function and + * unbind the PASID + */ + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); } if (dev_state->pasid_levels == 2) @@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn, pasid_state = mn_to_state(mn); dev_state = pasid_state->device_state; - amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, - __pa(empty_page_table)); + if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) { + amd_iommu_domain_set_gcr3(dev_state->domain, + pasid_state->pasid, + __pa(empty_page_table)); + } } static void mn_invalidate_range_end(struct mmu_notifier *mn, @@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn, pasid_state = mn_to_state(mn); dev_state = pasid_state->device_state; - amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, - __pa(pasid_state->mm->pgd)); + if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) { + amd_iommu_domain_set_gcr3(dev_state->domain, + pasid_state->pasid, + 
__pa(pasid_state->mm->pgd)); + } +} + +static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct pasid_state *pasid_state; + struct device_state *dev_state; + + might_sleep(); + + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + + if (pasid_state->device_state->inv_ctx_cb) + dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); + + unbind_pasid(dev_state, pasid_state->pasid); } static struct mmu_notifier_ops iommu_mn = { + .release = mn_release, .clear_flush_young = mn_clear_flush_young, .change_pte = mn_change_pte, .invalidate_page = mn_invalidate_page, @@ -606,53 +621,6 @@ static struct notifier_block ppr_nb = { .notifier_call = ppr_notifier, }; -static int task_exit(struct notifier_block *nb, unsigned long e, void *data) -{ - struct pasid_state *pasid_state; - struct task_struct *task; - - task = data; - - /* - * Using this notifier is a hack - but there is no other choice - * at the moment. What I really want is a sleeping notifier that - * is called when an MM goes down. But such a notifier doesn't - * exist yet. The notifier needs to sleep because it has to make - * sure that the device does not use the PASID and the address - * space anymore before it is destroyed. This includes waiting - * for pending PRI requests to pass the workqueue. The - * MMU-Notifiers would be a good fit, but they use RCU and so - * they are not allowed to sleep. Lets see how we can solve this - * in a more intelligent way in the future. - */ -again: - spin_lock(&ps_lock); - list_for_each_entry(pasid_state, &pasid_state_list, list) { - struct device_state *dev_state; - int pasid; - - if (pasid_state->task != task) - continue; - - /* Drop Lock and unbind */ - spin_unlock(&ps_lock); - - dev_state = pasid_state->device_state; - pasid = pasid_state->pasid; - - if (pasid_state->device_state->inv_ctx_cb) - dev_state->inv_ctx_cb(dev_state->pdev, pasid); - - unbind_pasid(dev_state, pasid); - - /* Task may be in the list multiple times */ - goto again; - } - spin_unlock(&ps_lock); - - return NOTIFY_OK; -} - int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, struct task_struct *task) { @@ -682,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, goto out; atomic_set(&pasid_state->count, 1); + atomic_set(&pasid_state->mmu_notifier_count, 0); init_waitqueue_head(&pasid_state->wq); spin_lock_init(&pasid_state->lock); @@ -705,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, if (ret) goto out_clear_state; - link_pasid_state(pasid_state); - return 0; out_clear_state: @@ -727,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid); void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) { + struct pasid_state *pasid_state; struct device_state *dev_state; u16 devid; @@ -743,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) if (pasid < 0 || pasid >= dev_state->max_pasids) goto out; - unbind_pasid(dev_state, pasid); + pasid_state = get_pasid_state(dev_state, pasid); + if (pasid_state == NULL) + goto out; + /* + * Drop reference taken here. We are safe because we still hold + * the reference taken in the amd_iommu_bind_pasid function. 
+ */ + put_pasid_state(pasid_state); + + /* This will call the mn_release function and unbind the PASID */ + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); out: put_device_state(dev_state); @@ -773,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) spin_lock_init(&dev_state->lock); init_waitqueue_head(&dev_state->wq); - dev_state->pdev = pdev; + dev_state->pdev = pdev; + dev_state->devid = devid; tmp = pasids; for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) @@ -803,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) spin_lock_irqsave(&state_lock, flags); - if (state_table[devid] != NULL) { + if (__get_device_state(devid) != NULL) { spin_unlock_irqrestore(&state_lock, flags); ret = -EBUSY; goto out_free_domain; } - state_table[devid] = dev_state; + list_add_tail(&dev_state->list, &state_list); spin_unlock_irqrestore(&state_lock, flags); @@ -841,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev) spin_lock_irqsave(&state_lock, flags); - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) { spin_unlock_irqrestore(&state_lock, flags); return; } - state_table[devid] = NULL; + list_del(&dev_state->list); spin_unlock_irqrestore(&state_lock, flags); @@ -874,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, spin_lock_irqsave(&state_lock, flags); ret = -EINVAL; - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) goto out_unlock; @@ -905,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, spin_lock_irqsave(&state_lock, flags); ret = -EINVAL; - dev_state = state_table[devid]; + dev_state = __get_device_state(devid); if (dev_state == NULL) goto out_unlock; @@ -922,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb); static int __init amd_iommu_v2_init(void) { - size_t state_table_size; int ret; pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n"); @@ -938,16 +916,10 @@ static int __init amd_iommu_v2_init(void) spin_lock_init(&state_lock); - state_table_size = MAX_DEVICES * sizeof(struct device_state *); - state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(state_table_size)); - if (state_table == NULL) - return -ENOMEM; - ret = -ENOMEM; iommu_wq = create_workqueue("amd_iommu_v2"); if (iommu_wq == NULL) - goto out_free; + goto out; ret = -ENOMEM; empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL); @@ -955,29 +927,24 @@ static int __init amd_iommu_v2_init(void) goto out_destroy_wq; amd_iommu_register_ppr_notifier(&ppr_nb); - profile_event_register(PROFILE_TASK_EXIT, &profile_nb); return 0; out_destroy_wq: destroy_workqueue(iommu_wq); -out_free: - free_pages((unsigned long)state_table, get_order(state_table_size)); - +out: return ret; } static void __exit amd_iommu_v2_exit(void) { struct device_state *dev_state; - size_t state_table_size; int i; if (!amd_iommu_v2_supported()) return; - profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb); amd_iommu_unregister_ppr_notifier(&ppr_nb); flush_workqueue(iommu_wq); @@ -1000,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void) destroy_workqueue(iommu_wq); - state_table_size = MAX_DEVICES * sizeof(struct device_state *); - free_pages((unsigned long)state_table, get_order(state_table_size)); - free_page((unsigned long)empty_page_table); } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 647c3c7fd742..1599354e974d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c 
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, for (i = 0; i < master->num_streamids; ++i) { u32 idx, s2cr; idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; - s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) | + s2cr = S2CR_TYPE_TRANS | (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); } @@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * allocation (PTRS_PER_PGD). */ #ifdef CONFIG_64BIT - smmu->s1_output_size = min(39UL, size); + smmu->s1_output_size = min((unsigned long)VA_BITS, size); #else smmu->s1_output_size = min(32UL, size); #endif diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 685f9263cfee..99054d2c040d 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -29,7 +29,8 @@ #include <asm/cacheflush.h> #include <asm/pgtable.h> -#include <mach/sysmmu.h> +typedef u32 sysmmu_iova_t; +typedef u32 sysmmu_pte_t; /* We does not consider super section mapping (16MB) */ #define SECT_ORDER 20 @@ -44,28 +45,44 @@ #define LPAGE_MASK (~(LPAGE_SIZE - 1)) #define SPAGE_MASK (~(SPAGE_SIZE - 1)) -#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) -#define lv1ent_page(sent) ((*(sent) & 3) == 1) +#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \ + ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) +#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK) +#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1) +#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \ + ((*(sent) & 3) == 1)) #define lv1ent_section(sent) ((*(sent) & 3) == 2) #define lv2ent_fault(pent) ((*(pent) & 3) == 0) #define lv2ent_small(pent) ((*(pent) & 2) == 2) #define lv2ent_large(pent) ((*(pent) & 3) == 1) +static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) +{ + return iova & (size - 1); +} + #define section_phys(sent) (*(sent) & SECT_MASK) -#define section_offs(iova) ((iova) & 0xFFFFF) +#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) #define lpage_phys(pent) (*(pent) & LPAGE_MASK) -#define lpage_offs(iova) ((iova) & 0xFFFF) +#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) #define spage_phys(pent) (*(pent) & SPAGE_MASK) -#define spage_offs(iova) ((iova) & 0xFFF) - -#define lv1ent_offset(iova) ((iova) >> SECT_ORDER) -#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER) +#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) #define NUM_LV1ENTRIES 4096 -#define NUM_LV2ENTRIES 256 +#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) + +static u32 lv1ent_offset(sysmmu_iova_t iova) +{ + return iova >> SECT_ORDER; +} + +static u32 lv2ent_offset(sysmmu_iova_t iova) +{ + return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); +} -#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long)) +#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) @@ -80,6 +97,13 @@ #define CTRL_BLOCK 0x7 #define CTRL_DISABLE 0x0 +#define CFG_LRU 0x1 +#define CFG_QOS(n) ((n & 0xF) << 7) +#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */ +#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */ +#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */ +#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */ + #define REG_MMU_CTRL 0x000 #define REG_MMU_CFG 0x004 #define REG_MMU_STATUS 0x008 @@ -96,19 +120,32 @@ #define REG_MMU_VERSION 0x034 +#define MMU_MAJ_VER(val) ((val) >> 7) +#define MMU_MIN_VER(val) 
((val) & 0x7F) +#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */ + +#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F)) + #define REG_PB0_SADDR 0x04C #define REG_PB0_EADDR 0x050 #define REG_PB1_SADDR 0x054 #define REG_PB1_EADDR 0x058 -static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova) +#define has_sysmmu(dev) (dev->archdata.iommu != NULL) + +static struct kmem_cache *lv2table_kmem_cache; +static sysmmu_pte_t *zero_lv2_table; +#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table)) + +static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) { return pgtable + lv1ent_offset(iova); } -static unsigned long *page_entry(unsigned long *sent, unsigned long iova) +static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) { - return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova); + return (sysmmu_pte_t *)phys_to_virt( + lv2table_base(sent)) + lv2ent_offset(iova); } enum exynos_sysmmu_inttype { @@ -124,16 +161,6 @@ enum exynos_sysmmu_inttype { SYSMMU_FAULTS_NUM }; -/* - * @itype: type of fault. - * @pgtable_base: the physical address of page table base. This is 0 if @itype - * is SYSMMU_BUSERROR. - * @fault_addr: the device (virtual) address that the System MMU tried to - * translated. This is 0 if @itype is SYSMMU_BUSERROR. - */ -typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype, - unsigned long pgtable_base, unsigned long fault_addr); - static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = { REG_PAGE_FAULT_ADDR, REG_AR_FAULT_ADDR, @@ -157,27 +184,34 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { "UNKNOWN FAULT" }; +/* attached to dev.archdata.iommu of the master device */ +struct exynos_iommu_owner { + struct list_head client; /* entry of exynos_iommu_domain.clients */ + struct device *dev; + struct device *sysmmu; + struct iommu_domain *domain; + void *vmm_data; /* IO virtual memory manager's data */ + spinlock_t lock; /* Lock to preserve consistency of System MMU */ +}; + struct exynos_iommu_domain { struct list_head clients; /* list of sysmmu_drvdata.node */ - unsigned long *pgtable; /* lv1 page table, 16KB */ + sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ short *lv2entcnt; /* free lv2 entry counter for each section */ spinlock_t lock; /* lock for this structure */ spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ }; struct sysmmu_drvdata { - struct list_head node; /* entry of exynos_iommu_domain.clients */ struct device *sysmmu; /* System MMU's device descriptor */ - struct device *dev; /* Owner of system MMU */ - char *dbgname; - int nsfrs; - void __iomem **sfrbases; - struct clk *clk[2]; + struct device *master; /* Owner of system MMU */ + void __iomem *sfrbase; + struct clk *clk; + struct clk *clk_master; int activations; - rwlock_t lock; + spinlock_t lock; struct iommu_domain *domain; - sysmmu_fault_handler_t fault_handler; - unsigned long pgtable; + phys_addr_t pgtable; }; static bool set_sysmmu_active(struct sysmmu_drvdata *data) @@ -204,6 +238,11 @@ static void sysmmu_unblock(void __iomem *sfrbase) __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL); } +static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data) +{ + return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION)); +} + static bool sysmmu_block(void __iomem *sfrbase) { int i = 120; @@ -226,429 +265,428 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase) } static void __sysmmu_tlb_invalidate_entry(void __iomem 
*sfrbase, - unsigned long iova) + sysmmu_iova_t iova, unsigned int num_inv) { - __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY); + unsigned int i; + + for (i = 0; i < num_inv; i++) { + __raw_writel((iova & SPAGE_MASK) | 1, + sfrbase + REG_MMU_FLUSH_ENTRY); + iova += SPAGE_SIZE; + } } static void __sysmmu_set_ptbase(void __iomem *sfrbase, - unsigned long pgd) + phys_addr_t pgd) { - __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */ __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR); __sysmmu_tlb_invalidate(sfrbase); } -static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base, - unsigned long size, int idx) +static void show_fault_information(const char *name, + enum exynos_sysmmu_inttype itype, + phys_addr_t pgtable_base, sysmmu_iova_t fault_addr) { - __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8); - __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8); -} - -static void __set_fault_handler(struct sysmmu_drvdata *data, - sysmmu_fault_handler_t handler) -{ - unsigned long flags; - - write_lock_irqsave(&data->lock, flags); - data->fault_handler = handler; - write_unlock_irqrestore(&data->lock, flags); -} - -void exynos_sysmmu_set_fault_handler(struct device *dev, - sysmmu_fault_handler_t handler) -{ - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - - __set_fault_handler(data, handler); -} - -static int default_fault_handler(enum exynos_sysmmu_inttype itype, - unsigned long pgtable_base, unsigned long fault_addr) -{ - unsigned long *ent; + sysmmu_pte_t *ent; if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT)) itype = SYSMMU_FAULT_UNKNOWN; - pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n", - sysmmu_fault_name[itype], fault_addr, pgtable_base); + pr_err("%s occurred at %#x by %s(Page table base: %pa)\n", + sysmmu_fault_name[itype], fault_addr, name, &pgtable_base); - ent = section_entry(__va(pgtable_base), fault_addr); - pr_err("\tLv1 entry: 0x%lx\n", *ent); + ent = section_entry(phys_to_virt(pgtable_base), fault_addr); + pr_err("\tLv1 entry: %#x\n", *ent); if (lv1ent_page(ent)) { ent = page_entry(ent, fault_addr); - pr_err("\t Lv2 entry: 0x%lx\n", *ent); + pr_err("\t Lv2 entry: %#x\n", *ent); } - - pr_err("Generating Kernel OOPS... because it is unrecoverable.\n"); - - BUG(); - - return 0; } static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) { /* SYSMMU is in blocked when interrupt occurred. 
*/ struct sysmmu_drvdata *data = dev_id; - struct resource *irqres; - struct platform_device *pdev; enum exynos_sysmmu_inttype itype; - unsigned long addr = -1; - - int i, ret = -ENOSYS; - - read_lock(&data->lock); + sysmmu_iova_t addr = -1; + int ret = -ENOSYS; WARN_ON(!is_sysmmu_active(data)); - pdev = to_platform_device(data->sysmmu); - for (i = 0; i < (pdev->num_resources / 2); i++) { - irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (irqres && ((int)irqres->start == irq)) - break; - } + spin_lock(&data->lock); + + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); - if (i == pdev->num_resources) { + itype = (enum exynos_sysmmu_inttype) + __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS)); + if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN)))) itype = SYSMMU_FAULT_UNKNOWN; + else + addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]); + + if (itype == SYSMMU_FAULT_UNKNOWN) { + pr_err("%s: Fault is not occurred by System MMU '%s'!\n", + __func__, dev_name(data->sysmmu)); + pr_err("%s: Please check if IRQ is correctly configured.\n", + __func__); + BUG(); } else { - itype = (enum exynos_sysmmu_inttype) - __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS)); - if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN)))) - itype = SYSMMU_FAULT_UNKNOWN; - else - addr = __raw_readl( - data->sfrbases[i] + fault_reg_offset[itype]); + unsigned int base = + __raw_readl(data->sfrbase + REG_PT_BASE_ADDR); + show_fault_information(dev_name(data->sysmmu), + itype, base, addr); + if (data->domain) + ret = report_iommu_fault(data->domain, + data->master, addr, itype); } - if (data->domain) - ret = report_iommu_fault(data->domain, data->dev, - addr, itype); + /* fault is not recovered by fault handler */ + BUG_ON(ret != 0); - if ((ret == -ENOSYS) && data->fault_handler) { - unsigned long base = data->pgtable; - if (itype != SYSMMU_FAULT_UNKNOWN) - base = __raw_readl( - data->sfrbases[i] + REG_PT_BASE_ADDR); - ret = data->fault_handler(itype, base, addr); - } + __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR); - if (!ret && (itype != SYSMMU_FAULT_UNKNOWN)) - __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR); - else - dev_dbg(data->sysmmu, "(%s) %s is not handled.\n", - data->dbgname, sysmmu_fault_name[itype]); + sysmmu_unblock(data->sfrbase); - if (itype != SYSMMU_FAULT_UNKNOWN) - sysmmu_unblock(data->sfrbases[i]); + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); - read_unlock(&data->lock); + spin_unlock(&data->lock); return IRQ_HANDLED; } -static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data) +static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data) { + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); + + __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); + __raw_writel(0, data->sfrbase + REG_MMU_CFG); + + clk_disable(data->clk); + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); +} + +static bool __sysmmu_disable(struct sysmmu_drvdata *data) +{ + bool disabled; unsigned long flags; - bool disabled = false; - int i; - write_lock_irqsave(&data->lock, flags); + spin_lock_irqsave(&data->lock, flags); - if (!set_sysmmu_inactive(data)) - goto finish; + disabled = set_sysmmu_inactive(data); - for (i = 0; i < data->nsfrs; i++) - __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL); + if (disabled) { + data->pgtable = 0; + data->domain = NULL; - if (data->clk[1]) - clk_disable(data->clk[1]); - if (data->clk[0]) - clk_disable(data->clk[0]); + 
__sysmmu_disable_nocount(data); - disabled = true; - data->pgtable = 0; - data->domain = NULL; -finish: - write_unlock_irqrestore(&data->lock, flags); + dev_dbg(data->sysmmu, "Disabled\n"); + } else { + dev_dbg(data->sysmmu, "%d times left to disable\n", + data->activations); + } - if (disabled) - dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname); - else - dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n", - data->dbgname, data->activations); + spin_unlock_irqrestore(&data->lock, flags); return disabled; } -/* __exynos_sysmmu_enable: Enables System MMU - * - * returns -error if an error occurred and System MMU is not enabled, - * 0 if the System MMU has been just enabled and 1 if System MMU was already - * enabled before. - */ -static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data, - unsigned long pgtable, struct iommu_domain *domain) +static void __sysmmu_init_config(struct sysmmu_drvdata *data) { - int i, ret = 0; - unsigned long flags; + unsigned int cfg = CFG_LRU | CFG_QOS(15); + unsigned int ver; + + ver = __raw_sysmmu_version(data); + if (MMU_MAJ_VER(ver) == 3) { + if (MMU_MIN_VER(ver) >= 2) { + cfg |= CFG_FLPDCACHE; + if (MMU_MIN_VER(ver) == 3) { + cfg |= CFG_ACGEN; + cfg &= ~CFG_LRU; + } else { + cfg |= CFG_SYSSEL; + } + } + } - write_lock_irqsave(&data->lock, flags); + __raw_writel(cfg, data->sfrbase + REG_MMU_CFG); +} - if (!set_sysmmu_active(data)) { - if (WARN_ON(pgtable != data->pgtable)) { - ret = -EBUSY; - set_sysmmu_inactive(data); - } else { - ret = 1; - } +static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data) +{ + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); + clk_enable(data->clk); - dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname); - goto finish; - } + __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); - if (data->clk[0]) - clk_enable(data->clk[0]); - if (data->clk[1]) - clk_enable(data->clk[1]); + __sysmmu_init_config(data); - data->pgtable = pgtable; + __sysmmu_set_ptbase(data->sfrbase, data->pgtable); - for (i = 0; i < data->nsfrs; i++) { - __sysmmu_set_ptbase(data->sfrbases[i], pgtable); + __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); - if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) { - /* System MMU version is 3.x */ - __raw_writel((1 << 12) | (2 << 28), - data->sfrbases[i] + REG_MMU_CFG); - __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0); - __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1); - } + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); +} + +static int __sysmmu_enable(struct sysmmu_drvdata *data, + phys_addr_t pgtable, struct iommu_domain *domain) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + if (set_sysmmu_active(data)) { + data->pgtable = pgtable; + data->domain = domain; + + __sysmmu_enable_nocount(data); + + dev_dbg(data->sysmmu, "Enabled\n"); + } else { + ret = (pgtable == data->pgtable) ? 
1 : -EBUSY; - __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL); + dev_dbg(data->sysmmu, "already enabled\n"); } - data->domain = domain; + if (WARN_ON(ret < 0)) + set_sysmmu_inactive(data); /* decrement count */ - dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname); -finish: - write_unlock_irqrestore(&data->lock, flags); + spin_unlock_irqrestore(&data->lock, flags); return ret; } -int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable) +/* __exynos_sysmmu_enable: Enables System MMU + * + * returns -error if an error occurred and System MMU is not enabled, + * 0 if the System MMU has been just enabled and 1 if System MMU was already + * enabled before. + */ +static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable, + struct iommu_domain *domain) { - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - int ret; + int ret = 0; + unsigned long flags; + struct exynos_iommu_owner *owner = dev->archdata.iommu; + struct sysmmu_drvdata *data; - BUG_ON(!memblock_is_memory(pgtable)); + BUG_ON(!has_sysmmu(dev)); - ret = pm_runtime_get_sync(data->sysmmu); - if (ret < 0) { - dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname); - return ret; - } + spin_lock_irqsave(&owner->lock, flags); - ret = __exynos_sysmmu_enable(data, pgtable, NULL); - if (WARN_ON(ret < 0)) { - pm_runtime_put(data->sysmmu); - dev_err(data->sysmmu, - "(%s) Already enabled with page table %#lx\n", - data->dbgname, data->pgtable); - } else { - data->dev = dev; - } + data = dev_get_drvdata(owner->sysmmu); + + ret = __sysmmu_enable(data, pgtable, domain); + if (ret >= 0) + data->master = dev; + + spin_unlock_irqrestore(&owner->lock, flags); return ret; } +int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable) +{ + BUG_ON(!memblock_is_memory(pgtable)); + + return __exynos_sysmmu_enable(dev, pgtable, NULL); +} + static bool exynos_sysmmu_disable(struct device *dev) { - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - bool disabled; + unsigned long flags; + bool disabled = true; + struct exynos_iommu_owner *owner = dev->archdata.iommu; + struct sysmmu_drvdata *data; + + BUG_ON(!has_sysmmu(dev)); + + spin_lock_irqsave(&owner->lock, flags); + + data = dev_get_drvdata(owner->sysmmu); + + disabled = __sysmmu_disable(data); + if (disabled) + data->master = NULL; - disabled = __exynos_sysmmu_disable(data); - pm_runtime_put(data->sysmmu); + spin_unlock_irqrestore(&owner->lock, flags); return disabled; } -static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova) +static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, + sysmmu_iova_t iova) +{ + if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3)) + __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY); +} + +static void sysmmu_tlb_invalidate_flpdcache(struct device *dev, + sysmmu_iova_t iova) { unsigned long flags; - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct exynos_iommu_owner *owner = dev->archdata.iommu; + struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu); + + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); - read_lock_irqsave(&data->lock, flags); + spin_lock_irqsave(&data->lock, flags); + if (is_sysmmu_active(data)) + __sysmmu_tlb_invalidate_flpdcache(data, iova); + spin_unlock_irqrestore(&data->lock, flags); + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); +} + +static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova, + size_t size) +{ + struct 
exynos_iommu_owner *owner = dev->archdata.iommu; + unsigned long flags; + struct sysmmu_drvdata *data; + + data = dev_get_drvdata(owner->sysmmu); + + spin_lock_irqsave(&data->lock, flags); if (is_sysmmu_active(data)) { - int i; - for (i = 0; i < data->nsfrs; i++) { - if (sysmmu_block(data->sfrbases[i])) { - __sysmmu_tlb_invalidate_entry( - data->sfrbases[i], iova); - sysmmu_unblock(data->sfrbases[i]); - } + unsigned int num_inv = 1; + + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); + + /* + * L2TLB invalidation required + * 4KB page: 1 invalidation + * 64KB page: 16 invalidation + * 1MB page: 64 invalidation + * because it is set-associative TLB + * with 8-way and 64 sets. + * 1MB page can be cached in one of all sets. + * 64KB page can be one of 16 consecutive sets. + */ + if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2) + num_inv = min_t(unsigned int, size / PAGE_SIZE, 64); + + if (sysmmu_block(data->sfrbase)) { + __sysmmu_tlb_invalidate_entry( + data->sfrbase, iova, num_inv); + sysmmu_unblock(data->sfrbase); } + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); } else { - dev_dbg(data->sysmmu, - "(%s) Disabled. Skipping invalidating TLB.\n", - data->dbgname); + dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n", + iova); } - - read_unlock_irqrestore(&data->lock, flags); + spin_unlock_irqrestore(&data->lock, flags); } void exynos_sysmmu_tlb_invalidate(struct device *dev) { + struct exynos_iommu_owner *owner = dev->archdata.iommu; unsigned long flags; - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct sysmmu_drvdata *data; - read_lock_irqsave(&data->lock, flags); + data = dev_get_drvdata(owner->sysmmu); + spin_lock_irqsave(&data->lock, flags); if (is_sysmmu_active(data)) { - int i; - for (i = 0; i < data->nsfrs; i++) { - if (sysmmu_block(data->sfrbases[i])) { - __sysmmu_tlb_invalidate(data->sfrbases[i]); - sysmmu_unblock(data->sfrbases[i]); - } + if (!IS_ERR(data->clk_master)) + clk_enable(data->clk_master); + if (sysmmu_block(data->sfrbase)) { + __sysmmu_tlb_invalidate(data->sfrbase); + sysmmu_unblock(data->sfrbase); } + if (!IS_ERR(data->clk_master)) + clk_disable(data->clk_master); } else { - dev_dbg(data->sysmmu, - "(%s) Disabled. Skipping invalidating TLB.\n", - data->dbgname); + dev_dbg(dev, "disabled. 
Skipping TLB invalidation\n"); } - - read_unlock_irqrestore(&data->lock, flags); + spin_unlock_irqrestore(&data->lock, flags); } -static int exynos_sysmmu_probe(struct platform_device *pdev) +static int __init exynos_sysmmu_probe(struct platform_device *pdev) { - int i, ret; - struct device *dev; + int irq, ret; + struct device *dev = &pdev->dev; struct sysmmu_drvdata *data; + struct resource *res; - dev = &pdev->dev; + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { - dev_dbg(dev, "Not enough memory\n"); - ret = -ENOMEM; - goto err_alloc; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->sfrbase = devm_ioremap_resource(dev, res); + if (IS_ERR(data->sfrbase)) + return PTR_ERR(data->sfrbase); - dev_set_drvdata(dev, data); - data->nsfrs = pdev->num_resources / 2; - data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs, - GFP_KERNEL); - if (data->sfrbases == NULL) { - dev_dbg(dev, "Not enough memory\n"); - ret = -ENOMEM; - goto err_init; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "Unable to find IRQ resource\n"); + return irq; } - for (i = 0; i < data->nsfrs; i++) { - struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - if (!res) { - dev_dbg(dev, "Unable to find IOMEM region\n"); - ret = -ENOENT; - goto err_res; - } - - data->sfrbases[i] = ioremap(res->start, resource_size(res)); - if (!data->sfrbases[i]) { - dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", - res->start); - ret = -ENOENT; - goto err_res; - } + ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, + dev_name(dev), data); + if (ret) { + dev_err(dev, "Unabled to register handler of irq %d\n", irq); + return ret; } - for (i = 0; i < data->nsfrs; i++) { - ret = platform_get_irq(pdev, i); - if (ret <= 0) { - dev_dbg(dev, "Unable to find IRQ resource\n"); - goto err_irq; - } - - ret = request_irq(ret, exynos_sysmmu_irq, 0, - dev_name(dev), data); + data->clk = devm_clk_get(dev, "sysmmu"); + if (IS_ERR(data->clk)) { + dev_err(dev, "Failed to get clock!\n"); + return PTR_ERR(data->clk); + } else { + ret = clk_prepare(data->clk); if (ret) { - dev_dbg(dev, "Unabled to register interrupt handler\n"); - goto err_irq; + dev_err(dev, "Failed to prepare clk\n"); + return ret; } } - if (dev_get_platdata(dev)) { - char *deli, *beg; - struct sysmmu_platform_data *platdata = dev_get_platdata(dev); - - beg = platdata->clockname; - - for (deli = beg; (*deli != '\0') && (*deli != ','); deli++) - /* NOTHING */; - - if (*deli == '\0') - deli = NULL; - else - *deli = '\0'; - - data->clk[0] = clk_get(dev, beg); - if (IS_ERR(data->clk[0])) { - data->clk[0] = NULL; - dev_dbg(dev, "No clock descriptor registered\n"); - } - - if (data->clk[0] && deli) { - *deli = ','; - data->clk[1] = clk_get(dev, deli + 1); - if (IS_ERR(data->clk[1])) - data->clk[1] = NULL; + data->clk_master = devm_clk_get(dev, "master"); + if (!IS_ERR(data->clk_master)) { + ret = clk_prepare(data->clk_master); + if (ret) { + clk_unprepare(data->clk); + dev_err(dev, "Failed to prepare master's clk\n"); + return ret; } - - data->dbgname = platdata->dbgname; } data->sysmmu = dev; - rwlock_init(&data->lock); - INIT_LIST_HEAD(&data->node); + spin_lock_init(&data->lock); - __set_fault_handler(data, &default_fault_handler); + platform_set_drvdata(pdev, data); - if (dev->parent) - pm_runtime_enable(dev); + pm_runtime_enable(dev); - dev_dbg(dev, "(%s) Initialized\n", data->dbgname); return 0; -err_irq: - while (i-- > 0) { - 
int irq; - - irq = platform_get_irq(pdev, i); - free_irq(irq, data); - } -err_res: - while (data->nsfrs-- > 0) - iounmap(data->sfrbases[data->nsfrs]); - kfree(data->sfrbases); -err_init: - kfree(data); -err_alloc: - dev_err(dev, "Failed to initialize\n"); - return ret; } -static struct platform_driver exynos_sysmmu_driver = { - .probe = exynos_sysmmu_probe, - .driver = { +static const struct of_device_id sysmmu_of_match[] __initconst = { + { .compatible = "samsung,exynos-sysmmu", }, + { }, +}; + +static struct platform_driver exynos_sysmmu_driver __refdata = { + .probe = exynos_sysmmu_probe, + .driver = { .owner = THIS_MODULE, .name = "exynos-sysmmu", + .of_match_table = sysmmu_of_match, } }; @@ -662,21 +700,32 @@ static inline void pgtable_flush(void *vastart, void *vaend) static int exynos_iommu_domain_init(struct iommu_domain *domain) { struct exynos_iommu_domain *priv; + int i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->pgtable = (unsigned long *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, 2); + priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); if (!priv->pgtable) goto err_pgtable; - priv->lv2entcnt = (short *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, 1); + priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); if (!priv->lv2entcnt) goto err_counter; + /* w/a of System MMU v3.3 to prevent caching 1MiB mapping */ + for (i = 0; i < NUM_LV1ENTRIES; i += 8) { + priv->pgtable[i + 0] = ZERO_LV2LINK; + priv->pgtable[i + 1] = ZERO_LV2LINK; + priv->pgtable[i + 2] = ZERO_LV2LINK; + priv->pgtable[i + 3] = ZERO_LV2LINK; + priv->pgtable[i + 4] = ZERO_LV2LINK; + priv->pgtable[i + 5] = ZERO_LV2LINK; + priv->pgtable[i + 6] = ZERO_LV2LINK; + priv->pgtable[i + 7] = ZERO_LV2LINK; + } + pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES); spin_lock_init(&priv->lock); @@ -700,7 +749,7 @@ err_pgtable: static void exynos_iommu_domain_destroy(struct iommu_domain *domain) { struct exynos_iommu_domain *priv = domain->priv; - struct sysmmu_drvdata *data; + struct exynos_iommu_owner *owner; unsigned long flags; int i; @@ -708,16 +757,20 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain) spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(data, &priv->clients, node) { - while (!exynos_sysmmu_disable(data->dev)) + list_for_each_entry(owner, &priv->clients, client) { + while (!exynos_sysmmu_disable(owner->dev)) ; /* until System MMU is actually disabled */ } + while (!list_empty(&priv->clients)) + list_del_init(priv->clients.next); + spin_unlock_irqrestore(&priv->lock, flags); for (i = 0; i < NUM_LV1ENTRIES; i++) if (lv1ent_page(priv->pgtable + i)) - kfree(__va(lv2table_base(priv->pgtable + i))); + kmem_cache_free(lv2table_kmem_cache, + phys_to_virt(lv2table_base(priv->pgtable + i))); free_pages((unsigned long)priv->pgtable, 2); free_pages((unsigned long)priv->lv2entcnt, 1); @@ -728,114 +781,134 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain) static int exynos_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct exynos_iommu_owner *owner = dev->archdata.iommu; struct exynos_iommu_domain *priv = domain->priv; + phys_addr_t pagetable = virt_to_phys(priv->pgtable); unsigned long flags; int ret; - ret = pm_runtime_get_sync(data->sysmmu); - if (ret < 0) - return ret; - - ret = 0; - spin_lock_irqsave(&priv->lock, flags); - ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain); - + ret 
= __exynos_sysmmu_enable(dev, pagetable, domain); if (ret == 0) { - /* 'data->node' must not be appeared in priv->clients */ - BUG_ON(!list_empty(&data->node)); - data->dev = dev; - list_add_tail(&data->node, &priv->clients); + list_add_tail(&owner->client, &priv->clients); + owner->domain = domain; } spin_unlock_irqrestore(&priv->lock, flags); if (ret < 0) { - dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n", - __func__, __pa(priv->pgtable)); - pm_runtime_put(data->sysmmu); - } else if (ret > 0) { - dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n", - __func__, __pa(priv->pgtable)); - } else { - dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n", - __func__, __pa(priv->pgtable)); + dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n", + __func__, &pagetable); + return ret; } + dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n", + __func__, &pagetable, (ret == 0) ? "" : ", again"); + return ret; } static void exynos_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + struct exynos_iommu_owner *owner; struct exynos_iommu_domain *priv = domain->priv; - struct list_head *pos; + phys_addr_t pagetable = virt_to_phys(priv->pgtable); unsigned long flags; - bool found = false; spin_lock_irqsave(&priv->lock, flags); - list_for_each(pos, &priv->clients) { - if (list_entry(pos, struct sysmmu_drvdata, node) == data) { - found = true; + list_for_each_entry(owner, &priv->clients, client) { + if (owner == dev->archdata.iommu) { + if (exynos_sysmmu_disable(dev)) { + list_del_init(&owner->client); + owner->domain = NULL; + } break; } } - if (!found) - goto finish; - - if (__exynos_sysmmu_disable(data)) { - dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n", - __func__, __pa(priv->pgtable)); - list_del_init(&data->node); - - } else { - dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed", - __func__, __pa(priv->pgtable)); - } - -finish: spin_unlock_irqrestore(&priv->lock, flags); - if (found) - pm_runtime_put(data->sysmmu); + if (owner == dev->archdata.iommu) + dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", + __func__, &pagetable); + else + dev_err(dev, "%s: No IOMMU is attached\n", __func__); } -static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, - short *pgcounter) +static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv, + sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter) { + if (lv1ent_section(sent)) { + WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); + return ERR_PTR(-EADDRINUSE); + } + if (lv1ent_fault(sent)) { - unsigned long *pent; + sysmmu_pte_t *pent; + bool need_flush_flpd_cache = lv1ent_zero(sent); - pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC); - BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1)); + pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); + BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1)); if (!pent) - return NULL; + return ERR_PTR(-ENOMEM); - *sent = mk_lv1ent_page(__pa(pent)); + *sent = mk_lv1ent_page(virt_to_phys(pent)); *pgcounter = NUM_LV2ENTRIES; pgtable_flush(pent, pent + NUM_LV2ENTRIES); pgtable_flush(sent, sent + 1); + + /* + * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache + * may caches the address of zero_l2_table. This function + * replaces the zero_l2_table with new L2 page table to write + * valid mappings. 
+ * Accessing the valid area may cause page fault since FLPD + * cache may still caches zero_l2_table for the valid area + * instead of new L2 page table that have the mapping + * information of the valid area + * Thus any replacement of zero_l2_table with other valid L2 + * page table must involve FLPD cache invalidation for System + * MMU v3.3. + * FLPD cache invalidation is performed with TLB invalidation + * by VPN without blocking. It is safe to invalidate TLB without + * blocking because the target address of TLB invalidation is + * not currently mapped. + */ + if (need_flush_flpd_cache) { + struct exynos_iommu_owner *owner; + + spin_lock(&priv->lock); + list_for_each_entry(owner, &priv->clients, client) + sysmmu_tlb_invalidate_flpdcache( + owner->dev, iova); + spin_unlock(&priv->lock); + } } return page_entry(sent, iova); } -static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt) +static int lv1set_section(struct exynos_iommu_domain *priv, + sysmmu_pte_t *sent, sysmmu_iova_t iova, + phys_addr_t paddr, short *pgcnt) { - if (lv1ent_section(sent)) + if (lv1ent_section(sent)) { + WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", + iova); return -EADDRINUSE; + } if (lv1ent_page(sent)) { - if (*pgcnt != NUM_LV2ENTRIES) + if (*pgcnt != NUM_LV2ENTRIES) { + WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", + iova); return -EADDRINUSE; + } - kfree(page_entry(sent, 0)); - + kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0)); *pgcnt = 0; } @@ -843,14 +916,26 @@ static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt) pgtable_flush(sent, sent + 1); + spin_lock(&priv->lock); + if (lv1ent_page_zero(sent)) { + struct exynos_iommu_owner *owner; + /* + * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD + * entry by speculative prefetch of SLPD which has no mapping. + */ + list_for_each_entry(owner, &priv->clients, client) + sysmmu_tlb_invalidate_flpdcache(owner->dev, iova); + } + spin_unlock(&priv->lock); + return 0; } -static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, +static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, short *pgcnt) { if (size == SPAGE_SIZE) { - if (!lv2ent_fault(pent)) + if (WARN_ON(!lv2ent_fault(pent))) return -EADDRINUSE; *pent = mk_lv2ent_spage(paddr); @@ -858,9 +943,11 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, *pgcnt -= 1; } else { /* size == LPAGE_SIZE */ int i; + for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { - if (!lv2ent_fault(pent)) { - memset(pent, 0, sizeof(*pent) * i); + if (WARN_ON(!lv2ent_fault(pent))) { + if (i > 0) + memset(pent - i, 0, sizeof(*pent) * i); return -EADDRINUSE; } @@ -873,11 +960,38 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, return 0; } -static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, +/* + * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: + * + * System MMU v3.x have an advanced logic to improve address translation + * performance with caching more page table entries by a page table walk. + * However, the logic has a bug that caching fault page table entries and System + * MMU reports page fault if the cached fault entry is hit even though the fault + * entry is updated to a valid entry after the entry is cached. + * To prevent caching fault page table entries which may be updated to valid + * entries later, the virtual memory manager should care about the w/a about the + * problem. 
The followings describe w/a. + * + * Any two consecutive I/O virtual address regions must have a hole of 128KiB + * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug) + * + * Precisely, any start address of I/O virtual region must be aligned by + * the following sizes for System MMU v3.1 and v3.2. + * System MMU v3.1: 128KiB + * System MMU v3.2: 256KiB + * + * Because System MMU v3.3 caches page table entries more aggressively, it needs + * more w/a. + * - Any two consecutive I/O virtual regions must be have a hole of larger size + * than or equal size to 128KiB. + * - Start address of an I/O virtual region must be aligned by 128KiB. + */ +static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, phys_addr_t paddr, size_t size, int prot) { struct exynos_iommu_domain *priv = domain->priv; - unsigned long *entry; + sysmmu_pte_t *entry; + sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; unsigned long flags; int ret = -ENOMEM; @@ -888,38 +1002,52 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, entry = section_entry(priv->pgtable, iova); if (size == SECT_SIZE) { - ret = lv1set_section(entry, paddr, + ret = lv1set_section(priv, entry, iova, paddr, &priv->lv2entcnt[lv1ent_offset(iova)]); } else { - unsigned long *pent; + sysmmu_pte_t *pent; - pent = alloc_lv2entry(entry, iova, + pent = alloc_lv2entry(priv, entry, iova, &priv->lv2entcnt[lv1ent_offset(iova)]); - if (!pent) - ret = -ENOMEM; + if (IS_ERR(pent)) + ret = PTR_ERR(pent); else ret = lv2set_page(pent, paddr, size, &priv->lv2entcnt[lv1ent_offset(iova)]); } - if (ret) { - pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n", - __func__, iova, size); - } + if (ret) + pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n", + __func__, ret, size, iova); spin_unlock_irqrestore(&priv->pgtablelock, flags); return ret; } +static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv, + sysmmu_iova_t iova, size_t size) +{ + struct exynos_iommu_owner *owner; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + list_for_each_entry(owner, &priv->clients, client) + sysmmu_tlb_invalidate_entry(owner->dev, iova, size); + + spin_unlock_irqrestore(&priv->lock, flags); +} + static size_t exynos_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size) + unsigned long l_iova, size_t size) { struct exynos_iommu_domain *priv = domain->priv; - struct sysmmu_drvdata *data; + sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; + sysmmu_pte_t *ent; + size_t err_pgsize; unsigned long flags; - unsigned long *ent; BUG_ON(priv->pgtable == NULL); @@ -928,9 +1056,12 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ent = section_entry(priv->pgtable, iova); if (lv1ent_section(ent)) { - BUG_ON(size < SECT_SIZE); + if (WARN_ON(size < SECT_SIZE)) { + err_pgsize = SECT_SIZE; + goto err; + } - *ent = 0; + *ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */ pgtable_flush(ent, ent + 1); size = SECT_SIZE; goto done; @@ -954,34 +1085,42 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, if (lv2ent_small(ent)) { *ent = 0; size = SPAGE_SIZE; + pgtable_flush(ent, ent + 1); priv->lv2entcnt[lv1ent_offset(iova)] += 1; goto done; } /* lv1ent_large(ent) == true here */ - BUG_ON(size < LPAGE_SIZE); + if (WARN_ON(size < LPAGE_SIZE)) { + err_pgsize = LPAGE_SIZE; + goto err; + } memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); + pgtable_flush(ent, ent + SPAGES_PER_LPAGE); size = LPAGE_SIZE; priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; 
done: spin_unlock_irqrestore(&priv->pgtablelock, flags); - spin_lock_irqsave(&priv->lock, flags); - list_for_each_entry(data, &priv->clients, node) - sysmmu_tlb_invalidate_entry(data->dev, iova); - spin_unlock_irqrestore(&priv->lock, flags); - + exynos_iommu_tlb_invalidate_entry(priv, iova, size); return size; +err: + spin_unlock_irqrestore(&priv->pgtablelock, flags); + + pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n", + __func__, size, iova, err_pgsize); + + return 0; } static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct exynos_iommu_domain *priv = domain->priv; - unsigned long *entry; + sysmmu_pte_t *entry; unsigned long flags; phys_addr_t phys = 0; @@ -1005,6 +1144,32 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, return phys; } +static int exynos_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + int ret; + + group = iommu_group_get(dev); + + if (!group) { + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + } + + ret = iommu_group_add_device(group, dev); + iommu_group_put(group); + + return ret; +} + +static void exynos_iommu_remove_device(struct device *dev) +{ + iommu_group_remove_device(dev); +} + static struct iommu_ops exynos_iommu_ops = { .domain_init = exynos_iommu_domain_init, .domain_destroy = exynos_iommu_domain_destroy, @@ -1013,6 +1178,8 @@ static struct iommu_ops exynos_iommu_ops = { .map = exynos_iommu_map, .unmap = exynos_iommu_unmap, .iova_to_phys = exynos_iommu_iova_to_phys, + .add_device = exynos_iommu_add_device, + .remove_device = exynos_iommu_remove_device, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, }; @@ -1020,11 +1187,41 @@ static int __init exynos_iommu_init(void) { int ret; + lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", + LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); + if (!lv2table_kmem_cache) { + pr_err("%s: Failed to create kmem cache\n", __func__); + return -ENOMEM; + } + ret = platform_driver_register(&exynos_sysmmu_driver); + if (ret) { + pr_err("%s: Failed to register driver\n", __func__); + goto err_reg_driver; + } - if (ret == 0) - bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); + zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); + if (zero_lv2_table == NULL) { + pr_err("%s: Failed to allocate zero level2 page table\n", + __func__); + ret = -ENOMEM; + goto err_zero_lv2; + } + ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); + if (ret) { + pr_err("%s: Failed to register exynos-iommu driver.\n", + __func__); + goto err_set_iommu; + } + + return 0; +err_set_iommu: + kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); +err_zero_lv2: + platform_driver_unregister(&exynos_sysmmu_driver); +err_reg_driver: + kmem_cache_destroy(lv2table_kmem_cache); return ret; } subsys_initcall(exynos_iommu_init); diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index cba0498eb011..b99dd88e31b9 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -592,8 +592,7 @@ found_cpu_node: /* advance to next node in cache hierarchy */ node = of_find_node_by_phandle(*prop); if (!node) { - pr_debug("Invalid node for cache hierarchy %s\n", - node->full_name); + pr_debug("Invalid node for cache hierarchy\n"); return ~(u32)0; } } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c new file mode 100644 index 000000000000..53cde086e83b --- /dev/null +++ 
b/drivers/iommu/ipmmu-vmsa.c @@ -0,0 +1,1255 @@ +/* + * IPMMU VMSA + * + * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/platform_data/ipmmu-vmsa.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#include <asm/dma-iommu.h> +#include <asm/pgalloc.h> + +struct ipmmu_vmsa_device { + struct device *dev; + void __iomem *base; + struct list_head list; + + const struct ipmmu_vmsa_platform_data *pdata; + unsigned int num_utlbs; + + struct dma_iommu_mapping *mapping; +}; + +struct ipmmu_vmsa_domain { + struct ipmmu_vmsa_device *mmu; + struct iommu_domain *io_domain; + + unsigned int context_id; + spinlock_t lock; /* Protects mappings */ + pgd_t *pgd; +}; + +struct ipmmu_vmsa_archdata { + struct ipmmu_vmsa_device *mmu; + unsigned int utlb; +}; + +static DEFINE_SPINLOCK(ipmmu_devices_lock); +static LIST_HEAD(ipmmu_devices); + +#define TLB_LOOP_TIMEOUT 100 /* 100us */ + +/* ----------------------------------------------------------------------------- + * Registers Definition + */ + +#define IM_CTX_SIZE 0x40 + +#define IMCTR 0x0000 +#define IMCTR_TRE (1 << 17) +#define IMCTR_AFE (1 << 16) +#define IMCTR_RTSEL_MASK (3 << 4) +#define IMCTR_RTSEL_SHIFT 4 +#define IMCTR_TREN (1 << 3) +#define IMCTR_INTEN (1 << 2) +#define IMCTR_FLUSH (1 << 1) +#define IMCTR_MMUEN (1 << 0) + +#define IMCAAR 0x0004 + +#define IMTTBCR 0x0008 +#define IMTTBCR_EAE (1 << 31) +#define IMTTBCR_PMB (1 << 30) +#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) +#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) +#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) +#define IMTTBCR_SH1_MASK (3 << 28) +#define IMTTBCR_ORGN1_NC (0 << 26) +#define IMTTBCR_ORGN1_WB_WA (1 << 26) +#define IMTTBCR_ORGN1_WT (2 << 26) +#define IMTTBCR_ORGN1_WB (3 << 26) +#define IMTTBCR_ORGN1_MASK (3 << 26) +#define IMTTBCR_IRGN1_NC (0 << 24) +#define IMTTBCR_IRGN1_WB_WA (1 << 24) +#define IMTTBCR_IRGN1_WT (2 << 24) +#define IMTTBCR_IRGN1_WB (3 << 24) +#define IMTTBCR_IRGN1_MASK (3 << 24) +#define IMTTBCR_TSZ1_MASK (7 << 16) +#define IMTTBCR_TSZ1_SHIFT 16 +#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) +#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) +#define IMTTBCR_SH0_MASK (3 << 12) +#define IMTTBCR_ORGN0_NC (0 << 10) +#define IMTTBCR_ORGN0_WB_WA (1 << 10) +#define IMTTBCR_ORGN0_WT (2 << 10) +#define IMTTBCR_ORGN0_WB (3 << 10) +#define IMTTBCR_ORGN0_MASK (3 << 10) +#define IMTTBCR_IRGN0_NC (0 << 8) +#define IMTTBCR_IRGN0_WB_WA (1 << 8) +#define IMTTBCR_IRGN0_WT (2 << 8) +#define IMTTBCR_IRGN0_WB (3 << 8) +#define IMTTBCR_IRGN0_MASK (3 << 8) +#define IMTTBCR_SL0_LVL_2 (0 << 4) +#define IMTTBCR_SL0_LVL_1 (1 << 4) +#define IMTTBCR_TSZ0_MASK (7 << 0) +#define IMTTBCR_TSZ0_SHIFT O + +#define IMBUSCR 0x000c +#define IMBUSCR_DVM (1 << 2) +#define IMBUSCR_BUSSEL_SYS (0 << 0) +#define IMBUSCR_BUSSEL_CCI (1 << 0) +#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) +#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) +#define IMBUSCR_BUSSEL_MASK (3 << 0) + +#define IMTTLBR0 0x0010 +#define IMTTUBR0 0x0014 +#define IMTTLBR1 0x0018 +#define IMTTUBR1 0x001c + +#define IMSTR 0x0020 
+#define IMSTR_ERRLVL_MASK (3 << 12) +#define IMSTR_ERRLVL_SHIFT 12 +#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) +#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) +#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) +#define IMSTR_ERRCODE_MASK (7 << 8) +#define IMSTR_MHIT (1 << 4) +#define IMSTR_ABORT (1 << 2) +#define IMSTR_PF (1 << 1) +#define IMSTR_TF (1 << 0) + +#define IMMAIR0 0x0028 +#define IMMAIR1 0x002c +#define IMMAIR_ATTR_MASK 0xff +#define IMMAIR_ATTR_DEVICE 0x04 +#define IMMAIR_ATTR_NC 0x44 +#define IMMAIR_ATTR_WBRWA 0xff +#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) +#define IMMAIR_ATTR_IDX_NC 0 +#define IMMAIR_ATTR_IDX_WBRWA 1 +#define IMMAIR_ATTR_IDX_DEV 2 + +#define IMEAR 0x0030 + +#define IMPCTR 0x0200 +#define IMPSTR 0x0208 +#define IMPEAR 0x020c +#define IMPMBA(n) (0x0280 + ((n) * 4)) +#define IMPMBD(n) (0x02c0 + ((n) * 4)) + +#define IMUCTR(n) (0x0300 + ((n) * 16)) +#define IMUCTR_FIXADDEN (1 << 31) +#define IMUCTR_FIXADD_MASK (0xff << 16) +#define IMUCTR_FIXADD_SHIFT 16 +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) +#define IMUCTR_TTSEL_PMB (8 << 4) +#define IMUCTR_TTSEL_MASK (15 << 4) +#define IMUCTR_FLUSH (1 << 1) +#define IMUCTR_MMUEN (1 << 0) + +#define IMUASID(n) (0x0308 + ((n) * 16)) +#define IMUASID_ASID8_MASK (0xff << 8) +#define IMUASID_ASID8_SHIFT 8 +#define IMUASID_ASID0_MASK (0xff << 0) +#define IMUASID_ASID0_SHIFT 0 + +/* ----------------------------------------------------------------------------- + * Page Table Bits + */ + +/* + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access, + * Long-descriptor format" that the NStable bit being set in a table descriptor + * will result in the NStable and NS bits of all child entries being ignored and + * considered as being set. The IPMMU seems not to comply with this, as it + * generates a secure access page fault if any of the NStable and NS bits isn't + * set when running in non-secure mode. 
+ */ +#ifndef PMD_NSTABLE +#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63) +#endif + +#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53) +#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52) +#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10) +#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8) +#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8) +#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8) +#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8) +#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5) +#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0) + +/* Stage-1 PTE */ +#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11) +#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6) +#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6) +#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6) +#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2) +#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2 + +#define ARM_VMSA_PTE_ATTRS_MASK \ + (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \ + ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \ + ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK) + +#define ARM_VMSA_PTE_CONT_ENTRIES 16 +#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES) + +#define IPMMU_PTRS_PER_PTE 512 +#define IPMMU_PTRS_PER_PMD 512 +#define IPMMU_PTRS_PER_PGD 4 + +/* ----------------------------------------------------------------------------- + * Read/Write Access + */ + +static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) +{ + return ioread32(mmu->base + offset); +} + +static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, + u32 data) +{ + iowrite32(data, mmu->base + offset); +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) +{ + return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); +} + +static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, + u32 data) +{ + ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); +} + +/* ----------------------------------------------------------------------------- + * TLB and microTLB Management + */ + +/* Wait for any pending TLB invalidations to complete */ +static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) +{ + unsigned int count = 0; + + while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { + cpu_relax(); + if (++count == TLB_LOOP_TIMEOUT) { + dev_err_ratelimited(domain->mmu->dev, + "TLB sync timed out -- MMU may be deadlocked\n"); + return; + } + udelay(1); + } +} + +static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) +{ + u32 reg; + + reg = ipmmu_ctx_read(domain, IMCTR); + reg |= IMCTR_FLUSH; + ipmmu_ctx_write(domain, IMCTR, reg); + + ipmmu_tlb_sync(domain); +} + +/* + * Enable MMU translation for the microTLB. + */ +static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, + unsigned int utlb) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + /* + * TODO: Reference-count the microTLB as several bus masters can be + * connected to the same microTLB. + */ + + /* TODO: What should we set the ASID to ? */ + ipmmu_write(mmu, IMUASID(utlb), 0); + /* TODO: Do we need to flush the microTLB ? */ + ipmmu_write(mmu, IMUCTR(utlb), + IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | + IMUCTR_MMUEN); +} + +/* + * Disable MMU translation for the microTLB. 
+ */ +static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, + unsigned int utlb) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + ipmmu_write(mmu, IMUCTR(utlb), 0); +} + +static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, + size_t size) +{ + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. + */ + dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE); +} + +/* ----------------------------------------------------------------------------- + * Domain/Context Management + */ + +static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +{ + phys_addr_t ttbr; + u32 reg; + + /* + * TODO: When adding support for multiple contexts, find an unused + * context. + */ + domain->context_id = 0; + + /* TTBR0 */ + ipmmu_flush_pgtable(domain->mmu, domain->pgd, + IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd)); + ttbr = __pa(domain->pgd); + ipmmu_ctx_write(domain, IMTTLBR0, ttbr); + ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); + + /* + * TTBCR + * We use long descriptors with inner-shareable WBWA tables and allocate + * the whole 32-bit VA space to TTBR0. + */ + ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | + IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); + + /* + * MAIR0 + * We need three attributes only, non-cacheable, write-back read/write + * allocate and device memory. + */ + reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC)) + | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA)) + | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV)); + ipmmu_ctx_write(domain, IMMAIR0, reg); + + /* IMBUSCR */ + ipmmu_ctx_write(domain, IMBUSCR, + ipmmu_ctx_read(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + + /* + * IMSTR + * Clear all interrupt flags. + */ + ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); + + /* + * IMCTR + * Enable the MMU and interrupt generation. The long-descriptor + * translation table format doesn't use TEX remapping. Don't enable AF + * software management as we have no use for it. Flush the TLB as + * required when modifying the context registers. + */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); + + return 0; +} + +static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) +{ + /* + * Disable the context. Flush the TLB as required when modifying the + * context registers. + * + * TODO: Is TLB flush really needed ? + */ + ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); + ipmmu_tlb_sync(domain); +} + +/* ----------------------------------------------------------------------------- + * Fault Handling + */ + +static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) +{ + const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; + struct ipmmu_vmsa_device *mmu = domain->mmu; + u32 status; + u32 iova; + + status = ipmmu_ctx_read(domain, IMSTR); + if (!(status & err_mask)) + return IRQ_NONE; + + iova = ipmmu_ctx_read(domain, IMEAR); + + /* + * Clear the error status flags. Unlike traditional interrupt flag + * registers that must be cleared by writing 1, this status register + * seems to require 0. The error address register must be read before, + * otherwise its value will be 0. + */ + ipmmu_ctx_write(domain, IMSTR, 0); + + /* Log fatal errors. 
*/ + if (status & IMSTR_MHIT) + dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", + iova); + if (status & IMSTR_ABORT) + dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", + iova); + + if (!(status & (IMSTR_PF | IMSTR_TF))) + return IRQ_NONE; + + /* + * Try to handle page faults and translation faults. + * + * TODO: We need to look up the faulty device based on the I/O VA. Use + * the IOMMU device for now. + */ + if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0)) + return IRQ_HANDLED; + + dev_err_ratelimited(mmu->dev, + "Unhandled fault: status 0x%08x iova 0x%08x\n", + status, iova); + + return IRQ_HANDLED; +} + +static irqreturn_t ipmmu_irq(int irq, void *dev) +{ + struct ipmmu_vmsa_device *mmu = dev; + struct iommu_domain *io_domain; + struct ipmmu_vmsa_domain *domain; + + if (!mmu->mapping) + return IRQ_NONE; + + io_domain = mmu->mapping->domain; + domain = io_domain->priv; + + return ipmmu_domain_irq(domain); +} + +/* ----------------------------------------------------------------------------- + * Page Table Management + */ + +#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK)) + +static void ipmmu_free_ptes(pmd_t *pmd) +{ + pgtable_t table = pmd_pgtable(*pmd); + __free_page(table); +} + +static void ipmmu_free_pmds(pud_t *pud) +{ + pmd_t *pmd = pmd_offset(pud, 0); + pgtable_t table; + unsigned int i; + + for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { + if (!pmd_table(*pmd)) + continue; + + ipmmu_free_ptes(pmd); + pmd++; + } + + table = pud_pgtable(*pud); + __free_page(table); +} + +static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain) +{ + pgd_t *pgd, *pgd_base = domain->pgd; + unsigned int i; + + /* + * Recursively free the page tables for this domain. We don't care about + * speculative TLB filling, because the TLB will be nuked next time this + * context bank is re-allocated and no devices currently map to these + * tables. + */ + pgd = pgd_base; + for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) { + if (pgd_none(*pgd)) + continue; + ipmmu_free_pmds((pud_t *)pgd); + pgd++; + } + + kfree(pgd_base); +} + +/* + * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte) + * functions as they would flush the CPU TLB. 
+ */ + +static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova) +{ + pte_t *pte; + + if (!pmd_none(*pmd)) + return pte_offset_kernel(pmd, iova); + + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + if (!pte) + return NULL; + + ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE); + *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return pte + pte_index(iova); +} + +static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, + unsigned long iova) +{ + pud_t *pud = (pud_t *)pgd; + pmd_t *pmd; + + if (!pud_none(*pud)) + return pmd_offset(pud, iova); + + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); + if (!pmd) + return NULL; + + ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE); + *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); + + return pmd + pmd_index(iova); +} + +static u64 ipmmu_page_prot(unsigned int prot, u64 type) +{ + u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF + | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV + | ARM_VMSA_PTE_NS | type; + + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) + pgprot |= ARM_VMSA_PTE_AP_RDONLY; + + if (prot & IOMMU_CACHE) + pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; + + if (prot & IOMMU_EXEC) + pgprot &= ~ARM_VMSA_PTE_XN; + else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + /* If no access create a faulting entry to avoid TLB fills. */ + pgprot &= ~ARM_VMSA_PTE_PAGE; + + return pgprot; +} + +static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova, unsigned long pfn, + size_t size, int prot) +{ + pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE); + unsigned int num_ptes = 1; + pte_t *pte, *start; + unsigned int i; + + pte = ipmmu_alloc_pte(mmu, pmd, iova); + if (!pte) + return -ENOMEM; + + start = pte; + + /* + * Install the page table entries. We can be called both for a single + * page or for a block of 16 physically contiguous pages. In the latter + * case set the PTE contiguous hint. + */ + if (size == SZ_64K) { + pteval |= ARM_VMSA_PTE_CONT; + num_ptes = ARM_VMSA_PTE_CONT_ENTRIES; + } + + for (i = num_ptes; i; --i) + *pte++ = pfn_pte(pfn++, __pgprot(pteval)); + + ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes); + + return 0; +} + +static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, + unsigned long iova, unsigned long pfn, + int prot) +{ + pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT); + + *pmd = pfn_pmd(pfn, __pgprot(pmdval)); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return 0; +} + +static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + pgd_t *pgd = domain->pgd; + unsigned long flags; + unsigned long pfn; + pmd_t *pmd; + int ret; + + if (!pgd) + return -EINVAL; + + if (size & ~PAGE_MASK) + return -EINVAL; + + if (paddr & ~((1ULL << 40) - 1)) + return -ERANGE; + + pfn = __phys_to_pfn(paddr); + pgd += pgd_index(iova); + + /* Update the page tables. 
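ipmmu_page_prot() above composes the stage-1 descriptor attributes, and ipmmu_alloc_init_pte() combines them with the PFN via pfn_pte(). The resulting PTE for a cached, writable, non-executable 4KiB page works out as below (editorial illustration only; the bit positions are copied from the ARM_VMSA_PTE_* definitions earlier in the file, and the example PFN is an assumption):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Attributes produced by ipmmu_page_prot(IOMMU_READ | IOMMU_WRITE |
	 * IOMMU_CACHE, ARM_VMSA_PTE_PAGE): XN, nG, AF, inner-shareable,
	 * unprivileged, non-secure, page descriptor, MAIR index 1 (WBRWA). */
	uint64_t attrs = (3ULL << 53)	/* ARM_VMSA_PTE_XN */
		       | (1ULL << 11)	/* ARM_VMSA_PTE_nG */
		       | (1ULL << 10)	/* ARM_VMSA_PTE_AF */
		       | (3ULL << 8)	/* ARM_VMSA_PTE_SH_IS */
		       | (1ULL << 6)	/* ARM_VMSA_PTE_AP_UNPRIV */
		       | (1ULL << 5)	/* ARM_VMSA_PTE_NS */
		       | (3ULL << 0)	/* ARM_VMSA_PTE_PAGE */
		       | (1ULL << 2);	/* ATTRINDX = IMMAIR_ATTR_IDX_WBRWA */
	uint64_t pfn = 0x40000;		/* physical address 0x40000000 >> 12 */

	/* Expected output: attrs 0x60000000000f67 pte 0x60000040000f67 */
	printf("attrs %#" PRIx64 " pte %#" PRIx64 "\n", attrs, attrs | (pfn << 12));
	return 0;
}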
*/ + spin_lock_irqsave(&domain->lock, flags); + + pmd = ipmmu_alloc_pmd(mmu, pgd, iova); + if (!pmd) { + ret = -ENOMEM; + goto done; + } + + switch (size) { + case SZ_2M: + ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot); + break; + case SZ_64K: + case SZ_4K: + ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot); + break; + default: + ret = -EINVAL; + break; + } + +done: + spin_unlock_irqrestore(&domain->lock, flags); + + if (!ret) + ipmmu_tlb_invalidate(domain); + + return ret; +} + +static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud) +{ + /* Free the page table. */ + pgtable_t table = pud_pgtable(*pud); + __free_page(table); + + /* Clear the PUD. */ + *pud = __pud(0); + ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); +} + +static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud, + pmd_t *pmd) +{ + unsigned int i; + + /* Free the page table. */ + if (pmd_table(*pmd)) { + pgtable_t table = pmd_pgtable(*pmd); + __free_page(table); + } + + /* Clear the PMD. */ + *pmd = __pmd(0); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + /* Check whether the PUD is still needed. */ + pmd = pmd_offset(pud, 0); + for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { + if (!pmd_none(pmd[i])) + return; + } + + /* Clear the parent PUD. */ + ipmmu_clear_pud(mmu, pud); +} + +static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud, + pmd_t *pmd, pte_t *pte, unsigned int num_ptes) +{ + unsigned int i; + + /* Clear the PTE. */ + for (i = num_ptes; i; --i) + pte[i-1] = __pte(0); + + ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes); + + /* Check whether the PMD is still needed. */ + pte = pte_offset_kernel(pmd, 0); + for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) { + if (!pte_none(pte[i])) + return; + } + + /* Clear the parent PMD. */ + ipmmu_clear_pmd(mmu, pud, pmd); +} + +static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd) +{ + pte_t *pte, *start; + pteval_t pteval; + unsigned long pfn; + unsigned int i; + + pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); + if (!pte) + return -ENOMEM; + + /* Copy the PMD attributes. */ + pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK) + | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE; + + pfn = pmd_pfn(*pmd); + start = pte; + + for (i = IPMMU_PTRS_PER_PTE; i; --i) + *pte++ = pfn_pte(pfn++, __pgprot(pteval)); + + ipmmu_flush_pgtable(mmu, start, PAGE_SIZE); + *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE); + ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); + + return 0; +} + +static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte) +{ + unsigned int i; + + for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i) + pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT); + + ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES); +} + +static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain, + unsigned long iova, size_t size) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + unsigned long flags; + pgd_t *pgd = domain->pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int ret = 0; + + if (!pgd) + return -EINVAL; + + if (size & ~PAGE_MASK) + return -EINVAL; + + pgd += pgd_index(iova); + pud = (pud_t *)pgd; + + spin_lock_irqsave(&domain->lock, flags); + + /* If there's no PUD or PMD we're done. */ + if (pud_none(*pud)) + goto done; + + pmd = pmd_offset(pud, iova); + if (pmd_none(*pmd)) + goto done; + + /* + * When freeing a 2MB block just clear the PMD. In the unlikely case the + * block is mapped as individual pages this will free the corresponding + * PTE page table. 
+ */ + if (size == SZ_2M) { + ipmmu_clear_pmd(mmu, pud, pmd); + goto done; + } + + /* + * If the PMD has been mapped as a section remap it as pages to allow + * freeing individual pages. + */ + if (pmd_sect(*pmd)) + ipmmu_split_pmd(mmu, pmd); + + pte = pte_offset_kernel(pmd, iova); + + /* + * When freeing a 64kB block just clear the PTE entries. We don't have + * to care about the contiguous hint of the surrounding entries. + */ + if (size == SZ_64K) { + ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES); + goto done; + } + + /* + * If the PTE has been mapped with the contiguous hint set remap it and + * its surrounding PTEs to allow unmapping a single page. + */ + if (pte_val(*pte) & ARM_VMSA_PTE_CONT) + ipmmu_split_pte(mmu, pte); + + /* Clear the PTE. */ + ipmmu_clear_pte(mmu, pud, pmd, pte, 1); + +done: + spin_unlock_irqrestore(&domain->lock, flags); + + if (ret) + ipmmu_tlb_invalidate(domain); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * IOMMU Operations + */ + +static int ipmmu_domain_init(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return -ENOMEM; + + spin_lock_init(&domain->lock); + + domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + if (!domain->pgd) { + kfree(domain); + return -ENOMEM; + } + + io_domain->priv = domain; + domain->io_domain = io_domain; + + return 0; +} + +static void ipmmu_domain_destroy(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + /* + * Free the domain resources. We assume that all devices have already + * been detached. + */ + ipmmu_domain_destroy_context(domain); + ipmmu_free_pgtables(domain); + kfree(domain); +} + +static int ipmmu_attach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_device *mmu = archdata->mmu; + struct ipmmu_vmsa_domain *domain = io_domain->priv; + unsigned long flags; + int ret = 0; + + if (!mmu) { + dev_err(dev, "Cannot attach to IPMMU\n"); + return -ENXIO; + } + + spin_lock_irqsave(&domain->lock, flags); + + if (!domain->mmu) { + /* The domain hasn't been used yet, initialize it. */ + domain->mmu = mmu; + ret = ipmmu_domain_init_context(domain); + } else if (domain->mmu != mmu) { + /* + * Something is wrong, we can't attach two devices using + * different IOMMUs to the same domain. + */ + dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", + dev_name(mmu->dev), dev_name(domain->mmu->dev)); + ret = -EINVAL; + } + + spin_unlock_irqrestore(&domain->lock, flags); + + if (ret < 0) + return ret; + + ipmmu_utlb_enable(domain, archdata->utlb); + + return 0; +} + +static void ipmmu_detach_device(struct iommu_domain *io_domain, + struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + ipmmu_utlb_disable(domain, archdata->utlb); + + /* + * TODO: Optimize by disabling the context when no device is attached. 
+ */ +} + +static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + + if (!domain) + return -ENODEV; + + return ipmmu_create_mapping(domain, iova, paddr, size, prot); +} + +static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, + size_t size) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + int ret; + + ret = ipmmu_clear_mapping(domain, iova, size); + return ret ? 0 : size; +} + +static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, + dma_addr_t iova) +{ + struct ipmmu_vmsa_domain *domain = io_domain->priv; + pgd_t pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; + + /* TODO: Is locking needed ? */ + + if (!domain->pgd) + return 0; + + pgd = *(domain->pgd + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; + + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; + + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; + + if (pmd_sect(pmd)) + return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK); + + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); + if (pte_none(pte)) + return 0; + + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); +} + +static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev) +{ + const struct ipmmu_vmsa_master *master = mmu->pdata->masters; + const char *devname = dev_name(dev); + unsigned int i; + + for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) { + if (strcmp(master->name, devname) == 0) + return master->utlb; + } + + return -1; +} + +static int ipmmu_add_device(struct device *dev) +{ + struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_device *mmu; + struct iommu_group *group; + int utlb = -1; + int ret; + + if (dev->archdata.iommu) { + dev_warn(dev, "IOMMU driver already assigned to device %s\n", + dev_name(dev)); + return -EINVAL; + } + + /* Find the master corresponding to the device. */ + spin_lock(&ipmmu_devices_lock); + + list_for_each_entry(mmu, &ipmmu_devices, list) { + utlb = ipmmu_find_utlb(mmu, dev); + if (utlb >= 0) { + /* + * TODO Take a reference to the MMU to protect + * against device removal. + */ + break; + } + } + + spin_unlock(&ipmmu_devices_lock); + + if (utlb < 0) + return -ENODEV; + + if (utlb >= mmu->num_utlbs) + return -EINVAL; + + /* Create a device group and add the device to it. */ + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + + ret = iommu_group_add_device(group, dev); + iommu_group_put(group); + + if (ret < 0) { + dev_err(dev, "Failed to add device to IPMMU group\n"); + return ret; + } + + archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); + if (!archdata) { + ret = -ENOMEM; + goto error; + } + + archdata->mmu = mmu; + archdata->utlb = utlb; + dev->archdata.iommu = archdata; + + /* + * Create the ARM mapping, used by the ARM DMA mapping core to allocate + * VAs. This will allocate a corresponding IOMMU domain. + * + * TODO: + * - Create one mapping per context (TLB). + * - Make the mapping size configurable ? We currently use a 2GB mapping + * at a 1GB offset to ensure that NULL VAs will fault. 
+ */ + if (!mmu->mapping) { + struct dma_iommu_mapping *mapping; + + mapping = arm_iommu_create_mapping(&platform_bus_type, + SZ_1G, SZ_2G); + if (IS_ERR(mapping)) { + dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); + return PTR_ERR(mapping); + } + + mmu->mapping = mapping; + } + + /* Attach the ARM VA mapping to the device. */ + ret = arm_iommu_attach_device(dev, mmu->mapping); + if (ret < 0) { + dev_err(dev, "Failed to attach device to VA mapping\n"); + goto error; + } + + return 0; + +error: + kfree(dev->archdata.iommu); + dev->archdata.iommu = NULL; + iommu_group_remove_device(dev); + return ret; +} + +static void ipmmu_remove_device(struct device *dev) +{ + arm_iommu_detach_device(dev); + iommu_group_remove_device(dev); + kfree(dev->archdata.iommu); + dev->archdata.iommu = NULL; +} + +static struct iommu_ops ipmmu_ops = { + .domain_init = ipmmu_domain_init, + .domain_destroy = ipmmu_domain_destroy, + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device, + .remove_device = ipmmu_remove_device, + .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K, +}; + +/* ----------------------------------------------------------------------------- + * Probe/remove and init + */ + +static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) +{ + unsigned int i; + + /* Disable all contexts. */ + for (i = 0; i < 4; ++i) + ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); +} + +static int ipmmu_probe(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu; + struct resource *res; + int irq; + int ret; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "missing platform data\n"); + return -EINVAL; + } + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) { + dev_err(&pdev->dev, "cannot allocate device data\n"); + return -ENOMEM; + } + + mmu->dev = &pdev->dev; + mmu->pdata = pdev->dev.platform_data; + mmu->num_utlbs = 32; + + /* Map I/O memory and request IRQ. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mmu->base)) + return PTR_ERR(mmu->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "no IRQ found\n"); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, + dev_name(&pdev->dev), mmu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); + return irq; + } + + ipmmu_device_reset(mmu); + + /* + * We can't create the ARM mapping here as it requires the bus to have + * an IOMMU, which only happens when bus_set_iommu() is called in + * ipmmu_init() after the probe function returns. 
+ */ + + spin_lock(&ipmmu_devices_lock); + list_add(&mmu->list, &ipmmu_devices); + spin_unlock(&ipmmu_devices_lock); + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int ipmmu_remove(struct platform_device *pdev) +{ + struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); + + spin_lock(&ipmmu_devices_lock); + list_del(&mmu->list); + spin_unlock(&ipmmu_devices_lock); + + arm_iommu_release_mapping(mmu->mapping); + + ipmmu_device_reset(mmu); + + return 0; +} + +static struct platform_driver ipmmu_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "ipmmu-vmsa", + }, + .probe = ipmmu_probe, + .remove = ipmmu_remove, +}; + +static int __init ipmmu_init(void) +{ + int ret; + + ret = platform_driver_register(&ipmmu_driver); + if (ret < 0) + return ret; + + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &ipmmu_ops); + + return 0; +} + +static void __exit ipmmu_exit(void) +{ + return platform_driver_unregister(&ipmmu_driver); +} + +subsys_initcall(ipmmu_init); +module_exit(ipmmu_exit); + +MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); +MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 08ba4972da9d..61def7cb5263 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb) static int msm_iommu_probe(struct platform_device *pdev) { - struct resource *r, *r2; + struct resource *r; struct clk *iommu_clk; struct clk *iommu_pclk; struct msm_iommu_drvdata *drvdata; struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; void __iomem *regs_base; - resource_size_t len; int ret, irq, par; if (pdev->id == -1) { @@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev) iommu_clk = NULL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); - - if (!r) { - ret = -ENODEV; - goto fail_clk; - } - - len = resource_size(r); - - r2 = request_mem_region(r->start, len, r->name); - if (!r2) { - pr_err("Could not request memory region: start=%p, len=%d\n", - (void *) r->start, len); - ret = -EBUSY; + regs_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(regs_base)) { + ret = PTR_ERR(regs_base); goto fail_clk; } - regs_base = ioremap(r2->start, len); - - if (!regs_base) { - pr_err("Could not ioremap: start=%p, len=%d\n", - (void *) r2->start, len); - ret = -EBUSY; - goto fail_mem; - } - irq = platform_get_irq_byname(pdev, "secure_irq"); if (irq < 0) { ret = -ENODEV; - goto fail_io; + goto fail_clk; } msm_iommu_reset(regs_base, iommu_dev->ncb); @@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev) if (!par) { pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); ret = -ENODEV; - goto fail_io; + goto fail_clk; } ret = request_irq(irq, msm_iommu_fault_handler, 0, "msm_iommu_secure_irpt_handler", drvdata); if (ret) { pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); - goto fail_io; + goto fail_clk; } @@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev) clk_disable(iommu_pclk); return 0; -fail_io: - iounmap(regs_base); -fail_mem: - release_mem_region(r->start, len); fail_clk: if (iommu_clk) { clk_disable(iommu_clk); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 7fcbfc498fa9..895af06a667f 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -34,6 +34,9 @@ #include 
"omap-iopgtable.h" #include "omap-iommu.h" +#define to_iommu(dev) \ + ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) + #define for_each_iotlb_cr(obj, n, __i, cr) \ for (__i = 0; \ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ @@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da) __func__, start, da, bytes); iotlb_load_cr(obj, &cr); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); + break; } } pm_runtime_put_sync(obj->dev); @@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte) clean_dcache_area(iopte, IOPTE_TABLE_SIZE); } -static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, - u32 flags) +static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) { memset(e, 0, sizeof(*e)); e->da = da; e->pa = pa; - e->valid = 1; + e->valid = MMU_CAM_V; /* FIXME: add OMAP1 support */ - e->pgsz = flags & MMU_CAM_PGSZ_MASK; - e->endian = flags & MMU_RAM_ENDIAN_MASK; - e->elsz = flags & MMU_RAM_ELSZ_MASK; - e->mixed = flags & MMU_RAM_MIXED_MASK; + e->pgsz = pgsz; + e->endian = MMU_RAM_ENDIAN_LITTLE; + e->elsz = MMU_RAM_ELSZ_8; + e->mixed = 0; return iopgsz_to_bytes(e->pgsz); } @@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, struct device *dev = oiommu->dev; struct iotlb_entry e; int omap_pgsz; - u32 ret, flags; + u32 ret; - /* we only support mapping a single iommu page for now */ omap_pgsz = bytes_to_iopgsz(bytes); if (omap_pgsz < 0) { dev_err(dev, "invalid size to map: %d\n", bytes); @@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); - flags = omap_pgsz | prot; - - iotlb_init_entry(&e, da, pa, flags); + iotlb_init_entry(&e, da, pa, omap_pgsz); ret = omap_iopgtable_store_entry(oiommu, &e); if (ret) @@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, return ret; } -static int omap_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - return 0; -} - static int omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data; @@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = { .map = omap_iommu_map, .unmap = omap_iommu_unmap, .iova_to_phys = omap_iommu_iova_to_phys, - .domain_has_cap = omap_iommu_domain_has_cap, .add_device = omap_iommu_add_device, .remove_device = omap_iommu_remove_device, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h index b6f9a51746ca..f891683e3f05 100644 --- a/drivers/iommu/omap-iopgtable.h +++ b/drivers/iommu/omap-iopgtable.h @@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) /* to find an entry in the second-level page table. 
*/ #define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1)) #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) - -#define to_iommu(dev) \ - (platform_get_drvdata(to_platform_device(dev))) diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c index e3bc2e19b6dd..bd97adecb1fd 100644 --- a/drivers/iommu/shmobile-ipmmu.c +++ b/drivers/iommu/shmobile-ipmmu.c @@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev) struct resource *res; struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "cannot get platform resources\n"); - return -ENOENT; - } ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); if (!ipmmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); @@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev) } spin_lock_init(&ipmmu->flush_lock); ipmmu->dev = &pdev->dev; - ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!ipmmu->ipmmu_base) { - dev_err(&pdev->dev, "ioremap_nocache failed\n"); - return -ENOMEM; - } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ipmmu->ipmmu_base)) + return PTR_ERR(ipmmu->ipmmu_base); + ipmmu->dev_names = pdata->dev_names; ipmmu->num_dev_names = pdata->num_dev_names; platform_set_drvdata(pdev, ipmmu); ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ - ipmmu_iommu_init(ipmmu); - return 0; + return ipmmu_iommu_init(ipmmu); } static struct platform_driver ipmmu_driver = { |
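The msm_iommu_dev.c and shmobile-ipmmu.c hunks above both collapse the open-coded platform_get_resource() / request_mem_region() / ioremap() sequence, and its matching error unwinding, into a single devm_ioremap_resource() call. A minimal probe sketch of the resulting idiom (editorial illustration with a hypothetical foo_iommu driver, not part of this patch):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical per-device data, for illustration only. */
struct foo_iommu {
	void __iomem *base;
};

static int foo_iommu_probe(struct platform_device *pdev)
{
	struct foo_iommu *foo;
	struct resource *res;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* devm_ioremap_resource() rejects a NULL resource, requests the
	 * memory region and maps it; the mapping is unwound automatically
	 * on probe failure or device removal. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	foo->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	platform_set_drvdata(pdev, foo);
	return 0;
}

Because the mapping is device-managed, the iounmap() and release_mem_region() calls and the fail_io/fail_mem labels removed by the msm_iommu hunk need no replacement; cleanup happens automatically when probe fails or the device is unbound.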