From fd18de50b9e7965f93d231e7390436fb8900c0e6 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Sun, 10 May 2009 23:57:41 +0100
Subject: intel-iommu: PAE memory corruption fix

PAGE_MASK is 0xFFFFF000 on i386 -- even with PAE.

So it's not sufficient to ensure that you use phys_addr_t or uint64_t
everywhere you handle physical addresses -- you also have to avoid using
the construct 'addr & PAGE_MASK', because that will strip the high 32
bits of the address.

This patch avoids that problem by using PHYSICAL_PAGE_MASK instead of
PAGE_MASK where appropriate. It leaves '& PAGE_MASK' in a few instances
that don't matter -- where it's being used on the virtual bus addresses
we're dishing out, which are 32-bit anyway.

Since PHYSICAL_PAGE_MASK is not present on other architectures, we have
to define it (to PAGE_MASK) if it's not already defined.

Maybe it would be better just to fix PAGE_MASK for i386/PAE?

Signed-off-by: David Woodhouse
Signed-off-by: Linus Torvalds
---
 drivers/pci/intel-iommu.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 001b328adf80..a563fbe559d0 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -59,6 +59,10 @@
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
 
+#ifndef PHYSICAL_PAGE_MASK
+#define PHYSICAL_PAGE_MASK PAGE_MASK
+#endif
+
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
 
@@ -1216,7 +1220,7 @@ static void dmar_init_reserved_ranges(void)
 			if (!r->flags || !(r->flags & IORESOURCE_MEM))
 				continue;
 			addr = r->start;
-			addr &= PAGE_MASK;
+			addr &= PHYSICAL_PAGE_MASK;
 			size = r->end - addr;
 			size = PAGE_ALIGN(size);
 			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
@@ -2173,7 +2177,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-		((u64)paddr) & PAGE_MASK, size, prot);
+				  ((u64)paddr) & PHYSICAL_PAGE_MASK,
+				  size, prot);
 	if (ret)
 		goto error;
 
@@ -2463,8 +2468,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-			((u64)addr) & PAGE_MASK,
-			size, prot);
+					  ((u64)addr) & PHYSICAL_PAGE_MASK,
+					  size, prot);
 		if (ret) {
 			/* clear the page */
 			dma_pte_clear_range(domain, start_addr,
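
Note: the program below is a minimal standalone sketch, not part of the patch
above, illustrating why 'addr & PAGE_MASK' corrupts PAE physical addresses
above 4GiB. The SIM_* names are illustrative stand-ins for the kernel's
definitions: on i386, PAGE_MASK is an unsigned long and therefore only 32 bits
wide, so when it is zero-extended for the AND against a 64-bit phys_addr_t,
bits 32 and up are silently dropped.

/*
 * Standalone sketch (not the kernel's code): simulate the i386/PAE
 * behaviour of 'addr & PAGE_MASK' vs a 64-bit page mask.
 */
#include <stdio.h>
#include <stdint.h>

#define SIM_PAGE_SHIFT	12
/* On i386, PAGE_MASK is an unsigned long, i.e. only 32 bits wide. */
#define SIM_PAGE_MASK		(~((uint32_t)(1u << SIM_PAGE_SHIFT) - 1))	/* 0xFFFFF000 */
/* A 64-bit mask keeps the high physical-address bits intact. */
#define SIM_PHYSICAL_PAGE_MASK	(~((uint64_t)(1u << SIM_PAGE_SHIFT) - 1))	/* 0xFFFFFFFFFFFFF000 */

int main(void)
{
	uint64_t paddr = 0x123456789ULL;	/* physical address above 4GiB, reachable with PAE */

	/*
	 * The 32-bit mask zero-extends to 0x00000000FFFFF000, so the AND
	 * silently strips bits 32 and up of the physical address.
	 */
	printf("addr & PAGE_MASK          = 0x%llx\n",
	       (unsigned long long)(paddr & SIM_PAGE_MASK));
	printf("addr & PHYSICAL_PAGE_MASK = 0x%llx\n",
	       (unsigned long long)(paddr & SIM_PHYSICAL_PAGE_MASK));
	return 0;
}

Built with e.g. 'gcc -std=c99 sim.c', this prints 0x23456000 for the 32-bit
mask and 0x123456000 for the 64-bit one, mirroring the difference between
PAGE_MASK and PHYSICAL_PAGE_MASK in the hunks above.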