-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 40
-rw-r--r--  arch/xtensa/mm/init.c        | 70
2 files changed, 93 insertions(+), 17 deletions(-)
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 623720a11143..732631ce250f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
*/
#include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
unsigned long ret;
- unsigned long uncached = 0;
+ unsigned long uncached;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
- ret = (unsigned long)page_address(page);
+ *handle = phys_to_dma(dev, page_to_phys(page));
- /* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+ if (PageHighMem(page)) {
+ void *p;
+ p = dma_common_contiguous_remap(page, size, VM_MAP,
+ pgprot_noncached(PAGE_KERNEL),
+ __builtin_return_address(0));
+ if (!p) {
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
+ }
+ return p;
+ }
+#endif
+ ret = (unsigned long)page_address(page);
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
- *handle = virt_to_bus((void *)ret);
__invalidate_dcache_range(ret, size);
return (void *)uncached;
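
Read together, the interleaved -/+ lines above leave the tail of xtensa_dma_alloc() looking roughly as follows (a reconstruction from this hunk for readability, not a verbatim copy of the resulting file). The DMA handle is now derived from the page's physical address via phys_to_dma(), since a highmem page has no KSEG virtual address to feed the old virt_to_bus(); highmem pages get an uncached vmalloc-space mapping instead of the KSEG bypass alias:

	*handle = phys_to_dma(dev, page_to_phys(page));

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		/* Highmem: build an uncached mapping in vmalloc space. */
		p = dma_common_contiguous_remap(page, size, VM_MAP,
						pgprot_noncached(PAGE_KERNEL),
						__builtin_return_address(0));
		if (!p) {
			/* Remap failed: give the pages back. */
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	/* Lowmem: return the uncached KSEG bypass alias, as before. */
	ret = (unsigned long)page_address(page);
	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;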
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- unsigned long addr = (unsigned long)vaddr +
- XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
- struct page *page = virt_to_page(addr);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
- addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+ unsigned long addr = (unsigned long)vaddr;
+ struct page *page;
+
+ if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+ addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+ addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ page = virt_to_page(addr);
+ } else {
+#ifdef CONFIG_MMU
+ dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+ page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+ }
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
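
The rewritten free path has to classify the virtual address it is handed: a lowmem buffer comes back as a KSEG bypass (uncached) alias that converts to its page with plain arithmetic, while a highmem buffer is a vmalloc-space remap whose page must instead be recovered from the DMA handle via dma_to_phys(). Below is a minimal, self-contained sketch of that classification; the XCHAL_* values are the common xtensa defaults and are only illustrative assumptions here:

#include <stdbool.h>
#include <stdio.h>

#define XCHAL_KSEG_CACHED_VADDR	0xd0000000UL	/* assumed default */
#define XCHAL_KSEG_BYPASS_VADDR	0xd8000000UL	/* assumed default */
#define XCHAL_KSEG_SIZE		0x08000000UL	/* assumed: 128 MiB */

/* True when vaddr lies in the uncached KSEG bypass window, i.e. it
 * aliases a lowmem page and virt_to_page() applies after simple
 * arithmetic; anything else must be a vmalloc remap of highmem. */
static bool is_kseg_bypass(unsigned long vaddr)
{
	return vaddr >= XCHAL_KSEG_BYPASS_VADDR &&
	       vaddr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

/* Recover the cached KSEG alias of a bypass-window address. */
static unsigned long kseg_bypass_to_cached(unsigned long vaddr)
{
	return vaddr + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}

int main(void)
{
	unsigned long v = 0xd8100000UL;		/* a bypass-window address */

	if (is_kseg_bypass(v))
		printf("cached alias: 0x%lx\n", kseg_bypass_to_cached(v));
	return 0;
}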
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d776ec0d7b22..34aead7dcb48 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+ unsigned long max_low = max_low_pfn;
+ struct memblock_region *mem, *res;
+
+ reset_all_zones_managed_pages();
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ if (memblock_is_nomap(mem))
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ free_area_high(start, res_start);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ free_area_high(start, end);
+ }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
/*
* Initialize memory pages.
*/
void __init mem_init(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- reset_all_zones_managed_pages();
- for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-#endif
+ free_highpages();
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
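
The reserved-region loop in the new free_highpages() is an interval subtraction: for each highmem memblock range it clamps every reserved range into [start, end), frees the gap in front of each hole, and resumes scanning after it. A self-contained sketch of that logic, with plain arrays standing in for the memblock iterators (everything here is illustrative, not kernel API):

#include <stdio.h>

struct pfn_range { unsigned long start, end; };	/* [start, end) */

static void free_area_high(unsigned long pfn, unsigned long end)
{
	printf("freeing pfns [%lu, %lu)\n", pfn, end);
}

/* Free [start, end) minus a sorted list of reserved sub-ranges,
 * mirroring the clamping logic in free_highpages() above. */
static void free_gaps(unsigned long start, unsigned long end,
		      const struct pfn_range *res, int nres)
{
	for (int i = 0; i < nres; i++) {
		unsigned long rs = res[i].start, re = res[i].end;

		if (re < start)		/* hole entirely below the range */
			continue;
		if (rs < start)		/* clamp the hole into [start, end) */
			rs = start;
		if (rs > end)
			rs = end;
		if (re > end)
			re = end;
		if (rs != start)	/* free the gap before this hole */
			free_area_high(start, rs);
		start = re;		/* resume after the hole */
		if (start == end)
			break;
	}
	if (start < end)		/* free whatever remains */
		free_area_high(start, end);
}

int main(void)
{
	/* Highmem pfns 1000..2000 with two reserved holes. */
	const struct pfn_range res[] = { { 1100, 1200 }, { 1500, 1600 } };

	free_gaps(1000, 2000, res, 2);	/* frees [1000,1100) [1200,1500) [1600,2000) */
	return 0;
}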