Diffstat (limited to 'drivers/gpu/drm/gma500/gtt.c')
-rw-r--r-- | drivers/gpu/drm/gma500/gtt.c | 326
1 files changed, 68 insertions, 258 deletions
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 55a2a6919533..309ffe921bfd 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -7,10 +7,7 @@
  * Alan Cox <alan@linux.intel.com>
  */
 
-#include <linux/shmem_fs.h>
-
-#include <asm/set_memory.h>
-
+#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */
 #include "psb_drv.h"
@@ -18,6 +15,33 @@
  * GTT resource allocator - manage page mappings in GTT space
  */
 
+int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
+			      const char *name, resource_size_t size, resource_size_t align,
+			      bool stolen, u32 *offset)
+{
+	struct resource *root = pdev->gtt_mem;
+	resource_size_t start, end;
+	int ret;
+
+	if (stolen) {
+		/* The start of the GTT is backed by stolen pages. */
+		start = root->start;
+		end = root->start + pdev->gtt.stolen_size - 1;
+	} else {
+		/* The rest is backed by system pages. */
+		start = root->start + pdev->gtt.stolen_size;
+		end = root->end;
+	}
+
+	res->name = name;
+	ret = allocate_resource(root, res, size, start, end, align, NULL, NULL);
+	if (ret)
+		return ret;
+	*offset = res->start - root->start;
+
+	return 0;
+}
+
 /**
  * psb_gtt_mask_pte - generate GTT pte entry
  * @pfn: page number to encode
@@ -43,281 +67,62 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
 	return (pfn << PAGE_SHIFT) | mask;
 }
 
-/**
- * psb_gtt_entry - find the GTT entries for a gtt_range
- * @dev: our DRM device
- * @r: our GTT range
- *
- * Given a gtt_range object return the GTT offset of the page table
- * entries for this gtt_range
- */
-static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res)
 {
-	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	unsigned long offset;
+	unsigned long offset = res->start - pdev->gtt_mem->start;
 
-	offset = r->resource.start - dev_priv->gtt_mem->start;
-
-	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+	return pdev->gtt_map + (offset >> PAGE_SHIFT);
 }
 
-/**
- * psb_gtt_insert - put an object into the GTT
- * @dev: our DRM device
- * @r: our GTT range
- * @resume: on resume
- *
- * Take our preallocated GTT range and insert the GEM object into
- * the GTT. This is protected via the gtt mutex which the caller
- * must hold.
+/*
+ * Take our preallocated GTT range and insert the GEM object into
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
  */
-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
-			  int resume)
+void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
+			  struct page **pages)
 {
+	resource_size_t npages, i;
 	u32 __iomem *gtt_slot;
 	u32 pte;
-	struct page **pages;
-	int i;
 
-	if (r->pages == NULL) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	WARN_ON(r->stolen);	/* refcount these maybe ? */
-
-	gtt_slot = psb_gtt_entry(dev, r);
-	pages = r->pages;
+	/* Write our page entries into the GTT itself */
 
-	if (!resume) {
-		/* Make sure changes are visible to the GPU */
-		set_pages_array_wc(pages, r->npage);
-	}
+	npages = resource_size(res) >> PAGE_SHIFT;
+	gtt_slot = psb_gtt_entry(pdev, res);
 
-	/* Write our page entries into the GTT itself */
-	for (i = 0; i < r->npage; i++) {
-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
-				       PSB_MMU_CACHED_MEMORY);
-		iowrite32(pte, gtt_slot++);
+	for (i = 0; i < npages; ++i, ++gtt_slot) {
+		pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY);
+		iowrite32(pte, gtt_slot);
 	}
 
 	/* Make sure all the entries are set before we return */
 	ioread32(gtt_slot - 1);
-
-	return 0;
 }
 
-/**
- * psb_gtt_remove - remove an object from the GTT
- * @dev: our DRM device
- * @r: our GTT range
- *
- * Remove a preallocated GTT range from the GTT. Overwrite all the
- * page table entries with the dummy page. This is protected via the gtt
- * mutex which the caller must hold.
+/*
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
  */
-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res)
 {
-	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+	resource_size_t npages, i;
 	u32 __iomem *gtt_slot;
 	u32 pte;
-	int i;
-
-	WARN_ON(r->stolen);
-
-	gtt_slot = psb_gtt_entry(dev, r);
-	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
-			       PSB_MMU_CACHED_MEMORY);
-
-	for (i = 0; i < r->npage; i++)
-		iowrite32(pte, gtt_slot++);
-	ioread32(gtt_slot - 1);
-	set_pages_array_wb(r->pages, r->npage);
-}
-
-/**
- * psb_gtt_attach_pages - attach and pin GEM pages
- * @gt: the gtt range
- *
- * Pin and build an in kernel list of the pages that back our GEM object.
- * While we hold this the pages cannot be swapped out. This is protected
- * via the gtt mutex which the caller must hold.
- */
-static int psb_gtt_attach_pages(struct gtt_range *gt)
-{
-	struct page **pages;
-
-	WARN_ON(gt->pages);
-
-	pages = drm_gem_get_pages(&gt->gem);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	gt->npage = gt->gem.size / PAGE_SIZE;
-	gt->pages = pages;
-
-	return 0;
-}
-
-/**
- * psb_gtt_detach_pages - attach and pin GEM pages
- * @gt: the gtt range
- *
- * Undo the effect of psb_gtt_attach_pages. At this point the pages
- * must have been removed from the GTT as they could now be paged out
- * and move bus address. This is protected via the gtt mutex which the
- * caller must hold.
- */
-static void psb_gtt_detach_pages(struct gtt_range *gt)
-{
-	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
-	gt->pages = NULL;
-}
-
-/**
- * psb_gtt_pin - pin pages into the GTT
- * @gt: range to pin
- *
- * Pin a set of pages into the GTT. The pins are refcounted so that
- * multiple pins need multiple unpins to undo.
- *
- * Non GEM backed objects treat this as a no-op as they are always GTT
- * backed objects.
- */
-int psb_gtt_pin(struct gtt_range *gt)
-{
-	int ret = 0;
-	struct drm_device *dev = gt->gem.dev;
-	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	u32 gpu_base = dev_priv->gtt.gatt_start;
+	/* Install scratch page for the resource */
 
-	mutex_lock(&dev_priv->gtt_mutex);
+	pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY);
 
-	if (gt->in_gart == 0 && gt->stolen == 0) {
-		ret = psb_gtt_attach_pages(gt);
-		if (ret < 0)
-			goto out;
-		ret = psb_gtt_insert(dev, gt, 0);
-		if (ret < 0) {
-			psb_gtt_detach_pages(gt);
-			goto out;
-		}
-		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
-				     gt->pages, (gpu_base + gt->offset),
-				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
-	}
-	gt->in_gart++;
-out:
-	mutex_unlock(&dev_priv->gtt_mutex);
-	return ret;
-}
+	npages = resource_size(res) >> PAGE_SHIFT;
+	gtt_slot = psb_gtt_entry(pdev, res);
 
-/**
- * psb_gtt_unpin - Drop a GTT pin requirement
- * @gt: range to pin
- *
- * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
- * will be removed from the GTT which will also drop the page references
- * and allow the VM to clean up or page stuff.
- *
- * Non GEM backed objects treat this as a no-op as they are always GTT
- * backed objects.
- */
-void psb_gtt_unpin(struct gtt_range *gt)
-{
-	struct drm_device *dev = gt->gem.dev;
-	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	u32 gpu_base = dev_priv->gtt.gatt_start;
+	for (i = 0; i < npages; ++i, ++gtt_slot)
+		iowrite32(pte, gtt_slot);
 
-	mutex_lock(&dev_priv->gtt_mutex);
-
-	WARN_ON(!gt->in_gart);
-
-	gt->in_gart--;
-	if (gt->in_gart == 0 && gt->stolen == 0) {
-		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
-				     (gpu_base + gt->offset), gt->npage, 0, 0);
-		psb_gtt_remove(dev, gt);
-		psb_gtt_detach_pages(gt);
-	}
-
-	mutex_unlock(&dev_priv->gtt_mutex);
-}
-
-/*
- * GTT resource allocator - allocate and manage GTT address space
- */
-
-/**
- * psb_gtt_alloc_range - allocate GTT address space
- * @dev: Our DRM device
- * @len: length (bytes) of address space required
- * @name: resource name
- * @backed: resource should be backed by stolen pages
- * @align: requested alignment
- *
- * Ask the kernel core to find us a suitable range of addresses
- * to use for a GTT mapping.
- *
- * Returns a gtt_range structure describing the object, or NULL on
- * error. On successful return the resource is both allocated and marked
- * as in use.
- */
-struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
-				      const char *name, int backed, u32 align)
-{
-	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	struct gtt_range *gt;
-	struct resource *r = dev_priv->gtt_mem;
-	int ret;
-	unsigned long start, end;
-
-	if (backed) {
-		/* The start of the GTT is the stolen pages */
-		start = r->start;
-		end = r->start + dev_priv->gtt.stolen_size - 1;
-	} else {
-		/* The rest we will use for GEM backed objects */
-		start = r->start + dev_priv->gtt.stolen_size;
-		end = r->end;
-	}
-
-	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
-	if (gt == NULL)
-		return NULL;
-	gt->resource.name = name;
-	gt->stolen = backed;
-	gt->in_gart = backed;
-	/* Ensure this is set for non GEM objects */
-	gt->gem.dev = dev;
-	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
-				len, start, end, align, NULL, NULL);
-	if (ret == 0) {
-		gt->offset = gt->resource.start - r->start;
-		return gt;
-	}
-	kfree(gt);
-	return NULL;
-}
-
-/**
- * psb_gtt_free_range - release GTT address space
- * @dev: our DRM device
- * @gt: a mapping created with psb_gtt_alloc_range
- *
- * Release a resource that was allocated with psb_gtt_alloc_range. If the
- * object has been pinned by mmap users we clean this up here currently.
- */
-void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
-{
-	/* Undo the mmap pin if we are destroying the object */
-	if (gt->mmapping) {
-		psb_gtt_unpin(gt);
-		gt->mmapping = 0;
-	}
-	WARN_ON(gt->in_gart && !gt->stolen);
-	release_resource(&gt->resource);
-	kfree(gt);
+	/* Make sure all the entries are set before we return */
+	ioread32(gtt_slot - 1);
 }
 
 static void psb_gtt_alloc(struct drm_device *dev)
@@ -498,7 +303,7 @@ int psb_gtt_restore(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
 	struct resource *r = dev_priv->gtt_mem->child;
-	struct gtt_range *range;
+	struct psb_gem_object *pobj;
 	unsigned int restored = 0, total = 0, size = 0;
 
 	/* On resume, the gtt_mutex is already initialized */
@@ -506,10 +311,15 @@ int psb_gtt_restore(struct drm_device *dev)
 	psb_gtt_init(dev, 1);
 
 	while (r != NULL) {
-		range = container_of(r, struct gtt_range, resource);
-		if (range->pages) {
-			psb_gtt_insert(dev, range, 1);
-			size += range->resource.end - range->resource.start;
+		/*
+		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
+		 * struct psb_gem_object here. The type represents a GEM object and is
+		 * not related to the GTT itself.
+		 */
+		pobj = container_of(r, struct psb_gem_object, resource);
+		if (pobj->pages) {
+			psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages);
+			size += pobj->resource.end - pobj->resource.start;
 			restored++;
 		}
 		r = r->sibling;
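With this change, GTT handling in gtt.c is reduced to three plain helpers: psb_gtt_allocate_resource() reserves a range of GTT address space, while psb_gtt_insert_pages() and psb_gtt_remove_pages() write or clear the page-table entries for such a range. The pin/unpin logic that previously lived here moves into the GEM object code. The sketch below shows roughly how a caller could combine the new helpers to map a buffer's backing pages into the GTT. It is an illustration only, not the driver's actual pin path: the function name example_pin() is hypothetical, the real code in gem.c also sets up the MMU mapping and handles stolen memory, and only the psb_gem_object fields used here (resource, pages) are confirmed by the diff above.

/* Hypothetical caller built on the new helpers; not the driver's real pin path. */
static int example_pin(struct drm_psb_private *pdev, struct psb_gem_object *pobj,
                       resource_size_t size)
{
        u32 offset;
        int ret;

        /* Reserve GTT address space backed by system (non-stolen) pages. */
        ret = psb_gtt_allocate_resource(pdev, &pobj->resource, "example",
                                        size, PAGE_SIZE, false, &offset);
        if (ret)
                return ret;

        /* Write one PTE per backing page; the GTT mutex protects the table. */
        mutex_lock(&pdev->gtt_mutex);
        psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
        mutex_unlock(&pdev->gtt_mutex);

        return 0;
}

Tearing such a mapping down would be the reverse: psb_gtt_remove_pages() under the same mutex, followed by release_resource() on pobj->resource.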