Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_object.c | 126
1 file changed, 36 insertions, 90 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 316e35d3f8a9..ab81e35cb060 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -113,57 +113,29 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 			rbo->placements[c].fpfn =
 				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
 			rbo->placements[c].mem_type = TTM_PL_VRAM;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-						     TTM_PL_FLAG_UNCACHED;
+			rbo->placements[c++].flags = 0;
 		}
 
 		rbo->placements[c].fpfn = 0;
 		rbo->placements[c].mem_type = TTM_PL_VRAM;
-		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-					     TTM_PL_FLAG_UNCACHED;
+		rbo->placements[c++].flags = 0;
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_TT;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
-		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-			   (rbo->rdev->flags & RADEON_IS_AGP)) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_TT;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED;
-		} else {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_TT;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
-		}
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_TT;
+		rbo->placements[c++].flags = 0;
 	}
 
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
-		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
-		    rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED;
-		} else {
-			rbo->placements[c].fpfn = 0;
-			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
-		}
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+		rbo->placements[c++].flags = 0;
 	}
 	if (!c) {
 		rbo->placements[c].fpfn = 0;
 		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
-		rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+		rbo->placements[c++].flags = 0;
 	}
 
 	rbo->placement.num_placement = c;
@@ -334,8 +306,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
 		return -EPERM;
 
-	if (bo->pin_count) {
-		bo->pin_count++;
+	if (bo->tbo.pin_count) {
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 
@@ -367,13 +339,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
 		else
 			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
-		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
-		bo->pin_count = 1;
+		ttm_bo_pin(&bo->tbo);
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
@@ -391,36 +361,22 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
 }
 
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
 {
-	struct ttm_operation_ctx ctx = { false, false };
-	int r, i;
-
-	if (!bo->pin_count) {
-		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
-		return 0;
-	}
-	bo->pin_count--;
-	if (bo->pin_count)
-		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++) {
-		bo->placements[i].lpfn = 0;
-		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (likely(r == 0)) {
+	ttm_bo_unpin(&bo->tbo);
+	if (!bo->tbo.pin_count) {
 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
 		else
 			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
-	} else {
-		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	}
-	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
+	struct ttm_bo_device *bdev = &rdev->mman.bdev;
+	struct ttm_resource_manager *man;
+
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 #ifndef CONFIG_HIBERNATION
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -429,7 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
 		return 0;
 	}
 #endif
-	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	man = ttm_manager_type(bdev, TTM_PL_VRAM);
+	return ttm_resource_manager_evict_all(bdev, man);
 }
 
 void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -549,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	list_for_each_entry(lobj, head, tv.head) {
 		struct radeon_bo *bo = lobj->robj;
 
-		if (!bo->pin_count) {
+		if (!bo->tbo.pin_count) {
 			u32 domain = lobj->preferred_domains;
 			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
@@ -629,7 +586,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 			break;
 
 		old_object = reg->bo;
-		if (old_object->pin_count == 0)
+		if (old_object->tbo.pin_count == 0)
 			steal = i;
 	}
 
@@ -794,7 +751,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 		radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
@@ -816,8 +773,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
-	if (rbo->pin_count > 0)
-		return -EINVAL;
+	if (rbo->tbo.pin_count > 0)
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -831,34 +788,23 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, &ctx);
-	} else if (unlikely(r != 0)) {
-		return r;
+		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (likely(!r)) {
+		offset = bo->mem.start << PAGE_SHIFT;
+		/* this should never happen */
+		if ((offset + size) > rdev->mc.visible_vram_size)
+			return VM_FAULT_SIGBUS;
 	}
 
-	offset = bo->mem.start << PAGE_SHIFT;
-	/* this should never happen */
-	if ((offset + size) > rdev->mc.visible_vram_size)
-		return -EINVAL;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
-	int r;
-
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-	if (unlikely(r != 0))
-		return r;
-	if (mem_type)
-		*mem_type = bo->tbo.mem.mem_type;
-
-	r = ttm_bo_wait(&bo->tbo, true, no_wait);
-	ttm_bo_unreserve(&bo->tbo);
-	return r;
-}
-
 /**
  * radeon_bo_fence - add fence to buffer object
  *
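
Note: the hunks above replace radeon's driver-side bo->pin_count bookkeeping (and the TTM_PL_FLAG_NO_EVICT placement flag) with the common ttm_bo_pin()/ttm_bo_unpin() helpers, so the VRAM/GTT pin-size accounting in radeon_bo_unpin() now only runs once the last pin reference is dropped. The standalone C sketch below is not kernel or TTM code; toy_bo, toy_pin and toy_unpin are invented names used purely to illustrate that reference-counted pin semantic.

/* Toy model of reference-counted pinning: accounting happens on the
 * first pin and is released on the last unpin, mirroring the
 * "if (!bo->tbo.pin_count)" check in the new radeon_bo_unpin().
 * All names are invented for illustration only.
 */
#include <assert.h>
#include <stdio.h>

struct toy_bo {
	unsigned int pin_count;   /* like ttm_buffer_object pin_count */
	unsigned long size;       /* buffer size in bytes */
	unsigned long *pin_total; /* like rdev->vram_pin_size / gart_pin_size */
};

static void toy_pin(struct toy_bo *bo)
{
	/* First pin accounts the buffer as pinned; later pins only
	 * bump the counter. */
	if (bo->pin_count++ == 0)
		*bo->pin_total += bo->size;
}

static void toy_unpin(struct toy_bo *bo)
{
	assert(bo->pin_count > 0);
	/* Only the last unpin releases the accounting. */
	if (--bo->pin_count == 0)
		*bo->pin_total -= bo->size;
}

int main(void)
{
	unsigned long vram_pin_size = 0;
	struct toy_bo bo = { .size = 4096, .pin_total = &vram_pin_size };

	toy_pin(&bo);
	toy_pin(&bo);                                  /* nested pin: no extra accounting */
	printf("pinned bytes: %lu\n", vram_pin_size);  /* prints 4096 */

	toy_unpin(&bo);
	toy_unpin(&bo);                                /* last unpin drops the accounting */
	printf("pinned bytes: %lu\n", vram_pin_size);  /* prints 0 */
	return 0;
}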