Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
 -rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 262
 1 file changed, 113 insertions(+), 149 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db3dc7ef5382..75d308ec173d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,107 +69,54 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
-{
- struct ttm_device *bdev = bo->bdev;
-
- list_move_tail(&bo->lru, &bdev->pinned);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-}
-
-static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_device *bdev = bo->bdev;
-
- list_del_init(&bo->lru);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-}
-
-static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
- struct ttm_buffer_object *bo)
-{
- if (!pos->first)
- pos->first = bo;
- pos->last = bo;
-}
-
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
- struct ttm_lru_bulk_move *bulk)
+/**
+ * ttm_bo_move_to_lru_tail
+ *
+ * @bo: The buffer object.
+ *
+ * Move this BO to the tail of all lru lists used to look up and reserve an
+ * object. This function must be called with struct ttm_global::lru_lock
+ * held, and is used to make a BO less likely to be considered for eviction.
+ */
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- if (!bo->deleted)
- dma_resv_assert_held(bo->base.resv);
-
- if (bo->pin_count) {
- ttm_bo_move_to_pinned(bo);
- return;
- }
-
- if (!mem)
- return;
-
- man = ttm_manager_type(bdev, mem->mem_type);
- list_move_tail(&bo->lru, &man->lru[bo->priority]);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-
- if (bulk && !bo->pin_count) {
- switch (bo->resource->mem_type) {
- case TTM_PL_TT:
- ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
- break;
+ dma_resv_assert_held(bo->base.resv);
- case TTM_PL_VRAM:
- ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
- break;
- }
- }
+ if (bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
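For illustration, a minimal sketch of how a driver might call the reduced helper after command submission, under the locking rule stated in the kdoc above; drv_bo_mark_used() is a hypothetical name:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>

/* Bump a BO on the LRU so it becomes the least likely eviction candidate. */
static void drv_bo_mark_used(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

Note that the new signature takes only the BO; the resource and bulk move association are now tracked internally.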
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
+/**
+ * ttm_bo_set_bulk_move - update the BO's bulk move object
+ *
+ * @bo: The buffer object.
+ * @bulk: The new bulk move object, or NULL to clear it.
+ *
+ * Update the BO's bulk move object, making sure that resources are added and
+ * removed as well. A bulk move allows moving many resources on the LRU at
+ * once, greatly reducing the overhead of maintaining the LRU.
+ * The only requirement is that the resources stay together on the LRU and are
+ * never separated. This is enforced by setting the bulk_move structure on a BO.
+ * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
+ * their LRU list.
+ */
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk)
{
- unsigned i;
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
- struct ttm_resource_manager *man;
-
- if (!pos->first)
- continue;
-
- dma_resv_assert_held(pos->first->base.resv);
- dma_resv_assert_held(pos->last->base.resv);
-
- man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
- list_bulk_move_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
- }
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
- struct ttm_resource_manager *man;
-
- if (!pos->first)
- continue;
+ dma_resv_assert_held(bo->base.resv);
- dma_resv_assert_held(pos->first->base.resv);
- dma_resv_assert_held(pos->last->base.resv);
+ if (bo->bulk_move == bulk)
+ return;
- man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
- list_bulk_move_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
- }
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ bo->bulk_move = bulk;
+ if (bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
}
-EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
+EXPORT_SYMBOL(ttm_bo_set_bulk_move);
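A sketch of the intended usage pattern: a driver groups the BOs of one VM in a single bulk move object so the whole set can be moved to the LRU tail at once. struct drv_vm and the helper names are hypothetical; ttm_lru_bulk_move_init() and ttm_lru_bulk_move_tail() are the companion helpers from ttm_resource.c:

#include <drm/ttm/ttm_resource.h>

struct drv_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
};

static void drv_vm_init(struct drv_vm *vm)
{
	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

/* Called with the BO reserved; TTM keeps the resource in the bulk move. */
static void drv_vm_add_bo(struct drv_vm *vm, struct ttm_buffer_object *bo)
{
	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

/* Move every resource of the VM to the LRU tail in one step. */
static void drv_vm_move_to_lru_tail(struct ttm_device *bdev, struct drv_vm *vm)
{
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);
}

Before dropping the last BO reference, a driver is expected to clear the bulk move again with ttm_bo_set_bulk_move(bo, NULL), matching the WARN_ON_ONCE(bo->bulk_move) added to ttm_bo_release() below.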
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_resource *mem, bool evict,
@@ -204,6 +151,10 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret)
+ goto out_err;
+
ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
if (ret) {
if (ret == -EMULTIHOP)
@@ -272,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, resv, true);
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -301,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled(resv, true))
+ if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
ret = 0;
else
ret = -EBUSY;
@@ -313,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout(resv, true, interruptible,
+ lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible,
30 * HZ);
if (lret < 0)
@@ -344,7 +296,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
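The dma_resv conversions in these hunks all follow the same pattern: the former boolean "wait for all fences" argument becomes an explicit enum dma_resv_usage, where DMA_RESV_USAGE_BOOKKEEP is the widest usage class and thus covers every fence on the object. A minimal sketch, with drv_bo_wait_idle() as a hypothetical wrapper:

#include <linux/dma-resv.h>

static long drv_bo_wait_idle(struct ttm_buffer_object *bo, bool intr)
{
	/* BOOKKEEP matches all fences, like the old "true" argument did. */
	return dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				     intr, 30 * HZ);
}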
@@ -409,6 +360,7 @@ static void ttm_bo_release(struct kref *kref)
int ret;
WARN_ON_ONCE(bo->pin_count);
+ WARN_ON_ONCE(bo->bulk_move);
if (!bo->deleted) {
ret = ttm_bo_individualize_resv(bo);
@@ -416,7 +368,8 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout(bo->base.resv, true, false,
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
30 * HZ);
}
@@ -427,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -445,7 +398,7 @@ static void ttm_bo_release(struct kref *kref)
*/
if (bo->pin_count) {
bo->pin_count = 0;
- ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ ttm_resource_move_to_lru_tail(bo->resource);
}
kref_init(&bo->kref);
@@ -458,7 +411,6 @@ static void ttm_bo_release(struct kref *kref)
}
spin_lock(&bo->bdev->lru_lock);
- ttm_bo_del_from_lru(bo);
list_del(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
@@ -466,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
dma_resv_unlock(bo->base.resv);
atomic_dec(&ttm_glob.bo_count);
- dma_fence_put(bo->moving);
bo->destroy(bo);
}
@@ -673,36 +624,29 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
bool locked = false;
- unsigned i;
int ret;
spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- list_for_each_entry(bo, &man->lru[i], lru) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(bo->base.resv))
- busy_bo = bo;
- continue;
- }
-
- if (!ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- continue;
- }
- break;
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ bool busy;
+
+ if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
+ &locked, &busy)) {
+ if (busy && !busy_bo && ticket !=
+ dma_resv_locking_ctx(res->bo->base.resv))
+ busy_bo = res->bo;
+ continue;
}
- /* If the inner loop terminated early, we have our candidate */
- if (&bo->lru != &man->lru[i])
+ if (ttm_bo_get_unless_zero(res->bo)) {
+ bo = res->bo;
break;
-
- bo = NULL;
+ }
+ if (locked)
+ dma_resv_unlock(res->bo->base.resv);
}
if (!bo) {
@@ -734,10 +678,43 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
return ret;
}
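The per-priority BO list walk above is replaced by the resource cursor, which iterates a manager's resources in eviction order. A minimal sketch of the iterator, assuming lru_lock is held as in the eviction path; drv_count_lru() is hypothetical:

#include <drm/ttm/ttm_resource.h>

static unsigned int drv_count_lru(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	unsigned int count = 0;

	/* Walks all priorities in eviction order; caller holds lru_lock. */
	ttm_resource_manager_for_each_res(man, &cursor, res)
		++count;

	return count;
}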
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted anymore during memory pressure.
+ * @bo must be unpinned again by calling ttm_bo_unpin().
+ */
+void ttm_bo_pin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_pin);
+
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ if (WARN_ON_ONCE(!bo->pin_count))
+ return;
+
+ if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_unpin);
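A sketch of typical pin/unpin usage around a scanout buffer; drv_fb_pin() is a hypothetical helper, and the BO is reserved first because both calls assert the reservation:

static int drv_fb_pin(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	/* The first pin removes the resource from its bulk move, if any. */
	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	return 0;
}

Unpinning follows the same shape with ttm_bo_unpin(), which re-adds the resource to the bulk move once the pin count drops to zero.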
+
/*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as a kernel dependency and reserve a new
+ * fence slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -760,17 +737,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
- dma_resv_add_shared_fence(bo->base.resv, fence);
-
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
- dma_fence_put(bo->moving);
- bo->moving = fence;
- return 0;
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ dma_fence_put(fence);
+ return ret;
}
/*
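The ttm_bo_add_move_fence() hunk above also shows the new two-step dma_resv scheme: add the fence with an explicit usage, then reserve a slot for the next one. As a standalone sketch in the order a fresh caller needs (reserve before add, since dma_resv_add_fence() consumes a previously reserved slot); drv_attach_kernel_fence() is hypothetical:

#include <linux/dma-resv.h>

static int drv_attach_kernel_fence(struct dma_resv *resv,
				   struct dma_fence *fence)
{
	int ret;

	dma_resv_assert_held(resv);

	/* dma_resv_add_fence() requires a previously reserved fence slot. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
	return 0;
}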
@@ -821,7 +792,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -875,9 +846,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
- ttm_bo_move_to_lru_tail_unlocked(bo);
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
@@ -971,14 +939,13 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
+ bo->bulk_move = NULL;
if (resv) {
bo->base.resv = resv;
dma_resv_assert_held(bo->base.resv);
@@ -1021,8 +988,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
return ret;
}
- ttm_bo_move_to_lru_tail_unlocked(bo);
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
@@ -1072,14 +1037,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
- timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
@@ -1123,7 +1088,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);