author     Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>    2023-08-22 10:33:34 -0700
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-21 11:40:27 -0500
commit     9e9526352d6f7f94a4348cebce9859dfebed1dea (patch)
tree       3b4d148c1b2b2fe0546757509471961db9be991d /drivers/gpu/drm/xe/xe_exec_queue.c
parent     923e42381745f55ba27a8805a055b51139af6830 (diff)
drm/xe: standardize vm-less kernel submissions
The only submission in the driver that currently doesn't use a VM is the WA
setup. We still pass a VM structure (the migration one), but we don't actually
use it at submission time; instead we have a hack to use the GGTT for this
particular engine. Rather than special-casing the WA engine, we can skip
providing a VM and use its absence as the selector for whether to use GGTT or
PPGTT.

As part of this change, we can drop the special engine flag for the WA engine
and switch the WA submission to the standard job functions instead of
dedicated ones.

v2: rebased on s/engine/exec_queue

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230822173334.1664332-4-daniele.ceraolospurio@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
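The vm-as-selector idea in the message can be illustrated with a minimal
sketch. This is not code from the patch: submit_job(), emit_batch_ggtt() and
emit_batch_ppgtt() are hypothetical names used only to show keying GGTT vs.
PPGTT addressing off the presence of a VM on the queue.

/*
 * Hypothetical sketch of the selection described above; only q->vm is a
 * real field from the driver, the helpers below are placeholders.
 */
static void submit_job(struct xe_exec_queue *q, u64 batch_addr)
{
	if (!q->vm)
		/* vm-less (kernel) submission: batch_addr is a GGTT offset */
		emit_batch_ggtt(q, batch_addr);
	else
		/* normal submission: batch_addr is a PPGTT virtual address */
		emit_batch_ppgtt(q, batch_addr);
}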
Diffstat (limited to 'drivers/gpu/drm/xe/xe_exec_queue.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 867465b0c57b..f28bceceb99a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -95,7 +95,7 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 	 * can perform GuC CT actions when needed. Caller is expected to
 	 * have already grabbed the rpm ref outside any sensitive locks.
 	 */
-	if (q->flags & EXEC_QUEUE_FLAG_VM)
+	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
 		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
 
 	return q;
@@ -174,7 +174,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
 		xe_lrc_finish(q->lrc + i);
 	if (q->vm)
 		xe_vm_put(q->vm);
-	if (q->flags & EXEC_QUEUE_FLAG_VM)
+	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
 		xe_device_mem_access_put(gt_to_xe(q->gt));
 
 	kfree(q);
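Both hunks guard the device memory-access reference with the same flag test.
As a hedged reading only, the shared condition could be written as a helper;
this function is not part of the patch, it simply restates the check used at
the create and fini sites above.

/*
 * Hypothetical helper, not in the xe driver: mirrors the condition used at
 * both the __xe_exec_queue_create() and xe_exec_queue_fini() sites above.
 */
static bool exec_queue_holds_mem_access_ref(struct xe_exec_queue *q)
{
	return !(q->flags & EXEC_QUEUE_FLAG_PERMANENT) &&
	       (q->flags & EXEC_QUEUE_FLAG_VM);
}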