author     Linus Torvalds <torvalds@linux-foundation.org>  2024-08-16 08:35:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-08-16 08:35:50 -0700
commit     37b20e9a5810e132a21c54f858043b22671396dd (patch)
tree       3f342e4d34684240787bd7ef35bc5d83fc46c04c
parent     d7a5aa4b3c007fae50405ca75c40258d90300e96 (diff)
parent     fee9d135e2fd5963a7f466cd1ef2060731a1ab29 (diff)
Merge tag 'drm-fixes-2024-08-16' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "Weekly drm fixes, mostly amdgpu and xe. The larger amdgpu fix is for
  a new IP block introduced in rc1, so should be fine. The xe fixes
  contain some missed fixes from the end of the previous round along
  with some fixes which required precursor changes, but otherwise
  everything seems fine.

  mediatek:
   - fix cursor crash

  amdgpu:
   - Fix MES ring buffer overflow
   - DCN 3.5 fix
   - DCN 3.2.1 fix
   - DP MST fix
   - Cursor fixes
   - JPEG fixes
   - Context ops validation
   - MES 12 fixes
   - VCN 5.0 fix
   - HDP fix

  panel:
   - dt bindings style fix
   - orientation quirks

  rockchip:
   - inno-hdmi: fix infoframe upload

  v3d:
   - fix OOB access in v3d_csd_job_run()

  xe:
   - Validate user fence during creation
   - Fix use after free when client stats are captured
   - SRIOV fixes
   - Runtime PM fixes"

* tag 'drm-fixes-2024-08-16' of https://gitlab.freedesktop.org/drm/kernel: (37 commits)
  drm/xe: Hold a PM ref when GT TLB invalidations are inflight
  drm/xe: Drop xe_gt_tlb_invalidation_wait
  drm/xe: Add xe_gt_tlb_invalidation_fence_init helper
  drm/xe/pf: Fix VF config validation on multi-GT platforms
  drm/xe: Build PM into GuC CT layer
  drm/xe/vf: Fix register value lookup
  drm/xe: Fix use after free when client stats are captured
  drm/xe: Take a ref to xe file when user creates a VM
  drm/xe: Add ref counting for xe_file
  drm/xe: Move part of xe_file cleanup to a helper
  drm/xe: Validate user fence during creation
  drm/rockchip: inno-hdmi: Fix infoframe upload
  drm/amd/amdgpu: add HDP_SD support on gc 12.0.0/1
  drm/amdgpu: Update kmd_fw_shared for VCN5
  drm/amd/amdgpu: command submission parser for JPEG
  drm/amdgpu/mes12: fix suspend issue
  drm/amdgpu/mes12: sw/hw fini for unified mes
  drm/amdgpu/mes12: configure two pipes hardware resources
  drm/amdgpu/mes12: adjust mes12 sw/hw init for multiple pipes
  drm/amdgpu/mes12: add mes pipe switch support
  ...
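As a rough illustration of the new JPEG command-submission parser mentioned above (added in jpeg_v4_0_3.c, together with the CP_PACKETJ_GET_* field macros in soc15d.h further down in this diff), the standalone sketch below decodes a PACKETJ header dword and range-checks the target register the same way. The bit layout and register window are taken from the patch; the local macro names, the sample dword, and the main() harness are illustrative only and not part of the kernel change, which additionally validates the packet's condition field per type.

```c
#include <stdint.h>
#include <stdio.h>

/* Field layout mirrors the CP_PACKETJ_GET_* macros added to soc15d.h:
 * reg in bits 0-17, res in 18-23, cond in 24-27, type in 28-31. */
#define PACKETJ_GET_REG(x)   ((x) & 0x3FFFF)
#define PACKETJ_GET_RES(x)   (((x) >> 18) & 0x3F)
#define PACKETJ_GET_COND(x)  (((x) >> 24) & 0xF)
#define PACKETJ_GET_TYPE(x)  (((x) >> 28) & 0xF)

/* Register window accepted by the parser (from jpeg_v4_0_3.h in this pull). */
#define JPEG_REG_RANGE_START 0x4000
#define JPEG_REG_RANGE_END   0x41c2

/* Return 0 if a TYPE0 packet header targets an allowed register, -1 otherwise. */
static int check_packetj_type0(uint32_t dw)
{
	uint32_t reg = PACKETJ_GET_REG(dw);

	if (PACKETJ_GET_RES(dw))	/* reserved bits must be zero */
		return -1;
	if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t sample = 0x00004084;	/* hypothetical TYPE0 header, reg 0x4084 */

	printf("type=%u reg=0x%05x ok=%d\n",
	       (unsigned)PACKETJ_GET_TYPE(sample),
	       (unsigned)PACKETJ_GET_REG(sample),
	       check_packetj_type0(sample) == 0);
	return 0;
}
```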
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c  83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c  63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c  59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c  296
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  33
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v12_api_def.h  7
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c  12
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c  4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c  14
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c  59
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h  3
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c  10
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h  7
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c  11
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_vf.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  201
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h  12
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h  4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c  10
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c  4
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c  26
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c  38
47 files changed, 699 insertions(+), 426 deletions(-)
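The DP MST fix in this series changes kbps_to_peak_pbn() in amdgpu_dm_mst_types.c (see the hunk below) to fold an FEC overhead multiplier into the PBN calculation. The following standalone sketch reproduces that arithmetic with the 8b/10b and 128b/132b multipliers defined in amdgpu_dm_mst_types.h; the example bitrate is made up, and plain integer division with manual round-up stands in for the kernel's div_u64/DIV64_U64_ROUND_UP helpers.

```c
#include <stdint.h>
#include <stdio.h>

/* Multipliers added to amdgpu_dm_mst_types.h in this pull. */
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B    1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000

/* Same arithmetic as the patched kbps_to_peak_pbn(): apply the 0.6% margin
 * (x1006/1000) and the FEC overhead (x multiplier/1000), then convert to
 * PBN units of 54/64 MBps, rounding up. */
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
	uint64_t peak_kbps = (uint64_t)kbps;

	peak_kbps *= 1006;
	peak_kbps *= fec_overhead_multiplier_x1000;
	peak_kbps /= 1000ULL * 1000ULL;
	return (int)((peak_kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	int kbps = 4147200;	/* hypothetical stream bandwidth in kbps */

	printf("pbn(8b/10b)   = %d\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B));
	printf("pbn(128b/132b) = %d\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B));
	return 0;
}
```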
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index 87c601bcf20a..032f783eefc4 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -18,12 +18,12 @@ properties:
# Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
- const: samsung,atna33xc20
- items:
- - enum:
- # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
- - samsung,atna45af01
- # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
- - samsung,atna45dc02
- - const: samsung,atna33xc20
+ - enum:
+ # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
+ - samsung,atna45af01
+ # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
+ - samsung,atna45dc02
+ - const: samsung,atna33xc20
enable-gpios: true
port: true
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9aa952f258cf..6dfdff58bffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
r = amdgpu_ring_parse_cs(ring, p, job, ib);
if (r)
return r;
+
+ if (ib->sa_bo)
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
} else {
ib->ptr = (uint32_t *)kptr;
r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 5cb33ac99f70..c43d1b6e5d66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -685,16 +685,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_QUERY_STATE2:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 82452606ae6c..c770cb201e64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -509,6 +509,16 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;
+ if (adev->enable_mes) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.compute_ring[j],
+ RESET_QUEUES, 0, 0);
+ }
+ return 0;
+ }
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -551,6 +561,18 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;
+ if (adev->enable_mes) {
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.gfx_ring[j],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ }
+ return 0;
+ }
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -995,7 +1017,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
if (amdgpu_device_skip_hw_access(adev))
return 0;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
return amdgpu_mes_rreg(adev, reg);
BUG_ON(!ring->funcs->emit_rreg);
@@ -1065,7 +1087,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
if (amdgpu_device_skip_hw_access(adev))
return;
- if (adev->mes.ring.sched.ready) {
+ if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_wreg(adev, reg, v);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c02659025656..b49b3650fd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -589,7 +589,8 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
ring = adev->rings[i];
vmhub = ring->vm_hub;
- if (ring == &adev->mes.ring ||
+ if (ring == &adev->mes.ring[0] ||
+ ring == &adev->mes.ring[1] ||
ring == &adev->umsch_mm.ring)
continue;
@@ -761,7 +762,7 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
unsigned long flags;
uint32_t seq;
- if (adev->mes.ring.sched.ready) {
+ if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
ref, mask);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index dac88d2dd70d..1cb1ec7beefe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -135,9 +135,11 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
idr_init(&adev->mes.queue_id_idr);
ida_init(&adev->mes.doorbell_ida);
spin_lock_init(&adev->mes.queue_id_lock);
- spin_lock_init(&adev->mes.ring_lock);
mutex_init(&adev->mes.mutex_hidden);
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
+ spin_lock_init(&adev->mes.ring_lock[i]);
+
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
adev->mes.vmid_mask_gfxhub = 0xffffff00;
@@ -163,36 +165,38 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
- r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
- if (r) {
- dev_err(adev->dev,
- "(%d) ring trail_fence_offs wb alloc failed\n", r);
- goto error_ids;
- }
- adev->mes.sch_ctx_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
- adev->mes.sch_ctx_ptr =
- (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) ring trail_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.sch_ctx_gpu_addr[i] =
+ adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
+ adev->mes.sch_ctx_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
- r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
- if (r) {
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- dev_err(adev->dev,
- "(%d) query_status_fence_offs wb alloc failed\n", r);
- goto error_ids;
+ r = amdgpu_device_wb_get(adev,
+ &adev->mes.query_status_fence_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) query_status_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
+ (adev->mes.query_status_fence_offs[i] * 4);
+ adev->mes.query_status_fence_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
}
- adev->mes.query_status_fence_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
- adev->mes.query_status_fence_ptr =
- (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
if (r) {
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
dev_err(adev->dev,
"(%d) read_val_offs alloc failed\n", r);
- goto error_ids;
+ goto error;
}
adev->mes.read_val_gpu_addr =
adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
@@ -212,10 +216,16 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
error_doorbell:
amdgpu_mes_doorbell_free(adev);
error:
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
-error_ids:
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+ if (adev->mes.read_val_ptr)
+ amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
idr_destroy(&adev->mes.queue_id_idr);
@@ -226,13 +236,22 @@ error_ids:
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
+ int i;
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+ if (adev->mes.read_val_ptr)
+ amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
amdgpu_mes_doorbell_free(adev);
idr_destroy(&adev->mes.pasid_idr);
@@ -1499,7 +1518,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
sizeof(ucode_prefix));
- if (adev->enable_uni_mes && pipe == AMDGPU_MES_SCHED_PIPE) {
+ if (adev->enable_uni_mes) {
snprintf(fw_name, sizeof(fw_name),
"amdgpu/%s_uni_mes.bin", ucode_prefix);
} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 2d659c612f03..0bc837dab578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -82,8 +82,8 @@ struct amdgpu_mes {
uint64_t default_process_quantum;
uint64_t default_gang_quantum;
- struct amdgpu_ring ring;
- spinlock_t ring_lock;
+ struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
+ spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];
const struct firmware *fw[AMDGPU_MAX_MES_PIPES];
@@ -112,12 +112,12 @@ struct amdgpu_mes {
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
- uint32_t sch_ctx_offs;
- uint64_t sch_ctx_gpu_addr;
- uint64_t *sch_ctx_ptr;
- uint32_t query_status_fence_offs;
- uint64_t query_status_fence_gpu_addr;
- uint64_t *query_status_fence_ptr;
+ uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
+ uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t read_val_offs;
uint64_t read_val_gpu_addr;
uint32_t *read_val_ptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index ad49cecb20b8..e6344a6b0a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -212,6 +212,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
+ sched_hw_submission = 8;
else if (ring == &adev->sdma.instance[0].page)
sched_hw_submission = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1a5439abd1a0..c87d68d4be53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -461,8 +461,11 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
- uint8_t pad2[4];
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+ struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
+ uint8_t pad3[9];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 111c380f929b..b287a82e6177 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -858,7 +858,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev)
adev->gfx.is_poweron = false;
}
- adev->mes.ring.sched.ready = false;
+ adev->mes.ring[0].sched.ready = false;
}
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 506fa8003388..2c611b8577a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3546,33 +3546,9 @@ static int gfx_v12_0_hw_init(void *handle)
return r;
}
-static int gfx_v12_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i, r = 0;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
-
- if (adev->gfx.kiq[0].ring.sched.ready)
- r = amdgpu_ring_test_helper(kiq_ring);
-
- return r;
-}
-
static int gfx_v12_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3580,8 +3556,7 @@ static int gfx_v12_0_hw_fini(void *handle)
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
- r = gfx_v12_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index b88a6fa173b3..2797fd84432b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -231,7 +231,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
- if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid, GET_INST(GC, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 26efce9aa410..edcb5351f8cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -299,7 +299,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
- if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 99adf3625657..98aa3ccd0d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -538,11 +538,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index ad524ddc9760..6ae5a784e187 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
@@ -782,11 +783,15 @@ void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -1084,6 +1089,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1248,3 +1254,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}
+
+/**
+ * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ uint32_t i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..71c54b294e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,6 +46,9 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -62,5 +65,7 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
-
+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /* __JPEG_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d694a276498a..f4daff90c770 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -646,6 +646,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index f9343642ae7e..2ea8223eb969 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -162,13 +162,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
- struct amdgpu_ring *ring = &mes->ring;
+ struct amdgpu_ring *ring = &mes->ring[0];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
- u32 status_offset;
+ u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -191,11 +191,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;
- spin_lock_irqsave(&mes->ring_lock, flags);
+ spin_lock_irqsave(&mes->ring_lock[0], flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;
+ seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ goto error_undo;
+
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -208,14 +215,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
- mes_status_pkt.api_status.api_completion_fence_value =
- ++ring->fence_drv.sync_seq;
+ mes_status_pkt.api_status.api_completion_fence_value = seq;
amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);
amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(&mes->ring_lock[0], flags);
op_str = mes_v11_0_get_op_string(x_pkt);
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
@@ -229,7 +235,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
dev_dbg(adev->dev, "MES msg=%d was emitted\n",
x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {
if (misc_op_str)
@@ -252,8 +258,12 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;
+error_undo:
+ dev_err(adev->dev, "MES ring buffer is full.\n");
+ amdgpu_ring_undo(ring);
+
error_unlock_free:
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(&mes->ring_lock[0], flags);
error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -512,9 +522,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;
- mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
+ mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
- mes->query_status_fence_gpu_addr;
+ mes->query_status_fence_gpu_addr[0];
for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
@@ -1015,7 +1025,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+ kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
return amdgpu_ring_test_helper(kiq_ring);
}
@@ -1029,7 +1039,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
else
BUG();
@@ -1071,7 +1081,7 @@ static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
ring->funcs = &mes_v11_0_ring_funcs;
@@ -1124,7 +1134,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
else
BUG();
@@ -1200,9 +1210,6 @@ static int mes_v11_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1216,12 +1223,12 @@ static int mes_v11_0_sw_fini(void *handle)
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
- amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
- &adev->mes.ring.mqd_gpu_addr,
- &adev->mes.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
+ &adev->mes.ring[0].mqd_gpu_addr,
+ &adev->mes.ring[0].mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
- amdgpu_ring_fini(&adev->mes.ring);
+ amdgpu_ring_fini(&adev->mes.ring[0]);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1332,9 +1339,9 @@ failure:
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
- if (adev->mes.ring.sched.ready) {
- mes_v11_0_kiq_dequeue(&adev->mes.ring);
- adev->mes.ring.sched.ready = false;
+ if (adev->mes.ring[0].sched.ready) {
+ mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
+ adev->mes.ring[0].sched.ready = false;
}
if (amdgpu_sriov_vf(adev)) {
@@ -1352,7 +1359,7 @@ static int mes_v11_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
goto out;
if (!adev->enable_mes_kiq) {
@@ -1397,7 +1404,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
- adev->mes.ring.sched.ready = true;
+ adev->mes.ring[0].sched.ready = true;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 0713bc3eb263..e39a58d262c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -142,19 +142,20 @@ static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
}
static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
- void *pkt, int size,
- int api_status_off)
+ int pipe, void *pkt, int size,
+ int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
- struct amdgpu_ring *ring = &mes->ring;
+ struct amdgpu_ring *ring = &mes->ring[pipe];
+ spinlock_t *ring_lock = &mes->ring_lock[pipe];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
- u32 status_offset;
+ u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -177,11 +178,18 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;
- spin_lock_irqsave(&mes->ring_lock, flags);
+ spin_lock_irqsave(ring_lock, flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;
+ seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ goto error_undo;
+
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -194,39 +202,39 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
- mes_status_pkt.api_status.api_completion_fence_value =
- ++ring->fence_drv.sync_seq;
+ mes_status_pkt.api_status.api_completion_fence_value = seq;
amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);
amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(ring_lock, flags);
op_str = mes_v12_0_get_op_string(x_pkt);
misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
- misc_op_str);
+ dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n",
+ pipe, op_str, misc_op_str);
else if (op_str)
- dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
+ dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n",
+ pipe, op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n",
- x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n",
+ pipe, x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {
if (misc_op_str)
- dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
- op_str, misc_op_str);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
+ pipe, op_str, misc_op_str);
else if (op_str)
- dev_err(adev->dev, "MES failed to respond to msg=%s\n",
- op_str);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n",
+ pipe, op_str);
else
- dev_err(adev->dev, "MES failed to respond to msg=%d\n",
- x_pkt->header.opcode);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n",
+ pipe, x_pkt->header.opcode);
while (halt_if_hws_hang)
schedule();
@@ -238,8 +246,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;
+error_undo:
+ dev_err(adev->dev, "MES ring buffer is full.\n");
+ amdgpu_ring_undo(ring);
+
error_unlock_free:
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(ring_lock, flags);
error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -254,6 +266,8 @@ static int convert_to_mes_queue_type(int queue_type)
return MES_QUEUE_TYPE_COMPUTE;
else if (queue_type == AMDGPU_RING_TYPE_SDMA)
return MES_QUEUE_TYPE_SDMA;
+ else if (queue_type == AMDGPU_RING_TYPE_MES)
+ return MES_QUEUE_TYPE_SCHQ;
else
BUG();
return -1;
@@ -311,6 +325,7 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gds_size = input->queue_size;
return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ AMDGPU_MES_SCHED_PIPE,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -330,6 +345,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -338,6 +354,7 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
+ int pipe;
memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
@@ -354,7 +371,12 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
mes_add_queue_pkt.map_legacy_kq = 1;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -363,6 +385,7 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ int pipe;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -387,7 +410,12 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -404,7 +432,7 @@ static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
return 0;
}
-static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
+static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
@@ -414,7 +442,7 @@ static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_status_pkt, sizeof(mes_status_pkt),
offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
@@ -423,6 +451,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
struct mes_misc_op_input *input)
{
union MESAPI__MISC misc_pkt;
+ int pipe;
memset(&misc_pkt, 0, sizeof(misc_pkt));
@@ -475,12 +504,17 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
return -EINVAL;
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
}
-static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
+static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
{
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt;
@@ -491,12 +525,12 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
+static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
{
int i;
struct amdgpu_device *adev = mes->adev;
@@ -508,27 +542,33 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
- mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
- mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
- mes_set_hw_res_pkt.paging_vmid = 0;
- mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
- mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
- mes->query_status_fence_gpu_addr;
-
- for (i = 0; i < MAX_COMPUTE_PIPES; i++)
- mes_set_hw_res_pkt.compute_hqd_mask[i] =
- mes->compute_hqd_mask[i];
-
- for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
-
- for (i = 0; i < MAX_SDMA_PIPES; i++)
- mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
+ mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
+ mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
+ mes_set_hw_res_pkt.paging_vmid = 0;
+
+ for (i = 0; i < MAX_COMPUTE_PIPES; i++)
+ mes_set_hw_res_pkt.compute_hqd_mask[i] =
+ mes->compute_hqd_mask[i];
+
+ for (i = 0; i < MAX_GFX_PIPES; i++)
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
+
+ for (i = 0; i < MAX_SDMA_PIPES; i++)
+ mes_set_hw_res_pkt.sdma_hqd_mask[i] =
+ mes->sdma_hqd_mask[i];
+
+ for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
+ mes_set_hw_res_pkt.aggregated_doorbells[i] =
+ mes->aggregated_doorbells[i];
+ }
- for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
- mes_set_hw_res_pkt.aggregated_doorbells[i] =
- mes->aggregated_doorbells[i];
+ mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr =
+ mes->sch_ctx_gpu_addr[pipe];
+ mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
+ mes->query_status_fence_gpu_addr[pipe];
for (i = 0; i < 5; i++) {
mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@@ -556,7 +596,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
@@ -734,16 +774,11 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
if (enable) {
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
- pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
soc21_grbm_select(adev, 3, pipe, 0, 0);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
@@ -757,8 +792,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
/* unhalt MES and activate pipe0 */
data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
if (amdgpu_emu_mode)
@@ -774,8 +808,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
data = REG_SET_FIELD(data, CP_MES_CNTL,
MES_INVALIDATE_ICACHE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
}
@@ -790,10 +823,6 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
- pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
/* me=3, queue=0 */
soc21_grbm_select(adev, 3, pipe, 0, 0);
@@ -1085,7 +1114,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+ kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
r = amdgpu_ring_test_ring(kiq_ring);
if (r) {
@@ -1101,14 +1130,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
int r;
- if (pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
- else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
else
- BUG();
+ ring = &adev->mes.ring[pipe];
- if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
+ if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) &&
(amdgpu_in_reset(adev) || adev->in_suspend)) {
*(ring->wptr_cpu_addr) = 0;
*(ring->rptr_cpu_addr) = 0;
@@ -1120,13 +1147,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return r;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
- if (adev->enable_uni_mes) {
- mes_v12_0_queue_init_register(ring);
- } else {
+ if (adev->enable_uni_mes)
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ else
r = mes_v12_0_kiq_enable_queue(adev);
- if (r)
- return r;
- }
+ if (r)
+ return r;
} else {
mes_v12_0_queue_init_register(ring);
}
@@ -1146,25 +1172,29 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v12_0_ring_init(struct amdgpu_device *adev)
+static int mes_v12_0_ring_init(struct amdgpu_device *adev, int pipe)
{
struct amdgpu_ring *ring;
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[pipe];
ring->funcs = &mes_v12_0_ring_funcs;
ring->me = 3;
- ring->pipe = 0;
+ ring->pipe = pipe;
ring->queue = 0;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
- ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
+ ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe];
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
+ else
+ ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
+
return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
}
@@ -1178,7 +1208,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
- ring->pipe = adev->enable_uni_mes ? 0 : 1;
+ ring->pipe = 1;
ring->queue = 0;
ring->adev = NULL;
@@ -1200,12 +1230,10 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;
- if (pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
- else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
else
- BUG();
+ ring = &adev->mes.ring[pipe];
if (ring->mqd_obj)
return 0;
@@ -1246,9 +1274,6 @@ static int mes_v12_0_sw_init(void *handle)
return r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
r = mes_v12_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1256,18 +1281,15 @@ static int mes_v12_0_sw_init(void *handle)
r = mes_v12_0_mqd_sw_init(adev, pipe);
if (r)
return r;
- }
- if (adev->enable_mes_kiq) {
- r = mes_v12_0_kiq_ring_init(adev);
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
+ r = mes_v12_0_kiq_ring_init(adev);
+ else
+ r = mes_v12_0_ring_init(adev, pipe);
if (r)
return r;
}
- r = mes_v12_0_ring_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1276,9 +1298,6 @@ static int mes_v12_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1286,18 +1305,21 @@ static int mes_v12_0_sw_fini(void *handle)
&adev->mes.eop_gpu_addr[pipe],
NULL);
amdgpu_ucode_release(&adev->mes.fw[pipe]);
- }
-
- amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
- &adev->gfx.kiq[0].ring.mqd_gpu_addr,
- &adev->gfx.kiq[0].ring.mqd_ptr);
- amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
- &adev->mes.ring.mqd_gpu_addr,
- &adev->mes.ring.mqd_ptr);
+ if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
+ amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj,
+ &adev->mes.ring[pipe].mqd_gpu_addr,
+ &adev->mes.ring[pipe].mqd_ptr);
+ amdgpu_ring_fini(&adev->mes.ring[pipe]);
+ }
+ }
- amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
- amdgpu_ring_fini(&adev->mes.ring);
+ if (!adev->enable_uni_mes) {
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1341,7 +1363,7 @@ static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- adev->mes.ring.sched.ready = false;
+ adev->mes.ring[0].sched.ready = false;
}
static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
@@ -1362,10 +1384,10 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
- mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);
-
if (adev->enable_uni_mes)
- return mes_v12_0_hw_init(adev);
+ mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
+ else
+ mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1392,6 +1414,14 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
if (r)
goto failure;
+ if (adev->enable_uni_mes) {
+ r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE);
+ if (r)
+ goto failure;
+
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
+ }
+
r = mes_v12_0_hw_init(adev);
if (r)
goto failure;
@@ -1405,9 +1435,15 @@ failure:
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
{
- if (adev->mes.ring.sched.ready) {
- mes_v12_0_kiq_dequeue_sched(adev);
- adev->mes.ring.sched.ready = false;
+ if (adev->mes.ring[0].sched.ready) {
+ if (adev->enable_uni_mes)
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->mes.ring[AMDGPU_MES_SCHED_PIPE],
+ RESET_QUEUES, 0, 0);
+ else
+ mes_v12_0_kiq_dequeue_sched(adev);
+
+ adev->mes.ring[0].sched.ready = false;
}
mes_v12_0_enable(adev, false);
@@ -1420,10 +1456,10 @@ static int mes_v12_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
goto out;
- if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
+ if (!adev->enable_mes_kiq) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v12_0_load_microcode(adev,
AMDGPU_MES_SCHED_PIPE, true);
@@ -1443,23 +1479,23 @@ static int mes_v12_0_hw_init(void *handle)
mes_v12_0_enable(adev, true);
}
+ /* Enable the MES to handle doorbell ring on unmapped queue */
+ mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);
+
r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;
- r = mes_v12_0_set_hw_resources(&adev->mes);
+ r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;
if (adev->enable_uni_mes)
- mes_v12_0_set_hw_resources_1(&adev->mes);
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
- /* Enable the MES to handle doorbell ring on unmapped queue */
- mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);
-
- r = mes_v12_0_query_sched_status(&adev->mes);
+ r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r) {
DRM_ERROR("MES is busy\n");
goto failure;
@@ -1472,7 +1508,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
- adev->mes.ring.sched.ready = true;
+ adev->mes.ring[0].sched.ready = true;
return 0;
@@ -1515,17 +1551,7 @@ static int mes_v12_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- if (adev->enable_uni_mes) {
- r = amdgpu_mes_init_microcode(adev, AMDGPU_MES_SCHED_PIPE);
- if (!r)
- return 0;
-
- adev->enable_uni_mes = false;
- }
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 2357ff39323f..e74e1983da53 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -76,6 +76,12 @@
((cond & 0xF) << 24) | \
((type & 0xF) << 28))
+#define CP_PACKETJ_NOP 0x60000000
+#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF)
+#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F)
+#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF)
+#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF)
+
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 7d641d0dadba..b0c3678cfb31 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -406,6 +406,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
@@ -424,6 +425,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7e7929f24ae4..983a977632ff 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2893,6 +2893,9 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
+ if (adev->dm.dc->caps.ips_support)
+ dc_allow_idle_optimizations(adev->dm.dc, true);
+
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 915eb2c08ece..2e9f6da1acdc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -804,12 +804,25 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+ u8 link_coding_cap;
+ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+ if (link_coding_cap == DP_128b_132b_ENCODING)
+ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+ return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps = div_u64(peak_kbps, 1000);
+ peak_kbps *= fec_overhead_multiplier_x1000;
+ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
@@ -910,11 +923,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1010,6 +1024,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1039,7 +1054,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1052,8 +1067,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(
- params[next_index].bw_range.max_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1082,6 +1096,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -1146,7 +1161,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1165,7 +1180,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1173,7 +1188,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index fa84d34b7373..600d6e221011 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,6 +46,9 @@
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
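As a rough worked example of the updated kbps_to_peak_pbn() conversion (illustrative numbers, not taken from the patch): an 8b/10b link applies the extra FEC overhead multiplier of 1031, while a 128b/132b link uses the neutral multiplier of 1000, so no additional FEC overhead is added there.

	/* Sketch of the arithmetic in kbps_to_peak_pbn(), illustrative only:
	 *   8b/10b:    4000000 * 1006 * 1031 / (1000 * 1000) = 4148744 kbps
	 *              DIV_ROUND_UP(4148744 * 64, 54 * 8 * 1000) = 615 PBN
	 *   128b/132b: 4000000 * 1006 * 1000 / (1000 * 1000) = 4024000 kbps
	 *              DIV_ROUND_UP(4024000 * 64, 54 * 8 * 1000) = 597 PBN
	 */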
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index ff03b1d98aa7..14a902ff3b8a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3589,7 +3589,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
@@ -3682,7 +3682,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
+ pos_cpy.x = temp_x + viewport_width;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 9a3cc0514a36..8e0588b1cf30 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1778,6 +1778,9 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 4cf2c9f30b3d..101e2fe962c6 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -97,6 +97,7 @@ enum MES_QUEUE_TYPE {
MES_QUEUE_TYPE_SDMA,
MES_QUEUE_TYPE_MAX,
+ MES_QUEUE_TYPE_SCHQ = MES_QUEUE_TYPE_MAX,
};
struct MES_API_STATUS {
@@ -242,8 +243,12 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t send_write_data : 1;
uint32_t os_tdr_timeout_override : 1;
uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+ uint32_t halt_on_misaligned_access : 1;
+ uint32_t use_add_queue_unmap_flag_addr : 1;
+ uint32_t enable_mes_sch_stb_log : 1;
+ uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
- uint32_t reserved : 15;
+ uint32_t reserved : 11;
};
uint32_t uint32_all;
};
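For reference, the four new single-bit flags are carved out of the existing reserved field, so the flag word still packs into a single 32-bit value (reserved shrinks from 15 to 11 bits: 15 - 4 = 11).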
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c16c7678237e..0830cae9a4d0 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -208,6 +208,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
+ }, { /* AYN Loki Max */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYN Loki Zero */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index ae5c6ec24a1e..77b50c56c124 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -539,8 +539,8 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
/* IGT will check if the cursor size is configured */
- drm->mode_config.cursor_width = drm->mode_config.max_width;
- drm->mode_config.cursor_height = drm->mode_config.max_height;
+ drm->mode_config.cursor_width = 512;
+ drm->mode_config.cursor_height = 512;
/* Use OVL device for all DMA memory allocations */
crtc = drm_crtc_from_index(drm, 0);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 2241e53a2946..dec6913cec5b 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -279,7 +279,6 @@ static int inno_hdmi_upload_frame(struct drm_connector *connector,
const u8 *buffer, size_t len)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
ssize_t i;
if (type != HDMI_INFOFRAME_TYPE_AVI) {
@@ -291,8 +290,7 @@ static int inno_hdmi_upload_frame(struct drm_connector *connector,
inno_hdmi_disable_frame(connector, type);
for (i = 0; i < len; i++)
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
- packed_frame[i]);
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i, buffer[i]);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9bd7453b25ad..b8682818bafa 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -315,7 +315,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- int i, csd_cfg0_reg, csd_cfg_reg_count;
+ int i, csd_cfg0_reg;
v3d->csd_job = job;
@@ -335,9 +335,17 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
v3d_switch_perfmon(v3d, &job->base);
csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
- csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
- for (i = 1; i <= csd_cfg_reg_count; i++)
+ for (i = 1; i <= 6; i++)
V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
+
+ /* Although V3D 7.1 has an eighth configuration register, we are not
+ * using it. Therefore, make sure it remains unused.
+ *
+ * XXX: Set the CFG7 register
+ */
+ if (v3d->ver >= 71)
+ V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
+
/* CFG0 write kicks off the job. */
V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 76109415eba6..f2f1d8ddb221 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -87,9 +87,55 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
spin_unlock(&xe->clients.lock);
file->driver_priv = xef;
+ kref_init(&xef->refcount);
+
return 0;
}
+static void xe_file_destroy(struct kref *ref)
+{
+ struct xe_file *xef = container_of(ref, struct xe_file, refcount);
+ struct xe_device *xe = xef->xe;
+
+ xa_destroy(&xef->exec_queue.xa);
+ mutex_destroy(&xef->exec_queue.lock);
+ xa_destroy(&xef->vm.xa);
+ mutex_destroy(&xef->vm.lock);
+
+ spin_lock(&xe->clients.lock);
+ xe->clients.count--;
+ spin_unlock(&xe->clients.lock);
+
+ xe_drm_client_put(xef->client);
+ kfree(xef);
+}
+
+/**
+ * xe_file_get() - Take a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Anyone with a pointer to xef must take a reference to the xe file
+ * object using this call.
+ *
+ * Return: xe file pointer
+ */
+struct xe_file *xe_file_get(struct xe_file *xef)
+{
+ kref_get(&xef->refcount);
+ return xef;
+}
+
+/**

+ * xe_file_put() - Drop a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Used to drop a reference to the xef object
+ */
+void xe_file_put(struct xe_file *xef)
+{
+ kref_put(&xef->refcount, xe_file_destroy);
+}
+
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -98,6 +144,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
struct xe_exec_queue *q;
unsigned long idx;
+ xe_pm_runtime_get(xe);
+
/*
* No need for exec_queue.lock here as there is no contention for it
* when FD is closing as IOCTLs presumably can't be modifying the
@@ -108,21 +156,14 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
- xa_destroy(&xef->exec_queue.xa);
- mutex_destroy(&xef->exec_queue.lock);
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
mutex_unlock(&xef->vm.lock);
- xa_destroy(&xef->vm.xa);
- mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
+ xe_file_put(xef);
- xe_drm_client_put(xef->client);
- kfree(xef);
+ xe_pm_runtime_put(xe);
}
static const struct drm_ioctl_desc xe_ioctls[] = {
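A minimal sketch of how the new xe_file reference counting is intended to be used by code that caches an xef pointer. The struct and helpers below are hypothetical and only for illustration; just xe_file_get()/xe_file_put() come from this patch.

	/* Hypothetical holder of a cached xe_file pointer, for illustration. */
	struct example_holder {
		struct xe_file *xef;
	};

	static void example_holder_init(struct example_holder *h, struct xe_file *xef)
	{
		/* Anyone storing an xef pointer must take a reference first. */
		h->xef = xe_file_get(xef);
	}

	static void example_holder_fini(struct example_holder *h)
	{
		/* The final xe_file_put() frees the xe_file via xe_file_destroy(). */
		xe_file_put(h->xef);
	}

This mirrors what the patch does for exec queues (q->xef) and VMs (vm->xef).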
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bb07f5669dbb..b3952718b3c1 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -170,4 +170,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
void xe_device_declare_wedged(struct xe_device *xe);
+struct xe_file *xe_file_get(struct xe_file *xef);
+void xe_file_put(struct xe_file *xef);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 3bca6d344744..cbc582bcc90a 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -566,6 +566,9 @@ struct xe_file {
/** @client: drm client */
struct xe_drm_client *client;
+
+ /** @refcount: ref count of this xe file */
+ struct kref refcount;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 6a26923fa10e..7ddd59908334 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
- xa_for_each(&xef->exec_queue.xa, i, q) {
+ xa_for_each(&xef->exec_queue.xa, i, q)
xe_exec_queue_update_run_ticks(q);
- xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
- q->old_run_ticks = q->run_ticks;
- }
mutex_unlock(&xef->exec_queue.lock);
/* Get the total GPU cycles */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0ba37835849b..a39384bb9553 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -37,6 +37,10 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
if (q->vm)
xe_vm_put(q->vm);
+
+ if (q->xef)
+ xe_file_put(q->xef);
+
kfree(q);
}
@@ -649,6 +653,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
args->exec_queue_id = id;
+ q->xef = xe_file_get(xef);
return 0;
@@ -762,6 +767,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
+ struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
@@ -773,6 +779,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
+ xef = q->vm->xef;
+
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -783,7 +791,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- q->run_ticks += (new_ts - old_ts) * q->width;
+ xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
void xe_exec_queue_kill(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..a35ce24c9798 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -38,6 +38,9 @@ enum xe_exec_queue_priority {
* a kernel object.
*/
struct xe_exec_queue {
+	/** @xef: Back pointer to xe file if this is a user-created exec queue */
+ struct xe_file *xef;
+
/** @gt: graphics tile this exec queue can submit to */
struct xe_gt *gt;
/**
@@ -139,10 +142,6 @@ struct xe_exec_queue {
* Protected by @vm's resv. Unused if @vm == NULL.
*/
u64 tlb_flush_seqno;
- /** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
- u64 old_run_ticks;
- /** @run_ticks: hw engine class run time in ticks for this exec queue */
- u64 run_ticks;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 4699b7836001..b6f0a7299c03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1927,6 +1927,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool is_primary = !xe_gt_is_media_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -1935,13 +1936,17 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_dbs = pf_get_vf_config_dbs(gt, vfid);
/* note that GuC doorbells are optional */
- valid_any = valid_ggtt || valid_ctxs || valid_dbs;
- valid_all = valid_ggtt && valid_ctxs;
+ valid_any = valid_ctxs || valid_dbs;
+ valid_all = valid_ctxs;
+
+ /* and GGTT/LMEM is configured on primary GT only */
+ valid_all = valid_all && valid_ggtt;
+ valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
- valid_any = valid_any || valid_lmem;
+ valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 41e46a00c01e..8892d6c2291e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -850,7 +850,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- return bsearch(&key, runtime->regs, runtime->regs_size, sizeof(key),
+ return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
vf_runtime_reg_cmp);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index d9359976ab8b..481d83d07367 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -13,10 +13,13 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
/*
* TLB inval depends on pending commands in the CT queue and then the real
* invalidation time. Double up the time to process full CT queue
@@ -33,6 +36,24 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
return hw_tlb_timeout + 2 * delay;
}
+static void
+__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+ xe_gt_tlb_invalidation_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ list_del(&fence->link);
+ __invalidation_fence_signal(xe, fence);
+}
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
@@ -54,10 +75,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
fence->seqno, gt->tlb_invalidation.seqno_recv);
- list_del(&fence->link);
fence->base.error = -ETIME;
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
+ invalidation_fence_signal(xe, fence);
}
if (!list_empty(&gt->tlb_invalidation.pending_fences))
queue_delayed_work(system_wq,
@@ -87,21 +106,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
return 0;
}
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
* @gt: graphics tile
@@ -111,7 +115,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
struct xe_gt_tlb_invalidation_fence *fence, *next;
- struct xe_guc *guc = &gt->uc.guc;
int pending_seqno;
/*
@@ -134,7 +137,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
else
pending_seqno = gt->tlb_invalidation.seqno - 1;
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link)
@@ -165,6 +167,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
int seqno;
int ret;
+ xe_gt_assert(gt, fence);
+
/*
* XXX: The seqno algorithm relies on TLB invalidation being processed
* in order which they currently are, if that changes the algorithm will
@@ -173,10 +177,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
mutex_lock(&guc->ct.lock);
seqno = gt->tlb_invalidation.seqno;
- if (fence) {
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- }
+ fence->seqno = seqno;
+ trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
action[1] = seqno;
ret = xe_guc_ct_send_locked(&guc->ct, action, len,
G2H_LEN_DW_TLB_INVALIDATE, 1);
@@ -209,7 +211,6 @@ static int send_tlb_invalidation(struct xe_guc *guc,
TLB_INVALIDATION_SEQNO_MAX;
if (!gt->tlb_invalidation.seqno)
gt->tlb_invalidation.seqno = 1;
- ret = seqno;
}
mutex_unlock(&guc->ct.lock);
@@ -223,14 +224,16 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
* @gt: graphics tile
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
*
* Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ * caller can use the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: 0 on success, negative error code on error
*/
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
{
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
@@ -238,7 +241,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
- return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+ return send_tlb_invalidation(&gt->uc.guc, fence, action,
ARRAY_SIZE(action));
}
@@ -257,13 +260,17 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
- int seqno;
-
- seqno = xe_gt_tlb_invalidation_guc(gt);
- if (seqno <= 0)
- return seqno;
+ struct xe_gt_tlb_invalidation_fence fence;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+ ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence);
+ return ret;
+ }
- xe_gt_tlb_invalidation_wait(gt, seqno);
+ xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
if (IS_SRIOV_VF(xe))
return 0;
@@ -290,18 +297,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
*
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
+ * completion
* @start: start address
* @end: end address
* @asid: address space id
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -312,11 +317,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
+ xe_gt_assert(gt, fence);
+
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
- if (fence)
- __invalidation_fence_signal(xe, fence);
-
+ __invalidation_fence_signal(xe, fence);
return 0;
}
@@ -382,12 +387,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
* @vma: VMA to invalidate
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -401,43 +404,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
}
/**
- * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
- * @gt: graphics tile
- * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
- *
- * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
- *
- * Return: 0 on success, -ETIME on TLB invalidation timeout
- */
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
- struct xe_guc *guc = &gt->uc.guc;
- int ret;
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist)
- return 0;
-
- /*
- * XXX: See above, this algorithm only works if seqno are always in
- * order
- */
- ret = wait_event_timeout(guc->ct.wq,
- tlb_invalidation_seqno_past(gt, seqno),
- tlb_timeout_jiffies(gt));
- if (!ret) {
- struct drm_printer p = xe_gt_err_printer(gt);
-
- xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
- seqno, gt->tlb_invalidation.seqno_recv);
- xe_guc_ct_print(&guc->ct, &p, true);
- return -ETIME;
- }
-
- return 0;
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
@@ -480,12 +446,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
- /*
- * wake_up_all() and wait_event_timeout() already have the correct
- * barriers.
- */
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
@@ -508,3 +469,59 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
+
+static const char *
+invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "invalidation_fence";
+}
+
+static const struct dma_fence_ops invalidation_fence_ops = {
+ .get_driver_name = invalidation_fence_get_driver_name,
+ .get_timeline_name = invalidation_fence_get_timeline_name,
+};
+
+/**
+ * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
+ * @gt: GT
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
+ *
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+ * must be called if fence is not signaled.
+ */
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
+ spin_lock_irq(&gt->tlb_invalidation.lock);
+ dma_fence_init(&fence->base, &invalidation_fence_ops,
+ &gt->tlb_invalidation.lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&gt->tlb_invalidation.lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->gt = gt;
+}
+
+/**
+ * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+ * @fence: TLB invalidation fence to finalize
+ *
+ * Drop the PM ref which the fence took during init.
+ */
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ xe_pm_runtime_put(gt_to_xe(fence->gt));
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index bf3bebd9f985..a84065fa324c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -23,7 +23,17 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack);
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
+
#endif /* _XE_GT_TLB_INVALIDATION_ */
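A minimal sketch of the stack-fence pattern these helpers enable, mirroring the updated xe_gt_tlb_invalidation_ggtt() and xe_vm_invalidate_vma() paths in this series. The function name example_invalidate_and_wait() is hypothetical; the helpers it calls are the ones declared above.

	/* Sketch: issue a range TLB invalidation and wait on a stack fence. */
	static int example_invalidate_and_wait(struct xe_gt *gt, u64 start, u64 end, u32 asid)
	{
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		/* stack == true: no extra dma_fence reference is taken */
		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

		ret = xe_gt_tlb_invalidation_range(gt, &fence, start, end, asid);
		if (ret < 0) {
			/* Never issued: drop the PM ref taken in _init() ourselves. */
			xe_gt_tlb_invalidation_fence_fini(&fence);
			return ret;
		}

		/* On completion the done handler signals the fence and drops the PM ref. */
		xe_gt_tlb_invalidation_fence_wait(&fence);
		return 0;
	}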
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
index 934c828efe31..de6e825e0851 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -8,6 +8,8 @@
#include <linux/dma-fence.h>
+struct xe_gt;
+
/**
* struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
*
@@ -17,6 +19,8 @@
struct xe_gt_tlb_invalidation_fence {
/** @base: dma fence base */
struct dma_fence base;
+	/** @gt: GT which the fence belongs to */
+ struct xe_gt *gt;
/** @link: link into list of pending tlb fences */
struct list_head link;
/** @seqno: seqno of TLB invalidation to signal fence one */
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 7d2e937da1d8..64afc90ad2c5 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
state == XE_GUC_CT_STATE_STOPPED);
+ if (ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
ct->g2h_outstanding = 0;
ct->state = state;
@@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+ (g2h_len && num_g2h));
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
+ if (!ct->g2h_outstanding)
+ xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
ct->ctbs.g2h.info.space -= g2h_len;
ct->g2h_outstanding += num_g2h;
}
@@ -511,7 +518,8 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
ct->ctbs.g2h.info.space += g2h_len;
- --ct->g2h_outstanding;
+ if (!--ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 8d7e7f4bbff7..6398629e6b4e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1393,6 +1393,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
default:
XE_WARN_ON("Unknown message type");
}
+
+ xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data)));
}
static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1482,6 +1484,8 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
u32 opcode)
{
+ xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
+
INIT_LIST_HEAD(&msg->link);
msg->opcode = opcode;
msg->private_data = q;
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ade9e7a3a0ad..31a751a5de3f 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1115,23 +1115,6 @@ struct invalidation_fence {
u32 asid;
};
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
static void invalidation_fence_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -1170,15 +1153,8 @@ static int invalidation_fence_init(struct xe_gt *gt,
trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
-
- INIT_LIST_HEAD(&ifence->base.link);
+ xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
- dma_fence_get(&ifence->base.base); /* Ref for caller */
ifence->fence = fence;
ifence->gt = gt;
ifence->start = start;
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index c4e018aa2982..e8d31e010860 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -53,14 +53,18 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
u64 value)
{
struct xe_user_fence *ufence;
+ u64 __user *ptr = u64_to_user_ptr(addr);
+
+ if (!access_ok(ptr, sizeof(ptr)))
+ return ERR_PTR(-EFAULT);
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ufence->xe = xe;
kref_init(&ufence->refcount);
- ufence->addr = u64_to_user_ptr(addr);
+ ufence->addr = ptr;
ufence->value = value;
ufence->mm = current->mm;
mmgrab(ufence->mm);
@@ -183,8 +187,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
} else {
sync->ufence = user_fence_create(xe, sync_in.addr,
sync_in.timeline_value);
- if (XE_IOCTL_DBG(xe, !sync->ufence))
- return -ENOMEM;
+ if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+ return PTR_ERR(sync->ufence);
}
break;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..c7561a56abaf 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1601,6 +1601,10 @@ static void vm_destroy_work_func(struct work_struct *w)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
+
+ if (vm->xef)
+ xe_file_put(vm->xef);
+
kfree(vm);
}
@@ -1916,7 +1920,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
}
args->vm_id = id;
- vm->xef = xef;
+ vm->xef = xe_file_get(xef);
/* Record BO memory for VM pagetable created against client */
for_each_tile(tile, xe, id)
@@ -3337,10 +3341,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
struct xe_tile *tile;
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
u32 tile_needs_invalidate = 0;
- int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
- int ret;
+ int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
@@ -3365,29 +3369,33 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
for_each_tile(tile, xe, id) {
if (xe_pt_zap_ptes(tile, vma)) {
- tile_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[id], true);
+
/*
* FIXME: We potentially need to invalidate multiple
* GTs within the tile
*/
- seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
- if (seqno[id] < 0)
- return seqno[id];
- }
- }
+ ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+ &fence[id], vma);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+ goto wait;
+ }
- for_each_tile(tile, xe, id) {
- if (tile_needs_invalidate & BIT(id)) {
- ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
- if (ret < 0)
- return ret;
+ tile_needs_invalidate |= BIT(id);
}
}
+wait:
+ for_each_tile(tile, xe, id)
+ if (tile_needs_invalidate & BIT(id))
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
vma->tile_invalidated = vma->tile_mask;
- return 0;
+ return ret;
}
struct xe_vm_snapshot {