Diffstat (limited to 'drivers/gpu/drm/msm/disp')
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 592
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 97
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 112
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 79
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 753
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 157
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 95
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 94
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 23
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 215
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 80
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 161
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 62
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 25
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 279
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 115
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 245
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 260
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 170
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 78
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 93
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c | 78
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h | 31
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 64
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 81
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 252
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 22
50 files changed, 3349 insertions(+), 1269 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 90ae6c9ccc95..b5b6e7031fb9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -10,46 +10,42 @@
/**
* dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: none
*/
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+void dpu_core_irq_preinstall(struct msm_kms *kms);
/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: none
*/
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+void dpu_core_irq_uninstall(struct msm_kms *kms);
/**
* dpu_core_irq - core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: interrupt handling status
*/
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+irqreturn_t dpu_core_irq(struct msm_kms *kms);
/**
* dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @clear: True to clear the irq after read
* @return: non-zero if irq detected; otherwise no irq detected
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
- int irq_idx,
- bool clear);
+ int irq_idx);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @irq_cb: IRQ callback structure, containing callback function
- * and argument. Passing NULL for irq_cb will unregister
- * the callback for the given irq_idx
- * This must exist until un-registration.
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
@@ -57,25 +53,21 @@ u32 dpu_core_irq_read(
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
- struct dpu_irq_callback *irq_cb);
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg);
/**
* dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @irq_cb: IRQ callback structure, containing callback function
- * and argument. Passing NULL for irq_cb will unregister
- * the callback for the given irq_idx
- * This must match with registration.
* @return: 0 for success unregistering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
- int irq_idx,
- struct dpu_irq_callback *irq_cb);
+ int irq_idx);
/**
* dpu_debugfs_core_irq_init - register core irq debugfs
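A minimal usage sketch of the reworked registration API above. The handler name, its body, and the irq index are hypothetical; only the dpu_core_irq_register_callback()/dpu_core_irq_unregister_callback() signatures come from this header:

/* Hypothetical handler matching the new callback signature. */
static void example_pp_done_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	/* e.g. wake anyone blocked on a kickoff wait */
	wake_up_all(&phys_enc->pending_kickoff_wq);
}

	/* Callback function and argument are now passed directly ... */
	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
					     example_pp_done_irq, phys_enc);

	/* ... and unregistering needs only the irq index. */
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);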
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 7763558ef566..b56f777dbd0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -204,7 +204,8 @@ static int dpu_crtc_get_crc(struct drm_crtc *crtc)
rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
if (rc) {
- DRM_DEBUG_DRIVER("MISR read failed\n");
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
return rc;
}
}
@@ -869,6 +870,13 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (!dpu_encoder_is_valid_for_commit(encoder)) {
+ DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
+ goto end;
+ }
+ }
/*
* Encoder will flush/start now, unless it has a tx pending. If so, it
* may delay and flush at an irq event (e.g. ppdone)
@@ -891,6 +899,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_encoder_kickoff(encoder);
reinit_completion(&dpu_crtc->frame_done_comp);
+
+end:
DPU_ATRACE_END("crtc_commit");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 3940b9c6323b..52516eb20cb8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -21,6 +23,8 @@
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -34,18 +38,6 @@
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
- (p) ? (p)->parent->base.id : -1, \
- (p) ? (p)->intf_idx - INTF_0 : -1, \
- (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
- ##__VA_ARGS__)
-
-#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
- (p) ? (p)->parent->base.id : -1, \
- (p) ? (p)->intf_idx - INTF_0 : -1, \
- (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
- ##__VA_ARGS__)
-
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -135,6 +127,8 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. The number
* of pingpong blocks can differ from num_phys_encs.
+ * @hw_dsc: Handle to the DSC blocks used for the display.
+ * @dsc_mask: Bitmask of used DSC blocks.
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -168,6 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -180,6 +175,9 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+
+ unsigned int dsc_mask;
bool intfs_swapped;
@@ -206,6 +204,11 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
+
+ bool wide_bus_en;
+
+ /* DSC configuration */
+ struct msm_display_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -214,6 +217,14 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->wide_bus_en;
+}
+
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
struct dpu_hw_dither_cfg dither_cfg = { 0 };
@@ -240,12 +251,30 @@ static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}
+static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
+{
+ switch (intf_mode) {
+ case INTF_MODE_VIDEO:
+ return "INTF_MODE_VIDEO";
+ case INTF_MODE_CMD:
+ return "INTF_MODE_CMD";
+ case INTF_MODE_WB_BLOCK:
+ return "INTF_MODE_WB_BLOCK";
+ case INTF_MODE_WB_LINE:
+ return "INTF_MODE_WB_LINE";
+ default:
+ return "INTF_MODE_UNKNOWN";
+ }
+}
+
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
- DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
- DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
- phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+ DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent),
+ dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
+ phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
@@ -257,73 +286,69 @@ static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info)
{
- struct dpu_encoder_irq *irq;
u32 irq_status;
int ret;
- if (!wait_info || intr_idx >= INTR_IDX_MAX) {
+ if (!wait_info) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
- irq = &phys_enc->irq[intr_idx];
-
/* note: do master / slave checking outside */
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
+ DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq);
return -EWOULDBLOCK;
}
- if (irq->irq_idx < 0) {
- DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->name);
+ if (irq < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
+ DRMID(phys_enc->parent), func);
return 0;
}
- DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
- irq->irq_idx,
+ irq,
wait_info);
if (ret <= 0) {
- irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
- irq->irq_idx, true);
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
if (irq_status) {
unsigned long flags;
- DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx,
+ DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
- irq->cb.func(phys_enc, irq->irq_idx);
+ func(phys_enc, irq);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
- DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx,
+ DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
- intr_idx, irq->irq_idx,
+ func, irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
@@ -331,70 +356,6 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
return ret;
}
-int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx)
-{
- struct dpu_encoder_irq *irq;
- int ret = 0;
-
- if (intr_idx >= INTR_IDX_MAX) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
- irq = &phys_enc->irq[intr_idx];
-
- if (irq->irq_idx < 0) {
- DPU_ERROR_PHYS(phys_enc,
- "invalid IRQ index:%d\n", irq->irq_idx);
- return -EINVAL;
- }
-
- ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
- &irq->cb);
- if (ret) {
- DPU_ERROR_PHYS(phys_enc,
- "failed to register IRQ callback for %s\n",
- irq->name);
- irq->irq_idx = -EINVAL;
- return ret;
- }
-
- trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
-
- return ret;
-}
-
-int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx)
-{
- struct dpu_encoder_irq *irq;
- int ret;
-
- irq = &phys_enc->irq[intr_idx];
-
- /* silently skip irqs that weren't registered */
- if (irq->irq_idx < 0) {
- DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
- return 0;
- }
-
- ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
- &irq->cb);
- if (ret) {
- DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx, ret);
- }
-
- trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
-
- return 0;
-}
-
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -501,6 +462,22 @@ void dpu_encoder_helper_split_config(
}
}
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int i, intf_count = 0, num_dsc = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
+ if (dpu_enc->dsc)
+ num_dsc = 2;
+
+ return (num_dsc > 0) && (num_dsc > intf_count);
+}
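Evaluating this predicate under the only supported 2:2:1 topology (the counts are illustrative): a single-interface panel gives num_dsc = 2 > intf_count = 1, so DSC merge is in use; a hypothetical dual-interface panel would give 2 > 2 = false, i.e. one DSC per interface and no merging.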
+
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
@@ -541,8 +518,21 @@ static struct msm_display_topology dpu_encoder_get_topology(
topology.num_enc = 0;
topology.num_intf = intf_count;
+ if (dpu_enc->dsc) {
+ /* In case of Display Stream Compression (DSC), we would use
+ * 2 encoders, 2 layer mixers and 1 interface
+ * this is power optimal and can drive up to (including) 4k
+ * screens
+ */
+ topology.num_enc = 2;
+ topology.num_dsc = 2;
+ topology.num_intf = 1;
+ topology.num_lm = 2;
+ }
+
return topology;
}
+
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -929,6 +919,40 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.prepare_wb_job)
+ phys->ops.prepare_wb_job(phys, job);
+
+ }
+}
+
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.cleanup_wb_job)
+ phys->ops.cleanup_wb_job(phys, job);
+
+ }
+}
+
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -942,7 +966,9 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm, num_ctl, num_pp;
+ struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp, num_dsc;
+ unsigned int dsc_mask = 0;
int i;
if (!drm_enc) {
@@ -980,6 +1006,18 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
+ if (dpu_enc->dsc) {
+ num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSC,
+ hw_dsc, ARRAY_SIZE(hw_dsc));
+ for (i = 0; i < num_dsc; i++) {
+ dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+ dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
+ }
+ }
+
+ dpu_enc->dsc_mask = dsc_mask;
+
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
@@ -1015,9 +1053,18 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
- if (!phys->hw_intf) {
+ if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
+ phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
+
+ if (!phys->hw_intf && !phys->hw_wb) {
DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n", i);
+ "no intf or wb block assigned at idx: %d\n", i);
+ return;
+ }
+
+ if (phys->hw_intf && phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid phys both intf and wb block at idx: %d\n", i);
return;
}
@@ -1165,16 +1212,35 @@ static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
{
int i = 0;
- for (i = 0; i < catalog->intf_count; i++) {
- if (catalog->intf[i].type == type
- && catalog->intf[i].controller_id == controller_id) {
- return catalog->intf[i].id;
+ if (type != INTF_WB) {
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
}
}
return INTF_MAX;
}
+static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ if (type != INTF_WB)
+ goto end;
+
+ for (i = 0; i < catalog->wb_count; i++) {
+ if (catalog->wb[i].id == controller_id)
+ return catalog->wb[i].id;
+ }
+
+end:
+ return WB_MAX;
+}
+
static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1288,8 +1354,9 @@ static void dpu_encoder_frame_done_callback(
* suppress frame_done without waiter,
* likely autorefresh
*/
- trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
- event, ready_phys->intf_idx);
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
+ dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
+ ready_phys->intf_idx, ready_phys->wb_idx);
return;
}
@@ -1367,9 +1434,11 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
if (ctl->ops.get_pending_flush)
ret = ctl->ops.get_pending_flush(ctl);
- trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
- pending_kickoff_cnt, ctl->idx,
- extra_flush_bits, ret);
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc),
+ dpu_encoder_helper_get_intf_type(phys->intf_mode),
+ phys->intf_idx, phys->wb_idx,
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
}
/**
@@ -1677,6 +1746,95 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
+static u32
+dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+ u32 enc_ip_width)
+{
+ int ssm_delay, total_pixels, soft_slice_per_enc;
+
+ soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+
+ /*
+ * minimum number of initial line pixels is a sum of:
+ * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+ * 91 for 10 bpc) * 3
+ * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+ * 3. the initial xmit delay
+ * 4. total pipeline delay through the "lock step" of encoder (47)
+ * 5. 6 additional pixels as the output of the rate buffer is
+ * 48 bits wide
+ */
+ ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+ if (soft_slice_per_enc > 1)
+ total_pixels += (ssm_delay * 3);
+ return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+}
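A worked instance of the sum described in the comment above; all numbers are assumed, not taken from a real panel:

/*
 * Assumed 8 bpc stream: slice_width = 540, initial_xmit_delay = 512,
 * enc_ip_width = 1080.
 *
 *   soft_slice_per_enc = 1080 / 540              = 2
 *   ssm_delay          = 84   (bits_per_component < 10)
 *   total_pixels       = 84 * 3 + 512 + 47       = 811
 *   two soft slices   -> 811 + 84 * 3            = 1063
 *   initial_lines      = DIV_ROUND_UP(1063, 540) = 2
 */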
+
+static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
+ struct dpu_hw_pingpong *hw_pp,
+ struct msm_display_dsc_config *dsc,
+ u32 common_mode,
+ u32 initial_lines)
+{
+ if (hw_dsc->ops.dsc_config)
+ hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
+
+ if (hw_dsc->ops.dsc_config_thresh)
+ hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+ if (hw_pp->ops.setup_dsc)
+ hw_pp->ops.setup_dsc(hw_pp);
+
+ if (hw_pp->ops.enable_dsc)
+ hw_pp->ops.enable_dsc(hw_pp);
+}
+
+static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_dsc_config *dsc)
+{
+ /* coding only for 2LM, 2enc, 1 dsc config */
+ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ int this_frame_slices;
+ int intf_ip_w, enc_ip_w;
+ int dsc_common_mode;
+ int pic_width;
+ u32 initial_lines;
+ int i;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_pp[i] = dpu_enc->hw_pp[i];
+ hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+ if (!hw_pp[i] || !hw_dsc[i]) {
+ DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
+ return;
+ }
+ }
+
+ dsc_common_mode = 0;
+ pic_width = dsc->drm->pic_width;
+
+ dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ dsc_common_mode |= DSC_MODE_VIDEO;
+
+ this_frame_slices = pic_width / dsc->drm->slice_width;
+ intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+
+ /*
+ * dsc merge case: when using 2 encoders for the same stream,
+ * no. of slices need to be same on both the encoders.
+ */
+ enc_ip_w = intf_ip_w / 2;
+ initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
+}
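To make the slice arithmetic above concrete, assume a 4k-wide mode (illustrative values only):

/*
 * pic_width = 3840, slice_width = 960:
 *   this_frame_slices = 3840 / 960 = 4
 *   intf_ip_w         = 4 * 960    = 3840
 *   enc_ip_w          = 3840 / 2   = 1920
 * Each of the two encoders compresses two slices, satisfying the
 * requirement above that both encoders carry the same number of slices.
 */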
+
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1708,6 +1866,30 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
+
+ if (dpu_enc->dsc)
+ dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
+}
+
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned int i;
+ struct dpu_encoder_phys *phys;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
+ DPU_DEBUG("invalid FB not kicking off\n");
+ return false;
+ }
+ }
+ }
+
+ return true;
}
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -1751,6 +1933,102 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
DPU_ATRACE_END("encoder_kickoff");
}
+static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_mixer_cfg mixer;
+ int i, num_lm;
+ u32 flush_mask = 0;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+
+ memset(&mixer, 0, sizeof(mixer));
+
+ /* reset all mixers for this encoder */
+ if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+ phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+ global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
+
+ num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
+ phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+
+ for (i = 0; i < num_lm; i++) {
+ hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
+ flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
+ if (phys_enc->hw_ctl->ops.update_pending_flush)
+ phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+
+ /* clear all blendstages */
+ if (phys_enc->hw_ctl->ops.setup_blendstage)
+ phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ }
+}
+
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ int i;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+
+ phys_enc->hw_ctl->ops.reset(ctl);
+
+ dpu_encoder_helper_reset_mixers(phys_enc);
+
+ /*
+ * TODO: move the once-only operation like CTL flush/trigger
+ * into dpu_encoder_virt_disable() and all operations which need
+ * to be done per phys encoder into the phys_disable() op.
+ */
+ if (phys_enc->hw_wb) {
+ /* disable the PP block */
+ if (phys_enc->hw_wb->ops.bind_pingpong_blk)
+ phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
+ phys_enc->hw_pp->idx);
+
+ /* mark WB flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
+ phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ } else {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ dpu_enc->phys_encs[i]->hw_intf, false,
+ dpu_enc->phys_encs[i]->hw_pp->idx);
+
+ /* mark INTF flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
+ phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ dpu_enc->phys_encs[i]->hw_intf->idx);
+ }
+ }
+
+ /* reset the merge 3D HW block */
+ if (phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+ phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ phys_enc->hw_pp->merge_3d->idx);
+ }
+
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+ ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
+
+ ctl->ops.trigger_flush(ctl);
+ ctl->ops.trigger_start(ctl);
+ ctl->ops.clear_pending_flush(ctl);
+}
+
void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1780,22 +2058,12 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
- phys->intf_idx - INTF_0,
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
atomic_read(&phys->vsync_cnt),
atomic_read(&phys->underrun_cnt));
- switch (phys->intf_mode) {
- case INTF_MODE_VIDEO:
- seq_puts(s, "mode: video\n");
- break;
- case INTF_MODE_CMD:
- seq_puts(s, "mode: command\n");
- break;
- default:
- seq_puts(s, "mode: ???\n");
- break;
- }
+ seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
mutex_unlock(&dpu_enc->enc_lock);
@@ -1854,7 +2122,7 @@ static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
}
static int dpu_encoder_virt_add_phys_encs(
- u32 display_caps,
+ struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
{
@@ -1873,7 +2141,7 @@ static int dpu_encoder_virt_add_phys_encs(
return -EINVAL;
}
- if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
enc = dpu_encoder_phys_vid_init(params);
if (IS_ERR_OR_NULL(enc)) {
@@ -1886,7 +2154,7 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
- if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
enc = dpu_encoder_phys_cmd_init(params);
if (IS_ERR_OR_NULL(enc)) {
@@ -1899,6 +2167,19 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
+ if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
+ enc = dpu_encoder_phys_wb_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
if (params->split_role == ENC_ROLE_SLAVE)
dpu_enc->cur_slave = enc;
else
@@ -1942,6 +2223,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
case DRM_MODE_ENCODER_TMDS:
intf_type = INTF_DP;
break;
+ case DRM_MODE_ENCODER_VIRTUAL:
+ intf_type = INTF_WB;
+ break;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
@@ -1953,6 +2237,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
dpu_enc->idle_pc_supported =
dpu_kms->catalog->caps->has_idle_pc;
+ dpu_enc->dsc = disp_info->dsc;
+
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
@@ -1977,16 +2263,30 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
intf_type,
controller_id);
- if (phys_params.intf_idx == INTF_MAX) {
- DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+
+ phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
+ intf_type, controller_id);
+ /*
+ * The phys_params might represent either an INTF or a WB unit, but not
+ * both of them at the same time.
+ */
+ if ((phys_params.intf_idx == INTF_MAX) &&
+ (phys_params.wb_idx == WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if ((phys_params.intf_idx != INTF_MAX) &&
+ (phys_params.wb_idx != WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
intf_type, controller_id);
ret = -EINVAL;
}
if (!ret) {
- ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
- dpu_enc,
- &phys_params);
+ ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ dpu_enc, &phys_params);
if (ret)
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
}
@@ -2066,6 +2366,9 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
+ priv->dp[disp_info->h_tile_instance[0]]);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
@@ -2100,8 +2403,9 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
if (!dpu_enc)
return ERR_PTR(-ENOMEM);
+
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
- drm_enc_mode, NULL);
+ drm_enc_mode, NULL);
if (rc) {
devm_kfree(dev->dev, dpu_enc);
return ERR_PTR(rc);
@@ -2180,3 +2484,11 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->dsc_mask;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 42db6ce12caf..781d41c91994 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -27,6 +27,7 @@
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
+ * @dsc: DSC configuration data for DSC-enabled displays
*/
struct msm_display_info {
int intf_type;
@@ -34,6 +35,7 @@ struct msm_display_info {
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
+ struct msm_display_dsc_config *dsc;
};
/**
@@ -170,4 +172,34 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
*/
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_is_valid_for_commit - check if the encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index fa8493ac0340..f2af07d87f56 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,15 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
#ifndef __DPU_ENCODER_PHYS_H__
#define __DPU_ENCODER_PHYS_H__
+#include <drm/drm_writeback.h>
#include <linux/jiffies.h>
#include "dpu_kms.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
@@ -135,6 +138,11 @@ struct dpu_encoder_phys_ops {
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
int (*get_frame_count)(struct dpu_encoder_phys *phys);
+ void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc);
};
/**
@@ -143,6 +151,7 @@ struct dpu_encoder_phys_ops {
* @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
* @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
* @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector
*/
enum dpu_intr_idx {
INTR_IDX_VSYNC,
@@ -150,25 +159,11 @@ enum dpu_intr_idx {
INTR_IDX_UNDERRUN,
INTR_IDX_CTL_START,
INTR_IDX_RDPTR,
+ INTR_IDX_WB_DONE,
INTR_IDX_MAX,
};
/**
- * dpu_encoder_irq - tracking structure for interrupts
- * @name: string name of interrupt
- * @intr_idx: Encoder interrupt enumeration
- * @irq_idx: IRQ interface lookup index from DPU IRQ framework
- * will be -EINVAL if IRQ is not registered
- * @irq_cb: interrupt callback
- */
-struct dpu_encoder_irq {
- const char *name;
- enum dpu_intr_idx intr_idx;
- int irq_idx;
- struct dpu_irq_callback cb;
-};
-
-/**
* struct dpu_encoder_phys - physical encoder that drives a single INTF block
* tied to a specific panel / sub-panel. Abstract type, sub-classed by
* phys_vid or phys_cmd for video mode or command mode encs respectively.
@@ -179,12 +174,14 @@ struct dpu_encoder_irq {
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
* @hw_intf: Hardware interface to the intf registers
+ * @hw_wb: Hardware interface to the wb registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
+ * @wb_idx: Writeback index on dpu hardware
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -197,7 +194,7 @@ struct dpu_encoder_irq {
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
- * @irq: IRQ tracking structures
+ * @irq: IRQ indices
*/
struct dpu_encoder_phys {
struct drm_encoder *parent;
@@ -207,11 +204,13 @@ struct dpu_encoder_phys {
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
+ struct dpu_hw_wb *hw_wb;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -220,7 +219,7 @@ struct dpu_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
- struct dpu_encoder_irq irq[INTR_IDX_MAX];
+ int irq[INTR_IDX_MAX];
};
static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
@@ -230,6 +229,27 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
}
/**
+ * struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle
+ * writeback specific operations
+ * @base: Baseclass physical encoder structure
+ * @wbirq_refcount: Reference count of writeback interrupt
+ * @wb_done_timeout_cnt: number of wb done irq timeout errors
+ * @wb_cfg: writeback block config to store fb related details
+ * @wb_conn: backpointer to writeback connector
+ * @wb_job: backpointer to current writeback job
+ * @dest: dpu buffer layout for current writeback output buffer
+ */
+struct dpu_encoder_phys_wb {
+ struct dpu_encoder_phys base;
+ atomic_t wbirq_refcount;
+ int wb_done_timeout_cnt;
+ struct dpu_hw_wb_cfg wb_cfg;
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *wb_job;
+ struct dpu_hw_fmt_layout dest;
+};
+
+/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
@@ -257,6 +277,7 @@ struct dpu_encoder_phys_cmd {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @split_role: Role to play in a split-panel configuration
* @intf_idx: Interface index this phys_enc will control
+ * @wb_idx: Writeback index this phys_enc will control
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
*/
struct dpu_enc_phys_init_params {
@@ -265,6 +286,7 @@ struct dpu_enc_phys_init_params {
const struct dpu_encoder_virt_ops *parent_ops;
enum dpu_enc_split_role split_role;
enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
};
@@ -297,6 +319,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p);
/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
* dpu_encoder_helper_trigger_start - control start helper function
* This helper function may be optionally specified by physical
* encoders if they require ctl_start triggering.
@@ -314,14 +343,24 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+ /* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
+ !dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
}
/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
+
+/**
* dpu_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
* the split display related registers.
@@ -345,30 +384,20 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
* dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
* note: invokes @func directly if the interrupt fired but the wait timed out
* @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
+ * @irq: IRQ index
+ * @func: IRQ callback to be called in case of timeout
* @wait_info: wait info struct
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info);
/**
- * dpu_encoder_helper_register_irq - register and enable an irq
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
* @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
*/
-int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx);
-
-/**
- * dpu_encoder_helper_unregister_irq - unregister and disable an irq
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
- */
-int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx);
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index d59802b67d15..ae28b2b93e69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -62,6 +62,13 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
@@ -140,19 +147,13 @@ static void dpu_encoder_phys_cmd_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct dpu_encoder_irq *irq;
-
- irq = &phys_enc->irq[INTR_IDX_CTL_START];
- irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
- irq = &phys_enc->irq[INTR_IDX_PINGPONG];
- irq->irq_idx = phys_enc->hw_pp->caps->intr_done;
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
- irq = &phys_enc->irq[INTR_IDX_RDPTR];
- irq->irq_idx = phys_enc->hw_pp->caps->intr_rdptr;
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
@@ -192,7 +193,8 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
msm_disp_snapshot_state(drm_enc->dev);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -219,7 +221,9 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
&wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
@@ -258,10 +262,13 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
enable ? "true" : "false", refcount);
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
+ phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_RDPTR);
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
end:
if (ret) {
@@ -282,21 +289,31 @@ static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
enable, atomic_read(&phys_enc->vblank_refcount));
if (enable) {
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_encoder_helper_register_irq(phys_enc,
- INTR_IDX_CTL_START);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
} else {
if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_CTL_START);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START]);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG]);
}
}
@@ -488,6 +505,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
@@ -504,6 +522,17 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp->ops.enable_tearcheck)
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ false,
+ phys_enc->hw_pp->idx);
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
+ }
+
phys_enc->enable_state = DPU_ENC_DISABLED;
}
@@ -623,7 +652,9 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
@@ -681,7 +712,9 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
atomic_inc(&cmd_enc->pending_vblank_cnt);
- rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
&wait_info);
return rc;
@@ -731,7 +764,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
- struct dpu_encoder_irq *irq;
int i, ret = 0;
DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
@@ -755,32 +787,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->enc_spinlock = p->enc_spinlock;
cmd_enc->stream_sel = 0;
phys_enc->enable_state = DPU_ENC_DISABLED;
- for (i = 0; i < INTR_IDX_MAX; i++) {
- irq = &phys_enc->irq[i];
- INIT_LIST_HEAD(&irq->cb.list);
- irq->irq_idx = -EINVAL;
- irq->cb.arg = phys_enc;
- }
-
- irq = &phys_enc->irq[INTR_IDX_CTL_START];
- irq->name = "ctl_start";
- irq->intr_idx = INTR_IDX_CTL_START;
- irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
-
- irq = &phys_enc->irq[INTR_IDX_PINGPONG];
- irq->name = "pp_done";
- irq->intr_idx = INTR_IDX_PINGPONG;
- irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
-
- irq = &phys_enc->irq[INTR_IDX_RDPTR];
- irq->name = "pp_rd_ptr";
- irq->intr_idx = INTR_IDX_RDPTR;
- irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->name = "underrun";
- irq->intr_idx = INTR_IDX_UNDERRUN;
- irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index f49f42e70b29..2c14646661b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -91,25 +91,27 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
- /*
- * For edp only:
- * DISPLAY_V_START = (VBP * HCYCLE) + HBP
- * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
- */
- /*
- * if (vid_enc->hw->cap->type == INTF_EDP) {
- * display_v_start += mode->htotal - mode->hsync_start;
- * display_v_end -= mode->hsync_start - mode->hdisplay;
- * }
- */
/* for DP/EDP, Shift timings to align it to bottom right */
- if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
- (phys_enc->hw_intf->cap->type == INTF_EDP)) {
+ if (phys_enc->hw_intf->cap->type == INTF_DP) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
+
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+
+ /*
+ * for DP, divide the horizontal parameters by 2 when
+ * widebus is enabled
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
+ timing->width = timing->width >> 1;
+ timing->xres = timing->xres >> 1;
+ timing->h_back_porch = timing->h_back_porch >> 1;
+ timing->h_front_porch = timing->h_front_porch >> 1;
+ timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
+ }
}
static u32 get_horizontal_total(const struct intf_timing_params *timing)
@@ -353,13 +355,9 @@ static void dpu_encoder_phys_vid_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct dpu_encoder_irq *irq;
-
- irq = &phys_enc->irq[INTR_IDX_VSYNC];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
@@ -385,10 +383,13 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_VSYNC);
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
end:
if (ret) {
@@ -461,7 +462,9 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
}
/* Wait for kickoff to complete */
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
@@ -513,7 +516,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
msm_disp_snapshot_state(drm_enc->dev);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
}
}
@@ -602,10 +606,14 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
if (WARN_ON(ret))
return;
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
} else {
dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
}
}
@@ -669,7 +677,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_irq *irq;
int i;
if (!p) {
@@ -695,22 +702,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_VIDEO;
phys_enc->enc_spinlock = p->enc_spinlock;
- for (i = 0; i < INTR_IDX_MAX; i++) {
- irq = &phys_enc->irq[i];
- INIT_LIST_HEAD(&irq->cb.list);
- irq->irq_idx = -EINVAL;
- irq->cb.arg = phys_enc;
- }
-
- irq = &phys_enc->irq[INTR_IDX_VSYNC];
- irq->name = "vsync_irq";
- irq->intr_idx = INTR_IDX_VSYNC;
- irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->name = "underrun";
- irq->intr_idx = INTR_IDX_UNDERRUN;
- irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
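
As a usage sketch of the reworked IRQ API (the callback body below is hypothetical; the registration calls and their signatures are taken from this patch), the old per-encoder dpu_encoder_irq bookkeeping reduces to a cached irq index plus a direct callback registration:

	static void my_vsync_irq(void *arg, int irq_idx)
	{
		struct dpu_encoder_phys *phys_enc = arg;

		/* handle vsync for this physical encoder */
	}

	/* at atomic_mode_set time: cache the hw irq index */
	phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;

	/* on enable: at most one callback per irq index */
	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_VSYNC],
				       my_vsync_irq, phys_enc);

	/* on disable */
	dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					 phys_enc->irq[INTR_IDX_VSYNC]);
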
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
new file mode 100644
index 000000000000..4829d1ce0cf8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_encoder_phys.h"
+#include "dpu_formats.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_blk.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_vbif.h"
+#include "dpu_crtc.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DEFAULT_MAX_WRITEBACK_WIDTH 2048
+
+#define to_dpu_encoder_phys_wb(x) \
+ container_of(x, struct dpu_encoder_phys_wb, base)
+
+/**
+ * dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ /* there is only one physical enc for dpu_writeback */
+ return true;
+}
+
+/**
+ * dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_ot_limit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_vbif_set_ot_params ot_params;
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = hw_wb->caps->xin_id;
+ ot_params.num = hw_wb->idx - WB_0;
+ ot_params.width = phys_enc->cached_mode.hdisplay;
+ ot_params.height = phys_enc->cached_mode.vdisplay;
+ ot_params.is_wfd = true;
+ ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
+ ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+ ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ ot_params.rd = false;
+
+ dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos_remap(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_vbif_set_qos_params qos_params;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
+ DPU_ERROR("invalid writeback hardware\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+ qos_params.xin_id = hw_wb->caps->xin_id;
+ qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ qos_params.num = hw_wb->idx - WB_0;
+ qos_params.is_rt = false;
+
+ DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt);
+
+ dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_qos_cfg qos_cfg;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_qos_lut_tbl *qos_lut_tb;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid parameter(s)\n");
+ return;
+ }
+
+ catalog = phys_enc->dpu_kms->catalog;
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
+ qos_cfg.danger_safe_en = true;
+ qos_cfg.danger_lut =
+ catalog->perf.danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_cfg.safe_lut = catalog->perf.safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_lut_tb = &catalog->perf.qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
+
+ if (hw_wb->ops.setup_qos_lut)
+ hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ */
+static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ struct dpu_hw_cdp_cfg cdp_cfg;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ wb_cfg = &wb_enc->wb_cfg;
+
+ wb_cfg->intf_mode = phys_enc->intf_mode;
+ wb_cfg->roi.x1 = 0;
+ wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
+ wb_cfg->roi.y1 = 0;
+ wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
+
+ if (hw_wb->ops.setup_roi)
+ hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_outformat)
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_cdp) {
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
+
+ cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_NRT].wr_enable;
+ cdp_cfg.ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
+ cdp_cfg.tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+ DPU_FORMAT_IS_TILE(wb_cfg->dest.format);
+ cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64;
+
+ hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);
+ }
+
+ if (hw_wb->ops.setup_outaddress)
+ hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_cdp - setup writeback CTL interface configuration
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ ctl = phys_enc->hw_ctl;
+
+	if (ctl && ctl->ops.setup_intf_cfg &&
+			test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features)) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+ struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ enum dpu_3d_blend_mode mode_3d;
+
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+
+ if (mode_3d && hw_pp && hw_pp->merge_3d)
+ intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+
+		if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
+			hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d,
+					mode_3d);
+
+ /* setup which pp blk will connect to this wb */
+		if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
+			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, true,
+					hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+ intf_cfg.mode_3d =
+ dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ }
+}
+
+/**
+ * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ */
+static int dpu_encoder_phys_wb_atomic_check(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
+ phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
+
+	if (!conn_state || !conn_state->connector) {
+		DPU_ERROR("invalid connector state\n");
+		return -EINVAL;
+	} else if (conn_state->connector->status !=
+			connector_status_connected) {
+		DPU_ERROR("connector not connected %d\n",
+				conn_state->connector->status);
+		return -EINVAL;
+	}
+
+	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+		return 0;
+
+	fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+ fb->width, fb->height);
+
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+ mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+ mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > DEFAULT_MAX_WRITEBACK_WIDTH) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, DEFAULT_MAX_WRITEBACK_WIDTH);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_encoder_phys_wb_update_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ */
+static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ u32 pending_flush = 0;
+
+ if (!phys_enc)
+ return;
+
+ hw_wb = phys_enc->hw_wb;
+ hw_pp = phys_enc->hw_pp;
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (!hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+ return;
+ }
+
+ if (hw_ctl->ops.update_pending_flush_wb)
+ hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
+
+ if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
+ hw_pp->merge_3d->idx);
+
+ if (hw_ctl->ops.get_pending_flush)
+ pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
+
+ DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
+ hw_ctl->idx - CTL_0, pending_flush,
+ hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct drm_display_mode mode = phys_enc->cached_mode;
+ struct drm_framebuffer *fb = NULL;
+
+ DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
+ hw_wb->idx - WB_0, mode.name,
+ mode.hdisplay, mode.vdisplay);
+
+ dpu_encoder_phys_wb_set_ot_limit(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos_remap(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos(phys_enc);
+
+ dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+
+ dpu_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ unsigned long lock_flags;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback physical encoder
+ * @irq_idx: interrupt index
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+ _dpu_encoder_phys_wb_frame_done_helper(arg);
+}
+
+/**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+ * @enable: indicates enable or disable interrupts
+ */
+static void dpu_encoder_phys_wb_irq_ctrl(
+ struct dpu_encoder_phys *phys, bool enable)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ dpu_core_irq_register_callback(phys->dpu_kms,
+ phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
+ else if (!enable &&
+ atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+}
+
+static void dpu_encoder_phys_wb_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+}
+
+static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+
+ wb_enc->wb_done_timeout_cnt++;
+
+ if (wb_enc->wb_done_timeout_cnt == 1)
+ msm_disp_snapshot_state(phys_enc->parent->dev);
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+}
+
+/**
+ * dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ */
+static int dpu_encoder_phys_wb_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+	int ret;
+ struct dpu_encoder_wait_info wait_info;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+			phys_enc->irq[INTR_IDX_WB_DONE],
+			dpu_encoder_phys_wb_done_irq, &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+ else if (!ret)
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_connector *drm_conn;
+ struct drm_connector_state *state;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ if (!wb_enc->wb_conn || !wb_enc->wb_job) {
+ DPU_ERROR("invalid wb_conn or wb_job\n");
+ return;
+ }
+
+ drm_conn = &wb_enc->wb_conn->base;
+ state = drm_conn->state;
+
+	drm_writeback_queue_job(wb_enc->wb_conn, state);
+
+ dpu_encoder_phys_wb_setup(phys_enc);
+
+ _dpu_encoder_phys_wb_update_flush(phys_enc);
+}
+
+/**
+ * dpu_encoder_phys_wb_needs_single_flush - report whether a single flush is needed
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ return false;
+}
+
+/**
+ * dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("encoder is already disabled\n");
+ return;
+ }
+
+ /* reset h/w before final flush */
+	if (hw_ctl->ops.clear_pending_flush)
+		hw_ctl->ops.clear_pending_flush(hw_ctl);
+
+	/*
+	 * New CTL reset sequence from 5.0 MDP onwards.
+	 * If the CTL does not support active configuration
+	 * (DPU_CTL_ACTIVE_CFG), the legacy reset sequence must be
+	 * used instead.
+	 *
+	 * The legacy reset sequence has not been implemented yet;
+	 * any target earlier than SM8150 will need it, so the legacy
+	 * teardown sequence will have to be added when WB support is
+	 * extended to those targets.
+	 */
+ if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ dpu_encoder_helper_phys_cleanup(phys_enc);
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
+ kfree(phys_enc);
+}
+
+static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ const struct msm_format *format;
+ struct msm_gem_address_space *aspace;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ int ret;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ if (!job->fb)
+ return;
+
+ wb_enc->wb_job = job;
+ wb_enc->wb_conn = job->connector;
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ wb_cfg = &wb_enc->wb_cfg;
+
+ memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
+
+ ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ if (ret) {
+ DPU_ERROR("prep fb failed, %d\n", ret);
+ return;
+ }
+
+ format = msm_framebuffer_format(job->fb);
+
+ wb_cfg->dest.format = dpu_get_dpu_format_ext(
+ format->pixel_format, job->fb->modifier);
+ if (!wb_cfg->dest.format) {
+ /* this error should be detected during atomic_check */
+ DPU_ERROR("failed to get format %x\n", format->pixel_format);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ if (ret) {
+ DPU_DEBUG("failed to populate layout %d\n", ret);
+ return;
+ }
+
+ wb_cfg->dest.width = job->fb->width;
+ wb_cfg->dest.height = job->fb->height;
+ wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+
+ if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
+ (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+ DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
+ wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
+
+ DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
+ wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
+}
+
+static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct msm_gem_address_space *aspace;
+
+ if (!job->fb)
+ return;
+
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ msm_framebuffer_cleanup(job->fb, aspace, false);
+ wb_enc->wb_job = NULL;
+ wb_enc->wb_conn = NULL;
+}
+
+static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ if (wb_enc->wb_job)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dpu_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ */
+static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_wb_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_wb_enable;
+ ops->disable = dpu_encoder_phys_wb_disable;
+ ops->destroy = dpu_encoder_phys_wb_destroy;
+ ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
+ ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
+ ops->trigger_start = dpu_encoder_helper_trigger_start;
+ ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
+ ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
+ ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+ ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
+}
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = NULL;
+ int ret = 0;
+ int i;
+
+ DPU_DEBUG("\n");
+
+ if (!p || !p->parent) {
+ DPU_ERROR("invalid params\n");
+ ret = -EINVAL;
+ goto fail_alloc;
+ }
+
+ wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+		DPU_ERROR("failed to allocate writeback phys_enc\n");
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ phys_enc = &wb_enc->base;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->wb_idx = p->wb_idx;
+
+ dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+
+ atomic_set(&wb_enc->wbirq_refcount, 0);
+
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
+ phys_enc->wb_idx);
+
+ return phys_enc;
+
+fail_alloc:
+ return ERR_PTR(ret);
+}
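
The commit/completion handshake in this file reduces to a counter plus a waitqueue. A condensed sketch of the pattern (names are from this file; the increment happens in the shared encoder kickoff helpers, which are not part of this patch, and the explicit wait_event_timeout() here stands in for what dpu_encoder_helper_wait_for_irq() does internally):

	/* kickoff path (shared encoder helpers) */
	atomic_inc(&phys_enc->pending_kickoff_cnt);

	/* WB_DONE irq path (_dpu_encoder_phys_wb_frame_done_helper) */
	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	wake_up_all(&phys_enc->pending_kickoff_wq);

	/* wait path (dpu_encoder_phys_wb_wait_for_commit_done) */
	wait_event_timeout(phys_enc->pending_kickoff_wq,
			   !atomic_read(&phys_enc->pending_kickoff_cnt),
			   msecs_to_jiffies(KICKOFF_TIMEOUT_MS));
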
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index 418f5ae91293..84b8b3289f18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -21,6 +21,28 @@ const struct dpu_format *dpu_get_dpu_format_ext(
#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
/**
+ * dpu_find_format - validate if the pixel format is supported
+ * @format: DRM pixel format
+ * @supported_formats: formats supported by the DPU HW
+ * @num_formats: total number of formats
+ *
+ * Return: true if the format is supported, false otherwise
+ */
+static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
+		size_t num_formats)
+{
+	size_t i;
+
+ for (i = 0; i < num_formats; i++) {
+ /* check for valid formats supported */
+ if (format == supported_formats[i])
+ return true;
+ }
+
+ return false;
+}
+
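
A minimal usage sketch (hypothetical caller; wb_cfg is assumed to point at a catalog entry of the dpu_wb_cfg type introduced later in this series, which carries format_list/num_formats):

	if (!dpu_find_format(DRM_FORMAT_XRGB8888,
			     wb_cfg->format_list, wb_cfg->num_formats))
		return -EINVAL;	/* format not supported by this WB block */
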
+/**
* dpu_get_msm_format - get an dpu_format by its msm_format base
* callback function registers with the msm_kms layer
* @kms: kms driver
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index a4fe77cddfea..400ebceb56bb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -35,6 +36,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_SC7280_MASK \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -117,6 +121,16 @@
BIT(MDP_AD4_0_INTR) | \
BIT(MDP_AD4_1_INTR))
+#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+ BIT(DPU_WB_UBWC) | \
+ BIT(DPU_WB_YUV_CONFIG) | \
+ BIT(DPU_WB_PIPE_ALPHA) | \
+ BIT(DPU_WB_XY_ROI_OFFSET) | \
+ BIT(DPU_WB_QOS) | \
+ BIT(DPU_WB_QOS_8LVL) | \
+ BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_INPUT_CTRL))
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -203,6 +217,45 @@ static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_YVU420,
};
+static const u32 rotation_v2_formats[] = {
+ DRM_FORMAT_NV12,
+ /* TODO add formats after validation */
+};
+
+static const uint32_t wb2_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+};
+
/*************************************************************
* DPU sub blocks config
*************************************************************/
@@ -223,6 +276,17 @@ static const struct dpu_caps msm8998_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
+static const struct dpu_caps qcm2290_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
@@ -338,17 +402,6 @@ static const struct dpu_mdp_cfg msm8998_mdp[] = {
},
};
-static const struct dpu_caps qcm2290_dpu_caps = {
- .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .max_mixer_blendstages = 0x4,
- .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .has_dim_layer = true,
- .has_idle_pc = true,
- .max_linewidth = 2160,
- .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
-};
-
static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
@@ -440,6 +493,8 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2BC, .bit_off = 20},
+ .clk_ctrls[DPU_CLK_CTRL_WB2] = {
+ .reg_off = 0x3B8, .bit_off = 24},
},
};
@@ -642,7 +697,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
*************************************************************/
/* SSPP common configuration */
-
#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -660,6 +714,27 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = NULL, \
+ }
+
+#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = qseed_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = rot_cfg, \
}
#define _DMA_SBLK(num, sdma_pri) \
@@ -684,6 +759,12 @@ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
_VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+ .rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
+ .rot_format_list = rotation_v2_formats,
+};
+
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
@@ -751,6 +832,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
_VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+ _VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+
static const struct dpu_sspp_cfg sc7180_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
@@ -791,8 +875,8 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
};
static const struct dpu_sspp_cfg sc7280_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
- sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK,
+ sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
@@ -1117,6 +1201,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
+
+/*************************************************************
+ * DSC sub blocks config
+ *************************************************************/
+#define DSC_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x140, \
+ .features = 0, \
+ }
+
+static struct dpu_dsc_cfg sdm845_dsc[] = {
+ DSC_BLK("dsc_0", DSC_0, 0x80000),
+ DSC_BLK("dsc_1", DSC_1, 0x80400),
+ DSC_BLK("dsc_2", DSC_2, 0x80800),
+ DSC_BLK("dsc_3", DSC_3, 0x80c00),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
@@ -1180,6 +1282,29 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
};
/*************************************************************
+ * Writeback blocks config
+ *************************************************************/
+#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
+ __xin_id, vbif_id, _reg, _wb_done_bit) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x2c8, \
+ .features = _features, \
+ .format_list = wb2_formats, \
+ .num_formats = ARRAY_SIZE(wb2_formats), \
+ .clk_ctrl = _clk_ctrl, \
+ .xin_id = __xin_id, \
+ .vbif_idx = vbif_id, \
+ .maxlinewidth = DEFAULT_DPU_LINE_WIDTH, \
+ .intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
+ }
+
+static const struct dpu_wb_cfg sm8250_wb[] = {
+ WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
+ VBIF_RT, MDP_SSPP_TOP0_INTR, 4),
+};
+
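
For reference, the single sm8250 entry above expands mechanically (per the WB_BLK macro) to roughly:

	{
		.name = "wb_2", .id = WB_2,
		.base = 0x65000, .len = 0x2c8,
		.features = WB_SM8250_MASK,
		.format_list = wb2_formats,
		.num_formats = ARRAY_SIZE(wb2_formats),
		.clk_ctrl = DPU_CLK_CTRL_WB2,
		.xin_id = 6,
		.vbif_idx = VBIF_RT,
		.maxlinewidth = DEFAULT_DPU_LINE_WIDTH,
		.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
	}
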
+/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -1643,6 +1768,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mixer = sdm845_lm,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm845_dsc),
+ .dsc = sdm845_dsc,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
@@ -1775,6 +1902,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
.reg_dma_count = 1,
.dma_cfg = sm8250_regdma,
.perf = sm8250_perf_data,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index b85b24bd3f53..8cb6d1f25bf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_CATALOG_H
@@ -112,6 +114,7 @@ enum {
* @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
* @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_INLINE_ROTATION Support inline rotation
* @DPU_SSPP_MAX maximum value
*/
enum {
@@ -132,6 +135,7 @@ enum {
DPU_SSPP_TS_PREFILL,
DPU_SSPP_TS_PREFILL_REC1,
DPU_SSPP_CDP,
+ DPU_SSPP_INLINE_ROTATION,
DPU_SSPP_MAX
};
@@ -212,6 +216,42 @@ enum {
};
/**
+ * WB sub-blocks and features
+ * @DPU_WB_LINE_MODE Writeback module supports line/linear mode
+ * @DPU_WB_BLOCK_MODE Writeback module supports block mode read
+ * @DPU_WB_UBWC Writeback supports Universal Bandwidth Compression
+ * @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace
+ * @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha
+ * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ * the destination image
+ * @DPU_WB_QOS Writeback supports QoS control, danger/safe/creq
+ * @DPU_WB_QOS_8LVL Writeback supports 8-level QoS control
+ * @DPU_WB_CDP Writeback supports client driven prefetch
+ * @DPU_WB_INPUT_CTRL Writeback supports selecting which pp block the
+ * input pixel data arrives from
+ * @DPU_WB_CROP CWB supports cropping
+ * @DPU_WB_MAX maximum value
+ */
+enum {
+ DPU_WB_LINE_MODE = 0x1,
+ DPU_WB_BLOCK_MODE,
+ DPU_WB_UBWC,
+ DPU_WB_YUV_CONFIG,
+ DPU_WB_PIPE_ALPHA,
+ DPU_WB_XY_ROI_OFFSET,
+ DPU_WB_QOS,
+ DPU_WB_QOS_8LVL,
+ DPU_WB_CDP,
+ DPU_WB_INPUT_CTRL,
+ DPU_WB_CROP,
+ DPU_WB_MAX
+};
+
+/**
* VBIF sub-blocks and features
* @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
* @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
@@ -315,6 +355,18 @@ struct dpu_qos_lut_tbl {
};
/**
+ * struct dpu_rotation_cfg - define inline rotation config
+ * @rot_maxheight: max pre rotated height allowed for rotation
+ * @rot_num_formats: number of elements in @rot_format_list
+ * @rot_format_list: list of supported rotator formats
+ */
+struct dpu_rotation_cfg {
+ u32 rot_maxheight;
+ size_t rot_num_formats;
+ const u32 *rot_format_list;
+};
+
+/**
* struct dpu_caps - define DPU capabilities
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
@@ -369,6 +421,7 @@ struct dpu_caps {
* @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
* @virt_num_formats: Number of supported formats for virtual planes
+ * @rotation_cfg: inline rotation configuration
*/
struct dpu_sspp_sub_blks {
u32 creq_vblank;
@@ -390,6 +443,7 @@ struct dpu_sspp_sub_blks {
u32 num_formats;
const u32 *virt_format_list;
u32 virt_num_formats;
+ const struct dpu_rotation_cfg *rotation_cfg;
};
/**
@@ -444,6 +498,7 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
DPU_CLK_CTRL_REG_DMA,
+ DPU_CLK_CTRL_WB2,
DPU_CLK_CTRL_MAX,
};
@@ -562,6 +617,16 @@ struct dpu_merge_3d_cfg {
};
/**
+ * struct dpu_dsc_cfg - information of DSC blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_dsc_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
@@ -582,6 +647,28 @@ struct dpu_intf_cfg {
};
/**
+ * struct dpu_wb_cfg - information of writeback blocks
+ * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
+ * @vbif_idx: vbif client index
+ * @maxlinewidth: max line width supported by writeback block
+ * @xin_id: bus client identifier
+ * @intr_wb_done: interrupt index for WB_DONE
+ * @format_list: list of formats supported by this writeback block
+ * @num_formats: number of formats supported by this writeback block
+ * @clk_ctrl: clock control identifier
+ */
+struct dpu_wb_cfg {
+ DPU_HW_BLK_INFO;
+ u8 vbif_idx;
+ u32 maxlinewidth;
+ u32 xin_id;
+ s32 intr_wb_done;
+ const u32 *format_list;
+ u32 num_formats;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
* @ot_limit OT limit to use up to specified pixel per second
@@ -757,12 +844,18 @@ struct dpu_mdss_cfg {
u32 merge_3d_count;
const struct dpu_merge_3d_cfg *merge_3d;
+ u32 dsc_count;
+ struct dpu_dsc_cfg *dsc;
+
u32 intf_count;
const struct dpu_intf_cfg *intf;
u32 vbif_count;
const struct dpu_vbif_cfg *vbif;
+ u32 wb_count;
+ const struct dpu_wb_cfg *wb;
+
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 3584f5ee6bb3..c33e7ef611a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -23,8 +24,12 @@
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
+#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
+#define CTL_DSC_ACTIVE 0x0E8
+#define CTL_DSC_FLUSH 0x104
+#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
@@ -34,7 +39,9 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
+#define DSC_IDX 22
#define INTF_IDX 31
+#define WB_IDX 16
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -126,13 +133,15 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
-
if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
+ if (ctx->pending_flush_mask & BIT(WB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
+ ctx->pending_wb_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
@@ -253,6 +262,27 @@ static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
}
}
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -502,6 +532,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 wb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -511,17 +542,32 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+	if (cfg->dsc)
+		DPU_REG_WRITE(c, CTL_DSC_FLUSH, cfg->dsc);
+
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
- intf_active |= BIT(cfg->intf - INTF_0);
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+
+ if (cfg->intf)
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ if (cfg->wb)
+ wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
+	if (cfg->dsc) {
+		DPU_REG_WRITE(c, CTL_FLUSH, BIT(DSC_IDX));
+		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+	}
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -537,6 +583,9 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
@@ -554,6 +603,44 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
+static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 merge3d_active = 0;
+
+ /*
+ * This API resets each portion of the CTL path namely,
+ * clearing the sspps staged on the lm, merge_3d block,
+ * interfaces , writeback etc to ensure clean teardown of the pipeline.
+ * This will be used for writeback to begin with to have a
+ * proper teardown of the writeback session but upon further
+ * validation, this can be extended to all interfaces.
+ */
+ if (cfg->merge_3d) {
+ merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
+ merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ merge3d_active);
+ }
+
+ dpu_hw_ctl_clear_all_blendstages(ctx);
+
+ if (cfg->intf) {
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active &= ~BIT(cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ }
+
+ if (cfg->wb) {
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ wb_active &= ~BIT(cfg->wb - WB_0);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ }
+}
+
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
@@ -577,15 +664,18 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
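
Putting the v1 writeback pieces together: a commit targeting WB_2 first accumulates flush bits and then pushes both registers in one trigger. A condensed sketch of the resulting sequence (grounded in the ops installed above):

	ctl->ops.update_pending_flush_wb(ctl, WB_2);
	/* pending_wb_flush_mask |= BIT(2); pending_flush_mask |= BIT(WB_IDX) */

	ctl->ops.trigger_flush(ctl);
	/* v1: writes pending_wb_flush_mask to CTL_WB_FLUSH, then the
	 * accumulated pending_flush_mask to CTL_FLUSH */
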
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index ac1544474022..5755307089b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -40,13 +41,16 @@ struct dpu_hw_stage_cfg {
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
* @stream_sel: Stream selection for multi-stream interfaces
+ * @wb: Writeback block used
+ * @dsc: DSC BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
int stream_sel;
+ unsigned int dsc;
};
/**
@@ -100,6 +104,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -138,6 +151,14 @@ struct dpu_hw_ctl_ops {
void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
+ /**
+ * reset ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
int (*reset)(struct dpu_hw_ctl *c);
/*
@@ -189,6 +210,7 @@ struct dpu_hw_ctl_ops {
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
+ * @pending_wb_flush_mask: pending WB flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -202,6 +224,7 @@ struct dpu_hw_ctl {
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
+ u32 pending_wb_flush_mask;
u32 pending_merge_3d_flush_mask;
/* ops */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
new file mode 100644
index 000000000000..4ad8991fc7d9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_COMMON_MODE 0x000
+#define DSC_ENC 0x004
+#define DSC_PICTURE 0x008
+#define DSC_SLICE 0x00C
+#define DSC_CHUNK_SIZE 0x010
+#define DSC_DELAY 0x014
+#define DSC_SCALE_INITIAL 0x018
+#define DSC_SCALE_DEC_INTERVAL 0x01C
+#define DSC_SCALE_INC_INTERVAL 0x020
+#define DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define DSC_BPG_OFFSET 0x028
+#define DSC_DSC_OFFSET 0x02C
+#define DSC_FLATNESS 0x030
+#define DSC_RC_MODEL_SIZE 0x034
+#define DSC_RC 0x038
+#define DSC_RC_BUF_THRESH 0x03C
+#define DSC_RANGE_MIN_QP 0x074
+#define DSC_RANGE_MAX_QP 0x0B0
+#define DSC_RANGE_BPG_OFFSET 0x0EC
+
+static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
+{
+ struct dpu_hw_blk_reg_map *c = &dsc->hw;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
+}
+
+static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 data, lsb, bpp;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+ slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
+ data = (initial_lines << 20);
+ data |= ((slice_last_group_size - 1) << 18);
+	/* bpp is 6.4 format, 4 LSBs are for the fractional part */
+ data |= dsc->drm->bits_per_pixel << 12;
+ lsb = dsc->drm->bits_per_pixel % 4;
+ bpp = dsc->drm->bits_per_pixel / 4;
+ bpp *= 4;
+ bpp <<= 4;
+ bpp |= lsb;
+
+ data |= bpp << 8;
+ data |= (dsc->drm->block_pred_enable << 7);
+ data |= (dsc->drm->line_buf_depth << 3);
+ data |= (dsc->drm->simple_422 << 2);
+ data |= (dsc->drm->convert_rgb << 1);
+ data |= dsc->drm->bits_per_component;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+ data = dsc->drm->pic_width << 16;
+ data |= dsc->drm->pic_height;
+ DPU_REG_WRITE(c, DSC_PICTURE, data);
+
+ data = dsc->drm->slice_width << 16;
+ data |= dsc->drm->slice_height;
+ DPU_REG_WRITE(c, DSC_SLICE, data);
+
+ data = dsc->drm->slice_chunk_size << 16;
+ DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
+
+ data = dsc->drm->initial_dec_delay << 16;
+ data |= dsc->drm->initial_xmit_delay;
+ DPU_REG_WRITE(c, DSC_DELAY, data);
+
+ data = dsc->drm->initial_scale_value;
+ DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
+
+ data = dsc->drm->scale_decrement_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
+
+ data = dsc->drm->scale_increment_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
+
+ data = dsc->drm->first_line_bpg_offset;
+ DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+ data = dsc->drm->nfl_bpg_offset << 16;
+ data |= dsc->drm->slice_bpg_offset;
+ DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
+
+ data = dsc->drm->initial_offset << 16;
+ data |= dsc->drm->final_offset;
+ DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
+
+ det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
+ data = det_thresh_flatness << 10;
+ data |= dsc->drm->flatness_max_qp << 5;
+ data |= dsc->drm->flatness_min_qp;
+ DPU_REG_WRITE(c, DSC_FLATNESS, data);
+
+ data = dsc->drm->rc_model_size;
+ DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
+
+ data = dsc->drm->rc_tgt_offset_low << 18;
+ data |= dsc->drm->rc_tgt_offset_high << 14;
+ data |= dsc->drm->rc_quant_incr_limit1 << 9;
+ data |= dsc->drm->rc_quant_incr_limit0 << 4;
+ data |= dsc->drm->rc_edge_factor;
+ DPU_REG_WRITE(c, DSC_RC, data);
+}
+
+static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc)
+{
+ struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 off;
+ int i;
+
+ off = DSC_RC_BUF_THRESH;
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
+ DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MIN_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_min_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MAX_QP;
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_max_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_BPG_OFFSET;
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
+ off += 4;
+ }
+}
+
+static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dsc_count; i++) {
+ if (dsc == m->dsc[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dsc[i].base;
+ b->length = m->dsc[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_DSC;
+ return &m->dsc[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
+ unsigned long cap)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable;
+ ops->dsc_config = dpu_hw_dsc_config;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+}
+
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dsc *c;
+ struct dpu_dsc_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dsc_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_dsc_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
+{
+ kfree(dsc);
+}
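
As a consistency check on the register map above: each threshold array is written with a 4-byte stride, and the block offsets line up exactly with the array lengths (DSC_NUM_BUF_RANGES is 15 per drm_dsc.h, with 14 buffer thresholds): DSC_RC_BUF_THRESH (0x3C) + 14 * 4 = 0x74 = DSC_RANGE_MIN_QP; 0x74 + 15 * 4 = 0xB0 = DSC_RANGE_MAX_QP; and 0xB0 + 15 * 4 = 0xEC = DSC_RANGE_BPG_OFFSET.
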
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
new file mode 100644
index 000000000000..b39ee4ed32f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020-2022, Linaro Limited */
+
+#ifndef _DPU_HW_DSC_H
+#define _DPU_HW_DSC_H
+
+#include <drm/display/drm_dsc.h>
+
+#define DSC_MODE_SPLIT_PANEL BIT(0)
+#define DSC_MODE_MULTIPLEX BIT(1)
+#define DSC_MODE_VIDEO BIT(2)
+
+struct dpu_hw_dsc;
+
+/**
+ * struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dsc_ops {
+ /**
+ * dsc_disable - disable dsc
+ * @hw_dsc: Pointer to dsc context
+ */
+ void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
+
+ /**
+ * dsc_config - configures dsc encoder
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ * @mode: dsc topology mode to be set
+ * @initial_lines: amount of initial lines to be used
+ */
+ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines);
+
+ /**
+ * dsc_config_thresh - programs panel thresholds
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ */
+ void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc);
+};
+
+struct dpu_hw_dsc {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dsc */
+ enum dpu_dsc idx;
+ const struct dpu_dsc_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_dsc_ops ops;
+};
+
+/**
+ * dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
+ * @idx: DSC index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_dsc context
+ */
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dsc_destroy - destroys dsc driver context
+ * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
+ */
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
+
+static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dsc, base);
+}
+
+#endif /* _DPU_HW_DSC_H */
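
A minimal call-site sketch for this interface (the wrapper function and its parameters are hypothetical; mmio, catalog, dsc and initial_lines would come from the surrounding driver):

	static int my_dsc_setup(void __iomem *mmio, struct dpu_mdss_cfg *catalog,
				struct msm_display_dsc_config *dsc,
				u32 initial_lines)
	{
		struct dpu_hw_dsc *hw_dsc;

		hw_dsc = dpu_hw_dsc_init(DSC_0, mmio, catalog);
		if (IS_ERR(hw_dsc))
			return PTR_ERR(hw_dsc);

		if (hw_dsc->ops.dsc_config)
			hw_dsc->ops.dsc_config(hw_dsc, dsc, DSC_MODE_VIDEO,
					       initial_lines);
		if (hw_dsc->ops.dsc_config_thresh)
			hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

		dpu_hw_dsc_destroy(hw_dsc);
		return 0;
	}
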
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c61b5b283f08..61284e6c313d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -151,25 +151,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- struct dpu_irq_callback *cb;
-
VERB("irq_idx=%d\n", irq_idx);
-	if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
 		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
+		return;
+	}
- atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
/*
* Perform registered function callback
*/
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
@@ -362,7 +359,7 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
@@ -389,7 +386,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
intr_status = DPU_REG_READ(&intr->hw,
dpu_intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
- if (intr_status && clear)
+ if (intr_status)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
intr_status);
@@ -413,24 +410,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
-
- intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
- GFP_KERNEL);
- if (intr->cache_irq_mask == NULL) {
- kfree(intr);
- return ERR_PTR(-ENOMEM);
- }
+ intr->total_irqs = nirq;
intr->irq_mask = m->mdss_irqs;
@@ -441,31 +432,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
- if (intr) {
- kfree(intr->cache_irq_mask);
-
- kfree(intr->irq_cb_tbl);
- kfree(intr->irq_counts);
-
- kfree(intr);
- }
+ kfree(intr);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
{
unsigned long irq_flags;
+ int ret;
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
+ if (!irq_cb) {
+ DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
@@ -477,41 +455,34 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_enable_irq_locked(
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
- }
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_register_success(irq_idx);
+
return 0;
}
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
-
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
+ int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
@@ -521,20 +492,20 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_unregister_success(irq_idx);
+
return 0;
}
@@ -542,24 +513,18 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq_callback *cb;
unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
- return 0;
+ int i, irq_count;
+ void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- cb_count = 0;
- irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
- cb_count++;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
@@ -575,8 +540,9 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_preinstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -584,24 +550,21 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
- atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
- }
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_uninstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
+ if (!dpu_kms->hw_intr)
+ return;
+
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 37379966d8ec..4154c5e2b4ae 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -44,19 +44,21 @@ enum dpu_hw_intr_reg {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
- * @irq_cb_tbl: array of IRQ callbacks lists
- * @irq_counts: array of IRQ counts
+ * @irq_tbl: array of IRQ callbacks, their arguments and counters
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
- u32 *cache_irq_mask;
+ u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
- struct list_head *irq_cb_tbl;
- atomic_t *irq_counts;
+ struct {
+ void (*cb)(void *arg, int irq_idx);
+ void *arg;
+ atomic_t count;
+ } irq_tbl[];
};
/**
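With the callback lists gone, each irq_idx holds at most one (cb, arg) pair, and registering into an occupied slot now fails with -EBUSY instead of chaining. A hedged usage sketch of the reworked API; the completion-based wait object is hypothetical:

static void example_done_cb(void *arg, int irq_idx)
{
	struct completion *done = arg;	/* hypothetical wait object */

	complete_all(done);
}

static int example_enable(struct dpu_kms *dpu_kms, int irq_idx,
			  struct completion *done)
{
	/* fails with -EBUSY if a callback is already installed */
	return dpu_core_irq_register_callback(dpu_kms, irq_idx,
					      example_done_cb, done);
}

static void example_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
	/* clears the slot and masks the interrupt again */
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}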
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 116e2b5b1a90..3f4d2c6e1b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -33,6 +33,7 @@
#define INTF_TP_COLOR1 0x05C
#define INTF_CONFIG2 0x060
#define INTF_DISPLAY_DATA_HCTL 0x064
+#define INTF_ACTIVE_DATA_HCTL 0x068
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
@@ -60,6 +61,12 @@
#define INTF_MUX 0x25C
+#define INTF_CFG_ACTIVE_H_EN BIT(29)
+#define INTF_CFG_ACTIVE_V_EN BIT(30)
+
+#define INTF_CFG2_DATABUS_WIDEN BIT(0)
+#define INTF_CFG2_DATA_HCTL_EN BIT(4)
+
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -90,15 +97,23 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
+ u32 hsync_data_start_x, hsync_data_end_x;
u32 active_h_start, active_h_end;
u32 active_v_start, active_v_end;
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
u32 panel_format;
- u32 intf_cfg, intf_cfg2 = 0, display_data_hctl = 0;
+ u32 intf_cfg, intf_cfg2 = 0;
+ u32 display_data_hctl = 0, active_data_hctl = 0;
+ u32 data_width;
+ bool dp_intf = false;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+
+ if (ctx->cap->type == INTF_DP)
+ dp_intf = true;
+
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p->h_front_porch;
vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
@@ -112,7 +127,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
- if (p->width != p->xres) {
+ if (p->width != p->xres) { /* border fill added */
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
} else {
@@ -120,7 +135,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
active_h_end = 0;
}
- if (p->height != p->yres) {
+ if (p->height != p->yres) { /* border fill added */
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
} else {
@@ -130,27 +145,46 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
if (active_h_end) {
active_hctl = (active_h_end << 16) | active_h_start;
- intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN;
} else {
active_hctl = 0;
}
if (active_v_end)
- intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+ intf_cfg |= INTF_CFG_ACTIVE_V_EN;
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (p->wide_bus_en)
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+
+ data_width = p->width;
+
+ hsync_data_start_x = hsync_start_x;
+ hsync_data_end_x = hsync_start_x + data_width - 1;
+
+ display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
+
+ if (dp_intf) {
+ /* DP timing adjustment */
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
- display_v_start += p->hsync_pulse_width + p->h_back_porch;
-
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
+
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
}
den_polarity = 0;
@@ -180,13 +214,6 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
(COLOR_8BIT << 4) |
(0x21 << 8));
- if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
- intf_cfg2 |= BIT(4);
- display_data_hctl = display_hctl;
- DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
- DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
- }
-
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
@@ -204,6 +231,11 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+ }
}
static void dpu_hw_intf_enable_timing_engine(
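Most registers programmed by the timing engine pack a 16-bit end coordinate over a 16-bit start coordinate. A worked example for a hypothetical 1920-wide mode shows how hsync_ctl and display_hctl come out:

/* hypothetical horizontal timing: 32 pulse, 80 back porch, 48 front porch */
u32 hsync_pulse_width = 32, h_back_porch = 80, h_front_porch = 48;
u32 width = 1920;

u32 hsync_period = hsync_pulse_width + h_back_porch + width +
		   h_front_porch;				/* 2080 */
u32 hsync_start_x = h_back_porch + hsync_pulse_width;		/* 112 */
u32 hsync_end_x = hsync_period - h_front_porch - 1;		/* 2031 */

u32 hsync_ctl = (hsync_period << 16) | hsync_pulse_width;	/* 0x08200020 */
u32 display_hctl = (hsync_end_x << 16) | hsync_start_x;		/* 0x07ef0070 */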
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 230d122fa43b..7b2d96ac61e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -30,6 +30,8 @@ struct intf_timing_params {
u32 border_clr;
u32 underflow_clr;
u32 hsync_skew;
+
+ bool wide_bus_en;
};
struct intf_prog_fetch {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 86363c0ec834..462f5082099e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -138,7 +138,7 @@ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
if (!(ctrl & LM_MISR_CTRL_ENABLE))
- return -EINVAL;
+ return -ENODATA;
if (!(ctrl & LM_MISR_CTRL_STATUS))
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index bb9ceadeb0bb..9f402be55fbf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -97,6 +97,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
+ DPU_HW_BLK_DSC,
DPU_HW_BLK_MAX,
};
@@ -176,6 +177,17 @@ enum dpu_ctl {
CTL_MAX
};
+enum dpu_dsc {
+ DSC_NONE = 0,
+ DSC_0,
+ DSC_1,
+ DSC_2,
+ DSC_3,
+ DSC_4,
+ DSC_5,
+ DSC_MAX
+};
+
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@@ -205,14 +217,21 @@ enum dpu_intf {
INTF_MAX
};
+/*
+ * Historically these values correspond to the values written to the
+ * DISP_INTF_SEL register, which had to be programmed manually. On newer MDP
+ * generations this register is NOP, but we keep the values for historical
+ * reasons.
+ */
enum dpu_intf_type {
INTF_NONE = 0x0,
INTF_DSI = 0x1,
INTF_HDMI = 0x3,
INTF_LCDC = 0x5,
+ /* old eDP found on 8x74 and 8x84 */
INTF_EDP = 0x9,
+ /* both DP and eDP, handled by the new DP driver */
INTF_DP = 0xa,
- INTF_TYPE_MAX,
/* virtual interfaces */
INTF_WB = 0x100,
@@ -437,5 +456,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
#define DPU_DBG_MASK_DSPP (1 << 10)
+#define DPU_DBG_MASK_DSC (1 << 11)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 55766c97c4c8..47c6ab6caf95 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,6 +28,9 @@
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
#define PP_DITHER_EN 0x000
#define PP_DITHER_BITDEPTH 0x004
@@ -245,6 +248,32 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
return line;
}
+static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 1);
+ return 0;
+}
+
+static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 0);
+}
+
+static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
+ int data;
+
+ data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
+ data |= BIT(18); /* endian flip */
+ DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
+ return 0;
+}
+
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
unsigned long features)
{
@@ -256,6 +285,9 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
+ c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
+ c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
+ c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
if (test_bit(DPU_PINGPONG_DITHER, &features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
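The three new ops are meant to be driven by the encoder in a fixed order: setup_dsc once to flip the DCE output endianness, enable_dsc when the compressed stream starts, and disable_dsc on teardown. A hedged sketch of a caller; the function names are hypothetical:

static void example_pp_dsc_start(struct dpu_hw_pingpong *hw_pp)
{
	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);	/* endian flip in DCE_DATA_OUT_SWAP */
	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);	/* PP_DSC_MODE = 1 */
}

static void example_pp_dsc_stop(struct dpu_hw_pingpong *hw_pp)
{
	if (hw_pp->ops.disable_dsc)
		hw_pp->ops.disable_dsc(hw_pp);	/* PP_DSC_MODE = 0 */
}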
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 89d08a715c16..12758468d9ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -124,6 +124,20 @@ struct dpu_hw_pingpong_ops {
*/
void (*setup_dither)(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg);
+ /**
+ * Enable DSC
+ */
+ int (*enable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Disable DSC
+ */
+ void (*disable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Setup DSC
+ */
+ int (*setup_dsc)(struct dpu_hw_pingpong *pp);
};
struct dpu_hw_merge_3d;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 09cdc3576653..0a0864dff783 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -627,7 +627,7 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg,
+ struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index)
{
u32 idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 92b071b78fdb..a81e16657d61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -193,22 +193,6 @@ enum {
};
/**
- * struct dpu_hw_pipe_cdp_cfg : CDP configuration
- * @enable: true to enable CDP
- * @ubwc_meta_enable: true to enable ubwc metadata preload
- * @tile_amortize_enable: true to enable amortization control for tile format
- * @preload_ahead: number of request to preload ahead
- * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
- * DPU_SSPP_CDP_PRELOAD_AHEAD_64
- */
-struct dpu_hw_pipe_cdp_cfg {
- bool enable;
- bool ubwc_meta_enable;
- bool tile_amortize_enable;
- u32 preload_ahead;
-};
-
-/**
* struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
* @size: size to prefill in bytes, or zero to disable
* @time: time to prefill in usec, or zero to disable
@@ -359,7 +343,7 @@ struct dpu_hw_sspp_ops {
* @index: rectangle index in multirect
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg,
+ struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index aad85116b0a0..512316f25a51 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -422,3 +422,28 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
}
+
+/**
+ * _dpu_hw_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
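Since the helper is now shared (the SSPP copy is removed from dpu_plane.c below, and the WB path uses it as well), a usage sketch with made-up table contents:

static const struct dpu_qos_lut_entry example_entries[] = {
	{ .fl = 4,  .lut = 0x1234 },
	{ .fl = 10, .lut = 0x5678 },
	{ .fl = 0,  .lut = 0xdead },	/* fl == 0: default/fallback entry */
};

static const struct dpu_qos_lut_tbl example_tbl = {
	.nentry = ARRAY_SIZE(example_entries),
	.entries = example_entries,
};

/* fill level 7 falls between 4 and 10, so this returns 0x5678;
 * anything above 10 hits the trailing fl == 0 default, 0xdead */
u64 lut = _dpu_hw_get_qos_lut(&example_tbl, 7);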
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 39134754579e..e4a65eb4f769 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1)
@@ -298,6 +299,21 @@ struct dpu_drm_scaler_v2 {
struct dpu_drm_de_v1 de;
};
+/**
+ * struct dpu_hw_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * DPU_*_CDP_PRELOAD_AHEAD_32,
+ * DPU_*_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
u32 *dpu_hw_util_get_log_mask_ptr(void);
@@ -324,4 +340,7 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl);
+
#endif /* _DPU_HW_UTIL_H */
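The CDP configuration moved here so that SSPP and WB can share one type. A hedged example of filling it for a UBWC destination; the values are illustrative, and the op call assumes the WB block added later in this series:

struct dpu_hw_cdp_cfg cdp_cfg = {
	.enable = true,
	.ubwc_meta_enable = true,	/* only useful for UBWC formats */
	.tile_amortize_enable = false,
	.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64,
};

/* a writeback consumer then hands it to the block's op */
if (hw_wb->ops.setup_cdp)
	hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);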
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
new file mode 100644
index 000000000000..bcccce292937
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_wb.h"
+#include "dpu_formats.h"
+#include "dpu_kms.h"
+
+#define WB_DST_FORMAT 0x000
+#define WB_DST_OP_MODE 0x004
+#define WB_DST_PACK_PATTERN 0x008
+#define WB_DST0_ADDR 0x00C
+#define WB_DST1_ADDR 0x010
+#define WB_DST2_ADDR 0x014
+#define WB_DST3_ADDR 0x018
+#define WB_DST_YSTRIDE0 0x01C
+#define WB_DST_YSTRIDE1 0x020
+#define WB_DST_DITHER_BITDEPTH 0x024
+#define WB_DST_MATRIX_ROW0 0x030
+#define WB_DST_MATRIX_ROW1 0x034
+#define WB_DST_MATRIX_ROW2 0x038
+#define WB_DST_MATRIX_ROW3 0x03C
+#define WB_DST_WRITE_CONFIG 0x048
+#define WB_ROTATION_DNSCALER 0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define WB_N16_INIT_PHASE_X_C03 0x060
+#define WB_N16_INIT_PHASE_X_C12 0x064
+#define WB_N16_INIT_PHASE_Y_C03 0x068
+#define WB_N16_INIT_PHASE_Y_C12 0x06C
+#define WB_OUT_SIZE 0x074
+#define WB_ALPHA_X_VALUE 0x078
+#define WB_DANGER_LUT 0x084
+#define WB_SAFE_LUT 0x088
+#define WB_QOS_CTRL 0x090
+#define WB_CREQ_LUT_0 0x098
+#define WB_CREQ_LUT_1 0x09C
+#define WB_UBWC_STATIC_CTRL 0x144
+#define WB_MUX 0x150
+#define WB_CROP_CTRL 0x154
+#define WB_CROP_OFFSET 0x158
+#define WB_CSC_BASE 0x260
+#define WB_DST_ADDR_SW_STATUS 0x2B0
+#define WB_CDP_CNTL 0x2B4
+#define WB_OUT_IMAGE_SIZE 0x2C0
+#define WB_OUT_XY 0x2C4
+
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+
+static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
+ const struct dpu_mdss_cfg *m, void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->wb_count; i++) {
+ if (wb == m->wb[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->wb[i].base;
+ b->length = m->wb[i].len;
+ b->hwversion = m->hwversion;
+ return &m->wb[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+ DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+ DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+ DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = data->dest.format;
+ u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+ u32 write_config = 0;
+ u32 opmode = 0;
+ u32 dst_addr_sw = 0;
+
+ chroma_samp = fmt->chroma_sample;
+
+ dst_format = (chroma_samp << 23) |
+ (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C0_G_Y] << 0);
+
+ if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+ dst_format |= BIT(8); /* DSTC3_EN */
+ if (!fmt->alpha_enable ||
+ !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
+ dst_format |= BIT(14); /* DST_ALPHA_X */
+ }
+
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
+
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
+ ystride0 = data->dest.plane_pitch[0] |
+ (data->dest.plane_pitch[1] << 16);
+ ystride1 = data->dest.plane_pitch[2] |
+ (data->dest.plane_pitch[3] << 16);
+
+ if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
+ outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
+ else
+ outsize = (data->dest.height << 16) | data->dest.width;
+
+ DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+ DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+ DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+ DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
+ DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+ DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+}
+
+static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 image_size, out_size, out_xy;
+
+ image_size = (wb->dest.height << 16) | wb->dest.width;
+ out_xy = 0;
+ out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
+
+ DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+ DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
+
+static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 qos_ctrl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
+ DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
+
+ /*
+ * for chipsets not using DPU_WB_QOS_8LVL but still using DPU
+ * driver such as msm8998, the reset value of WB_CREQ_LUT is
+ * sufficient for writeback to work. SW doesn't need to explicitly
+ * program a value.
+ */
+ if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
+ DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
+ DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
+}
+
+static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
+static void dpu_hw_wb_bind_pingpong_blk(
+ struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ if (!ctx)
+ return;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, WB_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, WB_MUX, mux_cfg);
+}
+
+static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
+ unsigned long features)
+{
+ ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
+ ops->setup_outformat = dpu_hw_wb_setup_format;
+
+ if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
+ ops->setup_roi = dpu_hw_wb_roi;
+
+ if (test_bit(DPU_WB_QOS, &features))
+ ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
+
+ if (test_bit(DPU_WB_CDP, &features))
+ ops->setup_cdp = dpu_hw_wb_setup_cdp;
+
+ if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
+}
+
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_wb *c;
+ const struct dpu_wb_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _wb_offset(idx, m, addr, &c->hw);
+ if (IS_ERR(cfg)) {
+ WARN(1, "Unable to find wb idx=%d\n", idx);
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign caps and ops */
+ c->mdp = &m->mdp[0];
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_wb_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
+{
+ kfree(hw_wb);
+}
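Like the other dpu_hw_* blocks, WB follows an init/ops/destroy lifecycle, with optional ops populated from catalog feature bits. A minimal consumer sketch; the wb_cfg contents (format, addresses, roi) are assumed to be filled by the caller:

struct dpu_hw_wb_cfg wb_cfg = { };	/* dest/roi filled by the caller */
struct dpu_hw_wb *hw_wb;

hw_wb = dpu_hw_wb_init(WB_2, dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR(hw_wb))
	return PTR_ERR(hw_wb);

hw_wb->ops.setup_outaddress(hw_wb, &wb_cfg);	/* always populated */
hw_wb->ops.setup_outformat(hw_wb, &wb_cfg);	/* always populated */
if (hw_wb->ops.setup_roi)			/* DPU_WB_XY_ROI_OFFSET only */
	hw_wb->ops.setup_roi(hw_wb, &wb_cfg);

/* on teardown */
dpu_hw_wb_destroy(hw_wb);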
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
new file mode 100644
index 000000000000..3ff5a48541e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_WB_H
+#define _DPU_HW_WB_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_pingpong.h"
+
+struct dpu_hw_wb;
+
+struct dpu_hw_wb_cfg {
+ struct dpu_hw_fmt_layout dest;
+ enum dpu_intf_mode intf_mode;
+ struct drm_rect roi;
+ struct drm_rect crop;
+};
+
+/* CDP preload ahead address size */
+enum {
+ DPU_WB_CDP_PRELOAD_AHEAD_32,
+ DPU_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_wb_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ bool danger_safe_en;
+};
+
+/**
+ * struct dpu_hw_wb_ops : Interface to the wb hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_outaddress: setup output address from the writeback job
+ * @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_qos_lut: setup qos LUT for writeback block based on input
+ * @setup_cdp: setup chroma down prefetch block for writeback block
+ * @bind_pingpong_blk: enable/disable the connection with ping-pong block
+ */
+struct dpu_hw_wb_ops {
+ void (*setup_outaddress)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg);
+
+ void (*setup_cdp)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_wb : WB driver object
+ * @hw: block hardware details
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: hardware index number within type
+ * @caps: hardware capabilities
+ * @ops: function pointers
+ * @hw_mdp: MDP top level hardware block
+ */
+struct dpu_hw_wb {
+ struct dpu_hw_blk_reg_map hw;
+ const struct dpu_mdp_cfg *mdp;
+
+ /* wb path */
+ int idx;
+ const struct dpu_wb_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_wb_ops ops;
+
+ struct dpu_hw_mdp *hw_mdp;
+};
+
+/**
+ * dpu_hw_wb_init(): Initializes and returns the writeback hw driver object.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+
+#endif /* _DPU_HW_WB_H */
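For completeness, a hedged example of the QoS configuration defined above, continuing the hw_wb handle from the init sketch earlier; the LUT values are illustrative, not taken from any catalog:

struct dpu_hw_wb_qos_cfg qos_cfg = {
	.danger_lut = 0xffff,
	.safe_lut = 0x0,
	.creq_lut = 0x0011223344556677ULL,	/* used only with DPU_WB_QOS_8LVL */
	.danger_safe_en = true,
};

if (hw_wb->ops.setup_qos_lut)
	hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);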
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index e29796c4f27b..bce47647d891 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -15,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_writeback.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -29,6 +32,7 @@
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
+#include "dpu_writeback.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
@@ -380,9 +384,13 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
+ struct device *dpu_dev = dev->dev;
+ struct device *mdss_dev = dpu_dev->parent;
- path0 = of_icc_get(dev->dev, "mdp0-mem");
- path1 = of_icc_get(dev->dev, "mdp1-mem");
+ /*
+ * Interconnects are a part of the MDSS device tree binding, not the
+ * MDP/DPU device.
+ */
+ path0 = of_icc_get(mdss_dev, "mdp0-mem");
+ path1 = of_icc_get(mdss_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
@@ -565,8 +573,6 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- priv->encoders[priv->num_encoders++] = encoder;
-
memset(&info, 0, sizeof(info));
info.intf_type = encoder->encoder_type;
@@ -582,6 +588,8 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
+ info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
+
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
if (rc) {
@@ -629,8 +637,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return rc;
}
- priv->encoders[priv->num_encoders++] = encoder;
-
info.num_of_h_tiles = 1;
info.h_tile_instance[0] = i;
info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
@@ -646,6 +652,45 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return 0;
}
+static int _dpu_kms_initialize_writeback(struct drm_device *dev,
+ struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
+ const u32 *wb_formats, int n_formats)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ rc = dpu_writeback_init(dev, encoder, wb_formats,
+ n_formats);
+ if (rc) {
+ DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ info.num_of_h_tiles = 1;
+ /* use only WB idx 2 instance for DPU */
+ info.h_tile_instance[0] = WB_2;
+ info.intf_type = encoder->encoder_type;
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
/**
* _dpu_kms_setup_displays - create encoders, bridges and connectors
* for underlying displays
@@ -659,6 +704,7 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
int rc = 0;
+ int i;
rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
if (rc) {
@@ -672,39 +718,33 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
return rc;
}
- return rc;
-}
-
-static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
-{
- struct msm_drm_private *priv;
- int i;
-
- priv = dpu_kms->dev->dev_private;
-
- for (i = 0; i < priv->num_crtcs; i++)
- priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
- priv->num_crtcs = 0;
-
- for (i = 0; i < priv->num_planes; i++)
- priv->planes[i]->funcs->destroy(priv->planes[i]);
- priv->num_planes = 0;
-
- for (i = 0; i < priv->num_connectors; i++)
- priv->connectors[i]->funcs->destroy(priv->connectors[i]);
- priv->num_connectors = 0;
+ /* Since WB isn't a separate driver, check the catalog before initializing */
+ if (dpu_kms->catalog->wb_count) {
+ for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
+ if (dpu_kms->catalog->wb[i].id == WB_2) {
+ rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
+ dpu_kms->catalog->wb[i].format_list,
+ dpu_kms->catalog->wb[i].num_formats);
+ if (rc) {
+ DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+ }
- for (i = 0; i < priv->num_encoders; i++)
- priv->encoders[i]->funcs->destroy(priv->encoders[i]);
- priv->num_encoders = 0;
+ return rc;
}
+#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
@@ -721,9 +761,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
*/
ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
if (ret)
- goto fail;
+ return ret;
+
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
- max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+ max_crtc_count = min(catalog->mixer_count, num_encoders);
/* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
@@ -746,9 +790,8 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
- goto fail;
+ return ret;
}
- priv->planes[priv->num_planes++] = plane;
if (type == DRM_PLANE_TYPE_CURSOR)
cursor_planes[cursor_planes_idx++] = plane;
@@ -763,19 +806,16 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- goto fail;
+ return ret;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
/* All CRTCs are compatible with all encoders */
- for (i = 0; i < priv->num_encoders; i++)
- priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
return 0;
-fail:
- _dpu_kms_drm_obj_destroy(dpu_kms);
- return ret;
}
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
@@ -793,8 +833,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ }
}
}
@@ -837,20 +879,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
_dpu_kms_hw_destroy(dpu_kms);
msm_kms_destroy(&dpu_kms->base);
-}
-static irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-static void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- dpu_core_irq_preinstall(dpu_kms);
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&dpu_kms->pdev->dev);
}
static int dpu_irq_postinstall(struct msm_kms *kms)
@@ -872,13 +903,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
return 0;
}
-static void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- dpu_core_irq_uninstall(dpu_kms);
-}
-
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
int i;
@@ -923,6 +947,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+ /* dump WB sub-blocks HW regs info */
+ for (i = 0; i < cat->wb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
+ dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
+
msm_disp_snapshot_add_block(disp_state, top->hw.length,
dpu_kms->mmio + top->hw.blk_off, "top");
@@ -931,10 +960,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
- .irq_preinstall = dpu_irq_preinstall,
+ .irq_preinstall = dpu_core_irq_preinstall,
.irq_postinstall = dpu_irq_postinstall,
- .irq_uninstall = dpu_irq_uninstall,
- .irq = dpu_irq,
+ .irq_uninstall = dpu_core_irq_uninstall,
+ .irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
.vsync_time = dpu_kms_vsync_time,
@@ -973,12 +1002,16 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
+ struct device *dpu_dev = dpu_kms->dev->dev;
+ struct device *mdss_dev = dpu_dev->parent;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
- mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ /*
+ * IOMMUs are a part of the MDSS device tree binding, not the
+ * MDP/DPU device.
+ */
+ mmu = msm_iommu_new(mdss_dev, domain);
if (IS_ERR(mmu)) {
iommu_domain_free(domain);
return PTR_ERR(mmu);
@@ -1056,7 +1089,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms_parse_data_bus_icc_path(dpu_kms);
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
+ if (rc < 0)
+ goto error;
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1172,37 +1207,16 @@ error:
return rc;
}
-struct msm_kms *dpu_kms_init(struct drm_device *dev)
-{
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- int irq;
-
- if (!dev) {
- DPU_ERROR("drm device node invalid\n");
- return ERR_PTR(-EINVAL);
- }
-
- priv = dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
-
- irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
- if (irq < 0) {
- DPU_ERROR("failed to get irq: %d\n", irq);
- return ERR_PTR(irq);
- }
- dpu_kms->base.irq = irq;
-
- return &dpu_kms->base;
-}
-
-static int dpu_bind(struct device *dev, struct device *master, void *data)
+static int dpu_kms_init(struct drm_device *ddev)
{
- struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = priv->dev;
struct dpu_kms *dpu_kms;
+ int irq;
+ struct dev_pm_opp *opp;
int ret = 0;
+ unsigned long max_freq = ULONG_MAX;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
@@ -1225,7 +1239,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
}
dpu_kms->num_clocks = ret;
- platform_set_drvdata(pdev, dpu_kms);
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ dev_pm_opp_set_rate(dev, max_freq);
ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
if (ret) {
@@ -1240,31 +1258,25 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
priv->kms = &dpu_kms->base;
- return ret;
-}
-
-static void dpu_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return -EINVAL;
+ }
+ dpu_kms->base.irq = irq;
- if (dpu_kms->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
+ return 0;
}
-static const struct component_ops dpu_ops = {
- .bind = dpu_bind,
- .unbind = dpu_unbind,
-};
-
static int dpu_dev_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &dpu_ops);
+ return msm_drv_probe(&pdev->dev, dpu_kms_init);
}
static int dpu_dev_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &dpu_ops);
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
return 0;
}
@@ -1272,7 +1284,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
int i;
struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
@@ -1288,7 +1301,8 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
int i;
@@ -1318,9 +1332,11 @@ static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
};
-const struct of_device_id dpu_dt_match[] = {
+static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", },
{ .compatible = "qcom,qcm2290-dpu", },
{ .compatible = "qcom,sdm845-dpu", },
@@ -1336,6 +1352,7 @@ MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
.remove = dpu_dev_remove,
+ .shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_dpu",
.of_match_table = dpu_dt_match,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 779e7bd01efd..832a0769f2e7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -65,18 +65,6 @@
#define DPU_NAME_SIZE 12
-/*
- * struct dpu_irq_callback - IRQ callback handlers
- * @list: list to callback
- * @func: intr handler
- * @arg: argument for the handler
- */
-struct dpu_irq_callback {
- struct list_head list;
- void (*func)(void *arg, int irq_idx);
- void *arg;
-};
-
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -145,6 +133,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
deleted file mode 100644
index b10ca505f9ac..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/irqdesc.h>
-#include <linux/irqchip/chained_irq.h>
-#include "dpu_kms.h"
-
-#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
-
-#define HW_REV 0x0
-#define HW_INTR_STATUS 0x0010
-
-#define UBWC_STATIC 0x144
-#define UBWC_CTRL_2 0x150
-#define UBWC_PREDICTION_MODE 0x154
-
-/* Max BW defined in KBps */
-#define MAX_BW 6800000
-
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-struct dpu_mdss {
- struct msm_mdss base;
- void __iomem *mmio;
- struct clk_bulk_data *clocks;
- size_t num_clocks;
- struct dpu_irq_controller irq_controller;
-};
-
-static void dpu_mdss_irq(struct irq_desc *desc)
-{
- struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 interrupts;
-
- chained_irq_enter(chip, desc);
-
- interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
-
- while (interrupts) {
- irq_hw_number_t hwirq = fls(interrupts) - 1;
- int rc;
-
- rc = generic_handle_domain_irq(dpu_mdss->irq_controller.domain,
- hwirq);
- if (rc < 0) {
- DRM_ERROR("handle irq fail: irq=%lu rc=%d\n",
- hwirq, rc);
- break;
- }
-
- interrupts &= ~(1 << hwirq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void dpu_mdss_irq_mask(struct irq_data *irqd)
-{
- struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
-
- /* memory barrier */
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
- /* memory barrier */
- smp_mb__after_atomic();
-}
-
-static void dpu_mdss_irq_unmask(struct irq_data *irqd)
-{
- struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
-
- /* memory barrier */
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
- /* memory barrier */
- smp_mb__after_atomic();
-}
-
-static struct irq_chip dpu_mdss_irq_chip = {
- .name = "dpu_mdss",
- .irq_mask = dpu_mdss_irq_mask,
- .irq_unmask = dpu_mdss_irq_unmask,
-};
-
-static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
-
-static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct dpu_mdss *dpu_mdss = domain->host_data;
-
- irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
- irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
- return irq_set_chip_data(irq, dpu_mdss);
-}
-
-static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
- .map = dpu_mdss_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
-{
- struct device *dev;
- struct irq_domain *domain;
-
- dev = dpu_mdss->base.dev;
-
- domain = irq_domain_add_linear(dev->of_node, 32,
- &dpu_mdss_irqdomain_ops, dpu_mdss);
- if (!domain) {
- DPU_ERROR("failed to add irq_domain\n");
- return -EINVAL;
- }
-
- dpu_mdss->irq_controller.enabled_mask = 0;
- dpu_mdss->irq_controller.domain = domain;
-
- return 0;
-}
-
-static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
-{
- if (dpu_mdss->irq_controller.domain) {
- irq_domain_remove(dpu_mdss->irq_controller.domain);
- dpu_mdss->irq_controller.domain = NULL;
- }
-}
-static int dpu_mdss_enable(struct msm_mdss *mdss)
-{
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
- int ret;
-
- ret = clk_bulk_prepare_enable(dpu_mdss->num_clocks, dpu_mdss->clocks);
- if (ret) {
- DPU_ERROR("clock enable failed, ret:%d\n", ret);
- return ret;
- }
-
- /*
- * ubwc config is part of the "mdss" region which is not accessible
- * from the rest of the driver. hardcode known configurations here
- */
- switch (readl_relaxed(dpu_mdss->mmio + HW_REV)) {
- case DPU_HW_VER_500:
- case DPU_HW_VER_501:
- writel_relaxed(0x420, dpu_mdss->mmio + UBWC_STATIC);
- break;
- case DPU_HW_VER_600:
- /* TODO: 0x102e for LP_DDR4 */
- writel_relaxed(0x103e, dpu_mdss->mmio + UBWC_STATIC);
- writel_relaxed(2, dpu_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(1, dpu_mdss->mmio + UBWC_PREDICTION_MODE);
- break;
- case DPU_HW_VER_620:
- writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
- break;
- case DPU_HW_VER_720:
- writel_relaxed(0x101e, dpu_mdss->mmio + UBWC_STATIC);
- break;
- }
-
- return ret;
-}
-
-static int dpu_mdss_disable(struct msm_mdss *mdss)
-{
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
-
- clk_bulk_disable_unprepare(dpu_mdss->num_clocks, dpu_mdss->clocks);
-
- return 0;
-}
-
-static void dpu_mdss_destroy(struct msm_mdss *mdss)
-{
- struct platform_device *pdev = to_platform_device(mdss->dev);
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
- int irq;
-
- pm_runtime_suspend(mdss->dev);
- pm_runtime_disable(mdss->dev);
- _dpu_mdss_irq_domain_fini(dpu_mdss);
- irq = platform_get_irq(pdev, 0);
- irq_set_chained_handler_and_data(irq, NULL, NULL);
-
- if (dpu_mdss->mmio)
- devm_iounmap(&pdev->dev, dpu_mdss->mmio);
- dpu_mdss->mmio = NULL;
-}
-
-static const struct msm_mdss_funcs mdss_funcs = {
- .enable = dpu_mdss_enable,
- .disable = dpu_mdss_disable,
- .destroy = dpu_mdss_destroy,
-};
-
-int dpu_mdss_init(struct platform_device *pdev)
-{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct dpu_mdss *dpu_mdss;
- int ret;
- int irq;
-
- dpu_mdss = devm_kzalloc(&pdev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
- if (!dpu_mdss)
- return -ENOMEM;
-
- dpu_mdss->mmio = msm_ioremap(pdev, "mdss");
- if (IS_ERR(dpu_mdss->mmio))
- return PTR_ERR(dpu_mdss->mmio);
-
- DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
-
- ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_mdss->clocks);
- if (ret < 0) {
- DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- goto clk_parse_err;
- }
- dpu_mdss->num_clocks = ret;
-
- dpu_mdss->base.dev = &pdev->dev;
- dpu_mdss->base.funcs = &mdss_funcs;
-
- ret = _dpu_mdss_irq_domain_add(dpu_mdss);
- if (ret)
- goto irq_domain_error;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto irq_error;
- }
-
- irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
- dpu_mdss);
-
- priv->mdss = &dpu_mdss->base;
-
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-irq_error:
- _dpu_mdss_irq_domain_fini(dpu_mdss);
-irq_domain_error:
-clk_parse_err:
- if (dpu_mdss->mmio)
- devm_iounmap(&pdev->dev, dpu_mdss->mmio);
- dpu_mdss->mmio = NULL;
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6565682fe227..5b5aef249390 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -280,31 +280,6 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
}
/**
- * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
- * @tbl: Pointer to LUT table
- * @total_fl: fill level
- * Return: LUT setting corresponding to the fill level
- */
-static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
- u32 total_fl)
-{
- int i;
-
- if (!tbl || !tbl->nentry || !tbl->entries)
- return 0;
-
- for (i = 0; i < tbl->nentry; i++)
- if (total_fl <= tbl->entries[i].fl)
- return tbl->entries[i].lut;
-
- /* if last fl is zero, use as default */
- if (!tbl->entries[i-1].fl)
- return tbl->entries[i-1].lut;
-
- return 0;
-}
-
-/**
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
@@ -333,7 +308,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
}
- qos_lut = _dpu_plane_get_qos_lut(
+ qos_lut = _dpu_hw_get_qos_lut(
&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
@@ -528,11 +503,19 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
- struct dpu_hw_pixel_ext *pixel_ext,
const struct dpu_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
uint32_t i;
+ bool inline_rotation = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ /*
+ * For inline rotation cases, scaler config is post-rotation,
+ * so swap the dimensions here. However, pixel extension will
+ * need pre-rotation settings.
+ */
+ if (inline_rotation)
+ swap(src_w, src_h);
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
@@ -571,11 +554,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
-
- pixel_ext->num_ext_pxls_top[i] =
- scale_cfg->src_height[i];
- pixel_ext->num_ext_pxls_left[i] =
- scale_cfg->src_width[i];
}
if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
&& (src_w == dst_w))
@@ -591,6 +569,24 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
+static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
+ uint32_t src_w, uint32_t src_h,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ int i;
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ src_w /= chroma_subsmpl_h;
+ src_h /= chroma_subsmpl_v;
+ }
+
+ pixel_ext->num_ext_pxls_top[i] = src_h;
+ pixel_ext->num_ext_pxls_left[i] = src_w;
+ }
+}
+
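A worked example of the new helper for a hypothetical 1920x1080 source with 4:2:0 subsampling (hsub = vsub = 2); note that, as written, the loop divides the dimensions in place at both COMP_1_2 and COMP_2:

struct dpu_hw_scaler3_cfg scale_cfg = { 0 };
struct dpu_hw_pixel_ext pixel_ext = { 0 };

_dpu_plane_setup_pixel_ext(&scale_cfg, &pixel_ext, 1920, 1080, 2, 2);

/* resulting num_ext_pxls_left / num_ext_pxls_top per component:
 *   DPU_SSPP_COMP_0   (Y):    1920 / 1080
 *   DPU_SSPP_COMP_1_2 (CbCr):  960 /  540
 *   DPU_SSPP_COMP_2:           480 /  270  (divided a second time)
 *   DPU_SSPP_COMP_3 (alpha):   480 /  270
 */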
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
/* S15.16 format */
@@ -654,6 +650,10 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
+ u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
+ u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
+ u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
memset(&pixel_ext, 0, sizeof(pixel_ext));
@@ -661,13 +661,17 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
/* don't chroma subsample if decimating */
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
- drm_rect_width(&pipe_cfg->src_rect),
- drm_rect_height(&pipe_cfg->src_rect),
- drm_rect_width(&pipe_cfg->dst_rect),
- drm_rect_height(&pipe_cfg->dst_rect),
- &scaler3_cfg, &pixel_ext, fmt,
+ src_width,
+ src_height,
+ dst_width,
+ dst_height,
+ &scaler3_cfg, fmt,
info->hsub, info->vsub);
+ /* configure pixel extension based on scalar config */
+ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
+ src_width, src_height, info->hsub, info->vsub);
+
if (pdpu->pipe_hw->ops.setup_pe)
pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
&pixel_ext);
@@ -956,6 +960,34 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
drm_rect_equals(fb_rect, src);
}
+static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
+ const struct dpu_sspp_sub_blks *sblk,
+ struct drm_rect src, const struct dpu_format *fmt)
+{
+ size_t num_formats;
+ const u32 *supported_formats;
+
+ if (!sblk->rotation_cfg) {
+ DPU_ERROR("invalid rotation cfg\n");
+ return -EINVAL;
+ }
+
+ if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) {
+ DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n",
+			drm_rect_width(&src), sblk->rotation_cfg->rot_maxheight);
+ return -EINVAL;
+ }
+
+ supported_formats = sblk->rotation_cfg->rot_format_list;
+ num_formats = sblk->rotation_cfg->rot_num_formats;
+
+ if (!DPU_FORMAT_IS_UBWC(fmt) ||
+ !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+ return -EINVAL;
+
+ return 0;
+}
+
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -968,15 +1000,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
uint32_t min_src_size, max_linewidth;
+ unsigned int rotation;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = pdpu->pipe_hw->cap->sblk;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale);
+ min_scale = FRAC_16_16(1, sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- pdpu->pipe_hw->cap->sblk->maxdwnscale << 16,
+ sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -1002,8 +1038,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
- !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) {
+ (!(pipe_hw_caps->features & DPU_SSPP_SCALER) ||
+ !(pipe_hw_caps->features & DPU_SSPP_CSC_ANY))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -1036,6 +1072,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -E2BIG;
}
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+
+ if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
+ (rotation & DRM_MODE_ROTATE_90)) {
+ ret = dpu_plane_check_inline_rotation(pdpu, sblk, src, fmt);
+ if (ret)
+ return ret;
+ }
+
+ pstate->rotation = rotation;
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
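
drm_rotation_simplify() folds a requested rotation that the hardware
cannot do into an equivalent one it can, exploiting the fact that a
180-degree rotation equals reflecting both axes. A usage sketch, with
constants from the DRM rotation property API in <drm/drm_blend.h>:

	unsigned int rot = drm_rotation_simplify(DRM_MODE_ROTATE_180,
						 DRM_MODE_ROTATE_0 |
						 DRM_MODE_REFLECT_MASK);
	/* rot == DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y */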
@@ -1151,29 +1203,27 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_mode);
if (pdpu->pipe_hw->ops.setup_format) {
- unsigned int rotation;
+ unsigned int rotation = pstate->rotation;
src_flags = 0x0;
- rotation = drm_rotation_simplify(state->rotation,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
-
if (rotation & DRM_MODE_REFLECT_X)
src_flags |= DPU_SSPP_FLIP_LR;
if (rotation & DRM_MODE_REFLECT_Y)
src_flags |= DPU_SSPP_FLIP_UD;
+ if (rotation & DRM_MODE_ROTATE_90)
+ src_flags |= DPU_SSPP_ROT_90;
+
/* update format */
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_cdp) {
- struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+ struct dpu_hw_cdp_cfg cdp_cfg;
- memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
@@ -1411,13 +1461,9 @@ static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
- if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
- int i;
- for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
- if (format == qcom_compressed_supported_formats[i])
- return true;
- }
- }
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
+ return dpu_find_format(format, qcom_compressed_supported_formats,
+ ARRAY_SIZE(qcom_compressed_supported_formats));
return false;
}
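
dpu_find_format() is used here as a plain membership test on a fourcc
table; a minimal sketch of that behaviour (name and types illustrative,
not the driver's):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static bool fourcc_in_list(uint32_t fourcc, const uint32_t *list, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (list[i] == fourcc)
				return true;
		return false;
	}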
@@ -1462,6 +1508,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
+ uint32_t supported_rotations;
int ret = -EINVAL;
/* create and zero local structure */
@@ -1530,12 +1577,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+
+ if (pdpu->pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_MASK;
+
drm_plane_create_rotation_property(plane,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
+ DRM_MODE_ROTATE_0, supported_rotations);
drm_plane_enable_fb_damage_clips(plane);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 50781e2d3577..e1463107a6fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -26,6 +26,7 @@
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
* @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
+ * @rotation: simplified drm rotation hint
*/
struct dpu_plane_state {
struct drm_plane_state base;
@@ -40,6 +41,7 @@ struct dpu_plane_state {
u64 plane_clk;
bool needs_dirtyfb;
+ unsigned int rotation;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 7497538adae1..06f03e7081bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -9,8 +9,10 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
+#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
@@ -77,6 +79,18 @@ int dpu_rm_destroy(struct dpu_rm *rm)
for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
dpu_hw_intf_destroy(rm->hw_intf[i]);
+ for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
+ struct dpu_hw_dsc *hw;
+
+ if (rm->dsc_blks[i]) {
+ hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
+ dpu_hw_dsc_destroy(hw);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
+ dpu_hw_wb_destroy(rm->hw_wb[i]);
+
return 0;
}
@@ -176,6 +190,24 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->hw_intf[intf->id - INTF_0] = hw;
}
+ for (i = 0; i < cat->wb_count; i++) {
+ struct dpu_hw_wb *hw;
+ const struct dpu_wb_cfg *wb = &cat->wb[i];
+
+ if (wb->id < WB_0 || wb->id >= WB_MAX) {
+			DPU_ERROR("skip wb %d with invalid id\n", wb->id);
+ continue;
+ }
+
+ hw = dpu_hw_wb_init(wb->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed wb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_wb[wb->id - WB_0] = hw;
+ }
+
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@@ -210,6 +242,19 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
}
+ for (i = 0; i < cat->dsc_count; i++) {
+ struct dpu_hw_dsc *hw;
+ const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
+
+ hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
+		if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dsc object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
+ }
+
return 0;
fail:
@@ -441,6 +486,28 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = top->num_dsc;
+ int i;
+
+	/* check that the requested DSC blocks are not already allocated */
+ for (i = 0; i < num_dsc; i++) {
+ if (global_state->dsc_to_enc_id[i]) {
+ DPU_ERROR("DSC %d is already allocated\n", i);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < num_dsc; i++)
+ global_state->dsc_to_enc_id[i] = enc->base.id;
+
+ return 0;
+}
+
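
The DSC reservation follows the resource manager's existing ownership
model: a per-block array in the global state records the DRM id of the
owning encoder, with zero meaning free. A self-contained sketch of that
model (types and sizes illustrative):

	#include <stdint.h>

	#define NUM_DSC 4

	struct global_state {
		uint32_t dsc_to_enc_id[NUM_DSC];
	};

	static int reserve_dsc(struct global_state *gs, uint32_t enc_id, int num)
	{
		int i;

		for (i = 0; i < num; i++)
			if (gs->dsc_to_enc_id[i])
				return -1;	/* already owned */
		for (i = 0; i < num; i++)
			gs->dsc_to_enc_id[i] = enc_id;
		return 0;
	}

	static void release_dsc(struct global_state *gs, uint32_t enc_id)
	{
		int i;

		for (i = 0; i < NUM_DSC; i++)
			if (gs->dsc_to_enc_id[i] == enc_id)
				gs->dsc_to_enc_id[i] = 0;
	}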
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -462,6 +529,10 @@ static int _dpu_rm_make_reservation(
return ret;
}
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -499,6 +570,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
@@ -567,6 +640,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->dspp_to_enc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
+ case DPU_HW_BLK_DSC:
+ hw_blks = rm->dsc_blks;
+ hw_to_enc_id = global_state->dsc_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dsc_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9b13200a050a..2f34a31d8d0d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
+ * @hw_wb: array of wb hardware resources
* @dspp_blks: array of dspp hardware resources
*/
struct dpu_rm {
@@ -26,8 +27,10 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
+ struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
+ struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
};
/**
@@ -95,5 +98,15 @@ static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_in
return rm->hw_intf[intf_idx - INTF_0];
}
+/**
+ * dpu_rm_get_wb - Return a struct dpu_hw_wb instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @wb_idx: WB index
+ */
+static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
+{
+ return rm->hw_wb[wb_idx - WB_0];
+}
+
#endif /* __DPU_RM_H__ */
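
As with dpu_rm_get_intf(), the index is relative to the first block id
and hardware that was never initialized leaves a NULL slot, so callers
are expected to check the result; a usage sketch:

	struct dpu_hw_wb *hw_wb = dpu_rm_get_wb(&dpu_kms->rm, WB_2);

	if (!hw_wb)
		return -EINVAL;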
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 54d74341e690..76169f406505 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -167,55 +167,46 @@ TRACE_EVENT(dpu_perf_crtc_update,
__entry->update_clk)
);
-DECLARE_EVENT_CLASS(dpu_enc_irq_template,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx),
+DECLARE_EVENT_CLASS(dpu_irq_template,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
TP_STRUCT__entry(
- __field( uint32_t, drm_id )
- __field( enum dpu_intr_idx, intr_idx )
__field( int, irq_idx )
),
TP_fast_assign(
- __entry->drm_id = drm_id;
- __entry->intr_idx = intr_idx;
__entry->irq_idx = irq_idx;
),
- TP_printk("id=%u, intr=%d, irq=%d",
- __entry->drm_id, __entry->intr_idx,
- __entry->irq_idx)
+ TP_printk("irq=%d", __entry->irq_idx)
);
-DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx)
+DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
);
-DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx)
+DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
);
TRACE_EVENT(dpu_enc_irq_wait_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
+ TP_PROTO(uint32_t drm_id, void *func,
int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
- TP_ARGS(drm_id, intr_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
- __field( enum dpu_intr_idx, intr_idx )
+ __field( void *, func )
__field( int, irq_idx )
__field( enum dpu_pingpong, pp_idx )
__field( int, atomic_cnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
- __entry->intr_idx = intr_idx;
+ __entry->func = func;
__entry->irq_idx = irq_idx;
__entry->pp_idx = pp_idx;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d",
- __entry->drm_id, __entry->intr_idx,
+ TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->func,
__entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
);
@@ -389,20 +380,26 @@ TRACE_EVENT(dpu_enc_rc,
);
TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
- TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
- TP_ARGS(drm_id, event, intf_idx),
+ TP_PROTO(uint32_t drm_id, u32 event, char *intf_mode, enum dpu_intf intf_idx,
+ enum dpu_wb wb_idx),
+ TP_ARGS(drm_id, event, intf_mode, intf_idx, wb_idx),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( u32, event )
+ __string( intf_mode_str, intf_mode )
__field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->event = event;
+ __assign_str(intf_mode_str, intf_mode);
__entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
),
- TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
- __entry->intf_idx)
+ TP_printk("id=%u, event=%u, intf_mode=%s intf=%d wb=%d", __entry->drm_id,
+ __entry->event, __get_str(intf_mode_str),
+ __entry->intf_idx, __entry->wb_idx)
);
TRACE_EVENT(dpu_enc_frame_done_cb,
@@ -424,14 +421,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb,
);
TRACE_EVENT(dpu_enc_trigger_flush,
- TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ TP_PROTO(uint32_t drm_id, char *intf_mode, enum dpu_intf intf_idx, enum dpu_wb wb_idx,
int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
u32 pending_flush_ret),
- TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ TP_ARGS(drm_id, intf_mode, intf_idx, wb_idx, pending_kickoff_cnt, ctl_idx,
extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
+ __string( intf_mode_str, intf_mode )
__field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
__field( u32, extra_flush_bits )
@@ -439,15 +438,17 @@ TRACE_EVENT(dpu_enc_trigger_flush,
),
TP_fast_assign(
__entry->drm_id = drm_id;
+ __assign_str(intf_mode_str, intf_mode);
__entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
__entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
- TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ TP_printk("id=%u, intf_mode=%s, intf_idx=%d, wb_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
"extra_flush_bits=0x%x pending_flush_ret=0x%x",
- __entry->drm_id, __entry->intf_idx,
+ __entry->drm_id, __get_str(intf_mode_str), __entry->intf_idx, __entry->wb_idx,
__entry->pending_kickoff_cnt, __entry->ctl_idx,
__entry->extra_flush_bits, __entry->pending_flush_ret)
);
@@ -871,27 +872,31 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
);
-DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+TRACE_EVENT(dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, void *callback),
TP_ARGS(irq_idx, callback),
TP_STRUCT__entry(
__field( int, irq_idx )
- __field( struct dpu_irq_callback *, callback)
+ __field( void *, callback)
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->callback = callback;
),
- TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
__entry->callback)
);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
-);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
+
+TRACE_EVENT(dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq_idx:%d", __entry->irq_idx)
);
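
The switch from %pK to %ps above makes the trace resolve the callback
pointer to its symbol name instead of printing a hashed address; an
illustrative (non-driver) example:

	static void my_irq_cb(void *arg)
	{
	}

	pr_info("callback: %ps\n", my_irq_cb);	/* "callback: my_irq_cb" */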
TRACE_EVENT(dpu_core_perf_update_clk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
new file mode 100644
index 000000000000..399115e4e217
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dpu_writeback.h"
+
+static int dpu_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+ return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+ dev->mode_config.max_height);
+}
+
+static const struct drm_connector_funcs dpu_wb_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return 0;
+
+ dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
+
+ return 0;
+}
+
+static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return;
+
+ dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
+}
+
+static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
+ .get_modes = dpu_wb_conn_get_modes,
+ .prepare_writeback_job = dpu_wb_conn_prepare_job,
+ .cleanup_writeback_job = dpu_wb_conn_cleanup_job,
+};
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats)
+{
+ struct dpu_wb_connector *dpu_wb_conn;
+ int rc = 0;
+
+	dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
+	if (!dpu_wb_conn)
+		return -ENOMEM;
+
+ drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+
+	/*
+	 * DPU initializes and sets up the encoder completely for the
+	 * writeback case, so use the new drm_writeback_connector_init_with_encoder()
+	 * API to initialize the writeback connector.
+	 */
+ rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
+ &dpu_wb_conn_funcs, format_list, num_formats);
+
+ if (!rc)
+ dpu_wb_conn->wb_enc = enc;
+
+ return rc;
+}
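
A caller-side sketch, assuming the encoder has already been fully set
up by the DPU encoder code and that wb_formats lists the formats the WB
block can write out (both names illustrative):

	static const u32 wb_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565 };

	ret = dpu_writeback_init(dev, enc, wb_formats, ARRAY_SIZE(wb_formats));
	if (ret) {
		DPU_ERROR("failed to init writeback connector: %d\n", ret);
		return ret;
	}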
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
new file mode 100644
index 000000000000..5a75ea916101
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_WRITEBACK_H
+#define _DPU_WRITEBACK_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_encoder_phys.h"
+
+struct dpu_wb_connector {
+ struct drm_writeback_connector base;
+ struct drm_encoder *wb_enc;
+};
+
+static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
+{
+ return container_of(conn, struct dpu_wb_connector, base);
+}
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats);
+
+#endif /* _DPU_WRITEBACK_H */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index aaf2f26f8505..39b8fe53c29d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -11,6 +11,8 @@
#include "mdp4_kms.h"
+#ifdef CONFIG_DRM_MSM_DSI
+
struct mdp4_dsi_encoder {
struct drm_encoder base;
struct drm_panel *panel;
@@ -170,3 +172,4 @@ fail:
return ERR_PTR(ret);
}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 3cf476c55158..fb48c8c19ec3 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -21,7 +21,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
struct drm_device *dev = mdp4_kms->dev;
u32 dmap_cfg, vg_cfg;
unsigned long clk;
- int ret = 0;
pm_runtime_get_sync(dev->dev);
@@ -72,7 +71,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
pm_runtime_put_sync(dev->dev);
- return ret;
+ return 0;
}
static void mdp4_enable_commit(struct msm_kms *kms)
@@ -229,9 +228,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
return PTR_ERR(connector);
}
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
-
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
@@ -252,8 +248,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
}
}
- priv->encoders[priv->num_encoders++] = encoder;
-
break;
case DRM_MODE_ENCODER_DSI:
/* only DSI1 supported for now */
@@ -272,7 +266,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
- priv->encoders[priv->num_encoders++] = encoder;
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
@@ -324,7 +317,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
ret = PTR_ERR(plane);
goto fail;
}
- priv->planes[priv->num_planes++] = plane;
}
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
@@ -389,7 +381,7 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}
-struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
@@ -403,8 +395,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
@@ -551,12 +542,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- return kms;
+ return 0;
fail:
if (kms)
mdp4_destroy(kms);
- return ERR_PTR(ret);
+
+ return ret;
}
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
@@ -569,3 +561,47 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
return &config;
}
+
+static const struct dev_pm_ops mdp4_pm_ops = {
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static int mdp4_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, mdp4_kms_init);
+}
+
+static int mdp4_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mdp4_dt_match[] = {
+ { .compatible = "qcom,mdp4" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mdp4_dt_match);
+
+static struct platform_driver mdp4_platform_driver = {
+ .probe = mdp4_probe,
+ .remove = mdp4_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "mdp4",
+ .of_match_table = mdp4_dt_match,
+ .pm = &mdp4_pm_ops,
+ },
+};
+
+void __init msm_mdp4_register(void)
+{
+ platform_driver_register(&mdp4_platform_driver);
+}
+
+void __exit msm_mdp4_unregister(void)
+{
+ platform_driver_unregister(&mdp4_platform_driver);
+}
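
These hooks are intended to be driven from the top-level msm module
init/exit; a sketch of the expected call sites (the surrounding
function and driver names here are assumptions, not the actual msm_drv
code):

	static int __init msm_drm_register(void)
	{
		msm_mdp4_register();	/* registers mdp4_platform_driver */
		return platform_driver_register(&msm_platform_driver);
	}

	static void __exit msm_drm_unregister(void)
	{
		platform_driver_unregister(&msm_platform_driver);
		msm_mdp4_unregister();
	}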
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index ec6c7b09865e..a640af22eafc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -8,6 +8,8 @@
#include "mdp5_kms.h"
+#ifdef CONFIG_DRM_MSM_DSI
+
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -198,3 +200,4 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return 0;
}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b966cd69f99d..31447da0af25 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -612,9 +612,15 @@ static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
if (ret)
return ret;
- mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
if (old_r_mixer) {
- mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
@@ -991,8 +997,10 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
- if (ret)
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
return -EINVAL;
+ }
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3b92372e7bdf..3d5621a68f85 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -203,6 +203,8 @@ static int mdp5_set_split_display(struct msm_kms *kms,
slave_encoder);
}
+static void mdp5_destroy(struct platform_device *pdev);
+
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -221,6 +223,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
}
mdp_kms_destroy(&mdp5_kms->base);
+ mdp5_destroy(mdp5_kms->pdev);
}
#ifdef CONFIG_DEBUG_FS
@@ -319,7 +322,6 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
encoder = mdp5_encoder_init(dev, intf, ctl);
@@ -328,8 +330,6 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
return encoder;
}
- priv->encoders[priv->num_encoders++] = encoder;
-
return encoder;
}
@@ -434,6 +434,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
/*
* Construct encoders and modeset initialize connector devices
@@ -445,12 +447,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
/*
	 * We should ideally have fewer encoders (set up by parsing the
	 * MDP5 interfaces) than the number of layer mixers present in HW,
	 * but let's be safe here anyway.
*/
- num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
+ num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -475,7 +481,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
- priv->planes[priv->num_planes++] = plane;
if (type == DRM_PLANE_TYPE_PRIMARY)
primary[pi++] = plane;
@@ -499,11 +504,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* Now that we know the number of crtcs we've created, set the possible
* crtcs for the encoders
*/
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
-
+ drm_for_each_encoder(encoder, dev)
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
- }
return 0;
@@ -544,7 +546,9 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
+
+static int mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev;
@@ -555,10 +559,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
int irq, i, ret;
struct device *iommu_dev;
+	ret = mdp5_init(to_platform_device(dev->dev), dev);
+	if (ret)
+		return ret;
+
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
if (!kms)
- return NULL;
+ return -ENOMEM;
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pdev = mdp5_kms->pdev;
@@ -570,9 +576,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
@@ -637,11 +643,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
- return kms;
+ return 0;
fail:
if (kms)
mdp5_kms_destroy(kms);
- return ERR_PTR(ret);
+
+ return ret;
}
static void mdp5_destroy(struct platform_device *pdev)
@@ -912,35 +919,14 @@ fail:
return ret;
}
-static int mdp5_bind(struct device *dev, struct device *master, void *data)
-{
- struct msm_drm_private *priv = dev_get_drvdata(master);
- struct drm_device *ddev = priv->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- DBG("");
-
- return mdp5_init(pdev, ddev);
-}
-
-static void mdp5_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- mdp5_destroy(pdev);
-}
-
-static const struct component_ops mdp5_ops = {
- .bind = mdp5_bind,
- .unbind = mdp5_unbind,
-};
-
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
- struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
- struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");
+	/*
+	 * Interconnects are a part of the MDSS device tree binding, not
+	 * of the MDP5 device.
+	 */
+ struct device *mdss_dev = pdev->dev.parent;
+ struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
+ struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
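
With the paths owned by the parent MDSS node, each lookup goes through
pdev->dev.parent; a condensed sketch of a single path (the bandwidth
value is illustrative):

	struct icc_path *path0 = of_icc_get(pdev->dev.parent, "mdp0-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);
	icc_set_bw(path0, 0, MBps_to_icc(6400));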
@@ -976,13 +962,13 @@ static int mdp5_dev_probe(struct platform_device *pdev)
if (ret)
return ret;
- return component_add(&pdev->dev, &mdp5_ops);
+ return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}
static int mdp5_dev_remove(struct platform_device *pdev)
{
DBG("");
- component_del(&pdev->dev, &mdp5_ops);
+ component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
@@ -1008,9 +994,11 @@ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
};
-const struct of_device_id mdp5_dt_match[] = {
+static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
@@ -1021,6 +1009,7 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
.remove = mdp5_dev_remove,
+ .shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
deleted file mode 100644
index 049c6784a531..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ /dev/null
@@ -1,252 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-
-#include "msm_drv.h"
-#include "mdp5_kms.h"
-
-#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
-
-struct mdp5_mdss {
- struct msm_mdss base;
-
- void __iomem *mmio, *vbif;
-
- struct clk *ahb_clk;
- struct clk *axi_clk;
- struct clk *vsync_clk;
-
- struct {
- volatile unsigned long enabled_mask;
- struct irq_domain *domain;
- } irqcontroller;
-};
-
-static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
-{
- msm_writel(data, mdp5_mdss->mmio + reg);
-}
-
-static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
-{
- return msm_readl(mdp5_mdss->mmio + reg);
-}
-
-static irqreturn_t mdss_irq(int irq, void *arg)
-{
- struct mdp5_mdss *mdp5_mdss = arg;
- u32 intr;
-
- intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
-
- VERB("intr=%08x", intr);
-
- while (intr) {
- irq_hw_number_t hwirq = fls(intr) - 1;
-
- generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
- intr &= ~(1 << hwirq);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
- MDSS_HW_INTR_STATUS_INTR_DSI0 | \
- MDSS_HW_INTR_STATUS_INTR_DSI1 | \
- MDSS_HW_INTR_STATUS_INTR_HDMI | \
- MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdss_hw_mask_irq(struct irq_data *irqd)
-{
- struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void mdss_hw_unmask_irq(struct irq_data *irqd)
-{
- struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip mdss_hw_irq_chip = {
- .name = "mdss",
- .irq_mask = mdss_hw_mask_irq,
- .irq_unmask = mdss_hw_unmask_irq,
-};
-
-static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct mdp5_mdss *mdp5_mdss = d->host_data;
-
- if (!(VALID_IRQS & (1 << hwirq)))
- return -EPERM;
-
- irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdp5_mdss);
-
- return 0;
-}
-
-static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
- .map = mdss_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-
-static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
-{
- struct device *dev = mdp5_mdss->base.dev;
- struct irq_domain *d;
-
- d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdp5_mdss);
- if (!d) {
- DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
- return -ENXIO;
- }
-
- mdp5_mdss->irqcontroller.enabled_mask = 0;
- mdp5_mdss->irqcontroller.domain = d;
-
- return 0;
-}
-
-static int mdp5_mdss_enable(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
- DBG("");
-
- clk_prepare_enable(mdp5_mdss->ahb_clk);
- clk_prepare_enable(mdp5_mdss->axi_clk);
- clk_prepare_enable(mdp5_mdss->vsync_clk);
-
- return 0;
-}
-
-static int mdp5_mdss_disable(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
- DBG("");
-
- clk_disable_unprepare(mdp5_mdss->vsync_clk);
- clk_disable_unprepare(mdp5_mdss->axi_clk);
- clk_disable_unprepare(mdp5_mdss->ahb_clk);
-
- return 0;
-}
-
-static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
-{
- struct platform_device *pdev =
- to_platform_device(mdp5_mdss->base.dev);
-
- mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdp5_mdss->ahb_clk))
- mdp5_mdss->ahb_clk = NULL;
-
- mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdp5_mdss->axi_clk))
- mdp5_mdss->axi_clk = NULL;
-
- mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdp5_mdss->vsync_clk))
- mdp5_mdss->vsync_clk = NULL;
-
- return 0;
-}
-
-static void mdp5_mdss_destroy(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
-
- if (!mdp5_mdss)
- return;
-
- irq_domain_remove(mdp5_mdss->irqcontroller.domain);
- mdp5_mdss->irqcontroller.domain = NULL;
-
- pm_runtime_disable(mdss->dev);
-}
-
-static const struct msm_mdss_funcs mdss_funcs = {
- .enable = mdp5_mdss_enable,
- .disable = mdp5_mdss_disable,
- .destroy = mdp5_mdss_destroy,
-};
-
-int mdp5_mdss_init(struct platform_device *pdev)
-{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct mdp5_mdss *mdp5_mdss;
- int ret;
-
- DBG("");
-
- if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"))
- return 0;
-
- mdp5_mdss = devm_kzalloc(&pdev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
- if (!mdp5_mdss) {
- ret = -ENOMEM;
- goto fail;
- }
-
- mdp5_mdss->base.dev = &pdev->dev;
-
- mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys");
- if (IS_ERR(mdp5_mdss->mmio)) {
- ret = PTR_ERR(mdp5_mdss->mmio);
- goto fail;
- }
-
- mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys");
- if (IS_ERR(mdp5_mdss->vbif)) {
- ret = PTR_ERR(mdp5_mdss->vbif);
- goto fail;
- }
-
- ret = msm_mdss_get_clocks(mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to get clocks: %d\n", ret);
- goto fail;
- }
-
- ret = devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to init irq: %d\n", ret);
- goto fail;
- }
-
- ret = mdss_irq_domain_init(mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to init sub-block irqs: %d\n", ret);
- goto fail;
- }
-
- mdp5_mdss->base.funcs = &mdss_funcs;
- priv->mdss = &mdp5_mdss->base;
-
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-fail:
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 954db683ae44..2536def2a000 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
return 0;
}
-void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
- struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
+ struct mdp5_hw_mixer_state *new_state;
if (!mixer)
- return;
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
}
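
The reason for the void-to-int conversion is that mdp5_get_global_state()
can return ERR_PTR(-EDEADLK) when the global modeset lock has to be
backed off, so the release helpers now report that instead of
dereferencing an error pointer; the same pattern is applied to the pipe
release path below. In short:

	struct mdp5_global_state *gs = mdp5_get_global_state(s);

	if (IS_ERR(gs))
		return PTR_ERR(gs);	/* typically -EDEADLK: retry the commit */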
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 43c9ba43ce18..545ee223b9d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
-void mdp5_mixer_release(struct drm_atomic_state *s,
- struct mdp5_hw_mixer *mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ba6695963aa6..a4f5cb90f3e8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -119,18 +119,23 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
return 0;
}
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *state = mdp5_get_global_state(s);
- struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+ struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
- return;
+ return 0;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
@@ -141,6 +146,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index 9b26d0761bd4..cca67938cab2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index c478d25f7825..e8c47a4a1d31 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -314,12 +314,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_state->r_hwpipe = NULL;
- mdp5_pipe_release(state->state, old_hwpipe);
- mdp5_pipe_release(state->state, old_right_hwpipe);
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
}
} else {
- mdp5_pipe_release(state->state, mdp5_state->hwpipe);
- mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
@@ -385,8 +397,6 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
if (!crtc_state->active)
return -EINVAL;
- mdp5_state = to_mdp5_plane_state(new_plane_state);
-
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;