Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c              9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c              9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c              6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h              3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c             53
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h             11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c     2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c         16
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c               10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c                      4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c                 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c        30
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c    4
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c                 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                     15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                     47
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c               7
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h                     70
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_devfreq.c              9
-rw-r--r--  drivers/gpu/drm/msm/msm_submitqueue.c             72
20 files changed, 269 insertions, 113 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 4534633fe7cd..8fb847c174ff 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
- ret = IS_ERR(icc_path);
- if (ret)
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
goto fail;
+ }
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
- ret = IS_ERR(ocmem_icc_path);
- if (ret) {
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
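The bug fixed in this hunk (and the identical one in a4xx below) is a classic ERR_PTR mixup: IS_ERR() yields only 0 or 1, so the old "ret = IS_ERR(icc_path)" discarded the actual errno, and the "ret != -ENODATA" test further down could never match. A minimal userspace sketch of the convention, using simplified stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR() and a hypothetical fake_icc_get() in place of devm_of_icc_get():

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* hypothetical stand-in for devm_of_icc_get() failing with -ENODATA */
    static void *fake_icc_get(void)
    {
        return ERR_PTR(-ENODATA);
    }

    int main(void)
    {
        void *ocmem_icc_path = fake_icc_get();
        int ret;

        ret = IS_ERR(ocmem_icc_path);      /* old code: ret == 1, errno lost */
        printf("buggy: ret = %d\n", ret);

        if (IS_ERR(ocmem_icc_path)) {      /* fixed: recover the errno */
            ret = PTR_ERR(ocmem_icc_path);
            printf("fixed: ret = %d, -ENODATA check now works\n", ret);
        }
        return 0;
    }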
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 82bebb40234d..a96ee79cc5e0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
- ret = IS_ERR(icc_path);
- if (ret)
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
goto fail;
+ }
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
- ret = IS_ERR(ocmem_icc_path);
- if (ret) {
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index a7c58018959f..8b73f70766a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
u32 val;
int request, ack;
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int bit;
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (!pdev)
return -ENODEV;
+ mutex_init(&gmu->lock);
+
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 3c74f64e3126..84bd516f01e8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
struct a6xx_gmu {
struct device *dev;
+ /* For serializing communication with the GMU: */
+ struct mutex lock;
+
struct msm_gem_address_space *aspace;
void * __iomem mmio;
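With the new gmu->lock, a6xx_gmu_set_oob() and a6xx_gmu_clear_oob() no longer serialize themselves; the WARN_ON_ONCE(!mutex_is_locked(...)) lines assert that the caller already holds the lock. A rough userspace analogue of that guard follows; pthreads has no mutex_is_locked(), so an explicit flag stands in:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t gmu_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool gmu_lock_held;    /* manual stand-in for mutex_is_locked() */

    static void gmu_set_oob(void)
    {
        /* like WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)) */
        if (!gmu_lock_held)
            fprintf(stderr, "WARN: OOB request without gmu lock\n");

        /* ... write the out-of-band request register ... */
    }

    int main(void)
    {
        gmu_set_oob();                /* buggy caller: warns */

        pthread_mutex_lock(&gmu_lock);
        gmu_lock_held = true;
        gmu_set_oob();                /* correct caller: silent */
        gmu_lock_held = false;
        pthread_mutex_unlock(&gmu_lock);
        return 0;
    }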
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 40c9fef457a4..267a880811d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx == a6xx_gpu->cur_ctx)
+ if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
- a6xx_gpu->cur_ctx = ctx;
+ a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- a6xx_gpu->cur_ctx = NULL;
+ a6xx_gpu->cur_ctx_seqno = 0;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ out:
return ret;
}
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
static void a6xx_dump(struct msm_gpu *gpu)
{
DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
trace_msm_gpu_resume(0);
+ mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_resume(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
msm_devfreq_suspend(gpu);
+ mutex_lock(&a6xx_gpu->gmu.lock);
ret = a6xx_gmu_stop(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
if (ret)
return ret;
@@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- static DEFINE_MUTEX(perfcounter_oob);
- mutex_lock(&perfcounter_oob);
+ mutex_lock(&a6xx_gpu->gmu.lock);
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
- REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
- mutex_unlock(&perfcounter_oob);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
return 0;
}
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ a6xx_gmu_set_freq(gpu, opp);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
static struct msm_gem_address_space *
a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
- .gpu_set_freq = a6xx_gmu_set_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
@@ -1810,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
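The a6xx_hw_init() change is the usual locked-wrapper split: the body becomes a lock-free hw_init(), and the entry point left in the funcs table takes gmu.lock around it, matching the resume, suspend, get_timestamp and set_freq paths that now bracket all GMU traffic with the same mutex. A compilable sketch of the shape, with a pthread mutex standing in for the GMU lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t gmu_lock = PTHREAD_MUTEX_INITIALIZER;

    /* the renamed, lock-free body: caller must hold gmu_lock */
    static int hw_init(void)
    {
        printf("programming the GPU; GMU traffic is serialized\n");
        return 0;
    }

    /* the entry point the funcs table keeps calling */
    static int a6xx_hw_init(void)
    {
        int ret;

        pthread_mutex_lock(&gmu_lock);
        ret = hw_init();
        pthread_mutex_unlock(&gmu_lock);

        return ret;
    }

    int main(void)
    {
        return a6xx_hw_init();
    }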
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 0bc2d062f54a..8e5527c881b1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -19,7 +19,16 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
- struct msm_file_private *cur_ctx;
+
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the context with current pgtables
+ * installed. Tracked by seqno rather than pointer value to
+ * avoid dangling pointers, and cases where a ctx can be freed
+ * and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
struct a6xx_gmu gmu;
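The kdoc above explains the seqno-versus-pointer choice; the sketch below shows the failure mode it avoids. Once a context is freed, the next allocation can land at the same address, so a pointer compare may falsely report that its pagetables are already installed, while a monotonically increasing seqno never repeats:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct ctx { int seqno; };

    int main(void)
    {
        static int ident;
        struct ctx *a = malloc(sizeof(*a));

        if (!a)
            return 1;
        a->seqno = ++ident;

        /* pagetables for 'a' installed: remember it both ways */
        uintptr_t cur_ctx = (uintptr_t)a;    /* old scheme: pointer value */
        int cur_ctx_seqno = a->seqno;        /* new scheme: seqno */

        free(a);
        struct ctx *b = malloc(sizeof(*b));  /* may reuse a's address */

        if (!b)
            return 1;
        b->seqno = ++ident;

        printf("pointer match: %d  (1 here is a stale false positive)\n",
               (uintptr_t)b == cur_ctx);
        printf("seqno match:   %d  (correctly 0)\n",
               b->seqno == cur_ctx_seqno);
        free(b);
        return 0;
    }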
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index b131fd376192..700d65e39feb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
-1),
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index f482e0911d03..bb7d066618e6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
}
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
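mdp5_crtc_init() now selects between two drm_crtc_funcs tables: when a dedicated cursor plane exists, the table without the LM-cursor hooks is registered, so the unsupported callbacks are simply absent rather than present and failing at runtime. A sketch of that pick-the-vtable-at-init pattern (struct and function names here are illustrative, not the mdp5 symbols):

    #include <stdio.h>

    struct crtc_funcs {
        const char *name;
        void (*cursor_set)(void);  /* NULL when a cursor plane handles it */
    };

    static void lm_cursor_set(void) { printf("LM cursor programmed\n"); }

    static const struct crtc_funcs crtc_funcs = {
        .name = "lm-cursor", .cursor_set = lm_cursor_set,
    };
    static const struct crtc_funcs crtc_no_lm_cursor_funcs = {
        .name = "no-lm-cursor",    /* .cursor_set deliberately left NULL */
    };

    static const struct crtc_funcs *crtc_init(int have_cursor_plane)
    {
        return have_cursor_plane ? &crtc_no_lm_cursor_funcs : &crtc_funcs;
    }

    int main(void)
    {
        printf("funcs: %s\n", crtc_init(1)->name);
        printf("funcs: %s\n", crtc_init(0)->name);
        return 0;
    }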
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index fbe4c2cd52a3..a0392e4d8134 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
* cannot declare the display connected unless the
* HDMI cable is plugged in and the dongle's
* sink_count becomes 1;
+ * also, only signal audio when disconnected
*/
- if (dp->link->sink_count)
+ if (dp->link->sink_count) {
dp->dp_display.is_connected = true;
- else
+ } else {
dp->dp_display.is_connected = false;
-
- dp_display_handle_plugged_change(g_dp_display,
- dp->dp_display.is_connected);
+ dp_display_handle_plugged_change(g_dp_display, false);
+ }
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
dp->link->sink_count, dp->dp_display.is_connected,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 614dc7f26f2c..75ae3008b68f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+ ret = -EINVAL;
goto fail;
+ }
msm_dsi->encoder = encoder;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index e269df285136..c86b5090fae6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
return 0;
err:
- for (; i > 0; i--)
+ while (--i >= 0)
clk_disable_unprepare(msm_host->bus_clks[i]);
return ret;
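The dsi_bus_clk_enable() unwind fix deserves spelling out: when clk_prepare_enable() fails at index i, clock i itself was never enabled, so cleanup must disable exactly clocks 0 through i-1. The old "for (; i > 0; i--)" wrongly touched index i and skipped index 0; "while (--i >= 0)" walks i-1 down to 0. A self-contained sketch with hypothetical clock helpers and a simulated failure:

    #include <stdio.h>

    #define NCLKS 4

    /* hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare() */
    static int clk_prepare_enable(int i)
    {
        if (i == 2)                    /* simulate failure on clock 2 */
            return -1;
        printf("enabled clk %d\n", i);
        return 0;
    }

    static void clk_disable_unprepare(int i)
    {
        printf("disabled clk %d\n", i);
    }

    int main(void)
    {
        int i, ret = 0;

        for (i = 0; i < NCLKS; i++) {
            ret = clk_prepare_enable(i);
            if (ret)
                goto err;
        }
        return 0;
    err:
        /* old: for (; i > 0; i--) disabled clk 2 (never enabled) and
         * left clk 0 enabled; new: undoes exactly what succeeded */
        while (--i >= 0)
            clk_disable_unprepare(i);
        return ret;
    }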
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index d13552b2213b..5b4e991f220d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
u32 nb_tries, u32 timeout_us)
{
- bool pll_locked = false;
+ bool pll_locked = false, pll_ready = false;
void __iomem *base = pll_14nm->phy->pll_base;
u32 tries, val;
tries = nb_tries;
while (tries--) {
- val = dsi_phy_read(base +
- REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_locked = !!(val & BIT(5));
if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
udelay(timeout_us);
}
- if (!pll_locked) {
- tries = nb_tries;
- while (tries--) {
- val = dsi_phy_read(base +
- REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
- pll_locked = !!(val & BIT(0));
+ if (!pll_locked)
+ goto out;
- if (pll_locked)
- break;
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_ready = !!(val & BIT(0));
- udelay(timeout_us);
- }
+ if (pll_ready)
+ break;
+
+ udelay(timeout_us);
}
- DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+out:
+ DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
- return pll_locked;
+ return pll_locked && pll_ready;
}
static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index aaa37456f4ee..71ed4aa0dc67 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+ snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
hw = devm_clk_hw_register_divider(dev, clk_name,
parent_name, 0, pll_28nm->phy->pll_base +
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 4fb397ee7c84..fe1366b4c49f 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
int msm_edp_ctrl_init(struct msm_edp *edp)
{
struct edp_ctrl *ctrl = NULL;
- struct device *dev = &edp->pdev->dev;
+ struct device *dev;
int ret;
if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
return -EINVAL;
}
+ dev = &edp->pdev->dev;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
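The edp_ctrl fix moves a dereference behind its NULL check: initializing dev from &edp->pdev->dev at declaration time dereferences edp before "if (!edp)" can reject it. A compact sketch of the hazard and the fix, with a hypothetical struct layout standing in for msm_edp:

    #include <stdio.h>

    /* hypothetical layout standing in for msm_edp/platform_device */
    struct pdev { int dev; };
    struct msm_edp { struct pdev *pdev; };

    static int ctrl_init(struct msm_edp *edp)
    {
        int *dev;    /* NOT: int *dev = &edp->pdev->dev; (derefs NULL) */

        if (!edp) {
            fprintf(stderr, "%s: invalid input\n", __func__);
            return -1;
        }

        dev = &edp->pdev->dev;    /* safe: edp was checked above */
        printf("dev = %d\n", *dev);
        return 0;
    }

    int main(void)
    {
        struct pdev p = { .dev = 42 };
        struct msm_edp e = { .pdev = &p };

        ctrl_init(&e);        /* prints dev = 42 */
        ctrl_init(NULL);      /* rejected instead of crashing */
        return 0;
    }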
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 2e6fc185e54d..d4e09703a87d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
+ if (kms) {
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ }
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)
static int context_init(struct drm_device *dev, struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx;
@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->submitqueues);
+ rwlock_init(&ctx->queuelock);
+
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
+ ctx->seqno = atomic_inc_return(&ident);
+
return 0;
}
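context_init() now stamps every drm_file with a unique seqno drawn from a function-local atomic counter; this is the value a6xx_set_pagetable() compares against cur_ctx_seqno. A userspace sketch using C11 atomics in place of the kernel's atomic_t and atomic_inc_return():

    #include <stdatomic.h>
    #include <stdio.h>

    static int context_init(void)
    {
        static atomic_int ident;    /* like: static atomic_t ident */

        /* atomic_fetch_add + 1 mirrors the kernel's atomic_inc_return() */
        return atomic_fetch_add(&ident, 1) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("ctx->seqno = %d\n", context_init());    /* 1, 2, 3 */
        return 0;
    }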
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8b005d1ac899..c552f0c3890c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -53,14 +53,6 @@ struct msm_disp_state;
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-struct msm_file_private {
- rwlock_t queuelock;
- struct list_head submitqueues;
- int queueid;
- struct msm_gem_address_space *aspace;
- struct kref ref;
-};
-
enum msm_mdp_plane_property {
PLANE_PROP_ZPOS,
PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
- u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
- u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
- struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
-
- msm_gem_address_space_put(ctx->aspace);
- kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
- kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
-{
- kref_get(&ctx->ref);
- return ctx;
-}
-
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
- unsigned long remaining_jiffies;
+ s64 remaining_jiffies;
if (ktime_compare(*timeout, now) < 0) {
remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
- return remaining_jiffies;
+ return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
}
#endif /* __MSM_DRV_H__ */
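The timeout_to_jiffies() change keeps the remaining time in a signed 64-bit value and clamps it to [0, INT_MAX], so an effectively infinite absolute timeout from userspace cannot wrap or truncate when the result is narrowed later. A sketch with a simplified clamp() and an assumed HZ of 250, both stand-ins for the kernel macros:

    #include <stdint.h>
    #include <stdio.h>
    #include <limits.h>

    #define HZ 250    /* illustrative tick rate */

    /* simplified stand-in for the kernel's clamp() macro */
    static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static unsigned long timeout_to_jiffies(int64_t remaining_ns)
    {
        int64_t remaining_jiffies = remaining_ns / (1000000000LL / HZ);

        return clamp64(remaining_jiffies, 0, INT_MAX);
    }

    int main(void)
    {
        printf("expired:  %lu\n", timeout_to_jiffies(-1000000000LL)); /* 0 */
        printf("1 second: %lu\n", timeout_to_jiffies(1000000000LL));  /* HZ */
        printf("forever:  %lu\n", timeout_to_jiffies(INT64_MAX)); /* INT_MAX */
        return 0;
    }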
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fdc5367aecaa..151d19e4453c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (!submit)
return ERR_PTR(-ENOMEM);
- ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, queue);
if (ret) {
kfree(submit);
return ERR_PTR(ret);
@@ -171,7 +171,8 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
- unsigned i, sz;
+ unsigned i;
+ size_t sz;
int ret = 0;
for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
- drm_sched_entity_push_job(&submit->base, &queue->entity);
+ drm_sched_entity_push_job(&submit->base, queue->entity);
args->fence = submit->fence_id;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0e4b45bff2e6..ee25d556c8a1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -203,6 +203,10 @@ struct msm_gpu {
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
@@ -258,6 +262,39 @@ struct msm_gpu_perfcntr {
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock: synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid: counter incremented each time a submitqueue is created,
+ * used to assign &msm_gpu_submitqueue.id
+ * @aspace: the per-process GPU address-space
+ * @ref: reference count
+ * @seqno: unique per process seqno
+ */
+struct msm_file_private {
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
+ struct msm_gem_address_space *aspace;
+ struct kref ref;
+ int seqno;
+
+ /**
+ * entities:
+ *
+ * Table of per-priority-level sched entities used by submitqueues
+ * associated with this &drm_file. Because some userspace apps
+ * make assumptions about rendering from multiple gl contexts
+ * (of the same priority) within the process happening in FIFO
+ * order without requiring any fencing beyond MakeCurrent(), we
+ * create at most one &drm_sched_entity per-process per-priority-
+ * level.
+ */
+ struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
+/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
* @gpu: the gpu instance
@@ -304,6 +341,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
}
/**
+ * struct msm_gpu_submitqueue - Userspace created context.
+ *
* A submitqueue is associated with a gl context or vk queue (or equiv)
* in userspace.
*
@@ -321,7 +360,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* seqno, protected by submitqueue lock
* @lock: submitqueue lock
* @ref: reference count
- * @entity: the submit job-queue
+ * @entity: the submit job-queue
*/
struct msm_gpu_submitqueue {
int id;
@@ -333,7 +372,7 @@ struct msm_gpu_submitqueue {
struct idr fence_idr;
struct mutex lock;
struct kref ref;
- struct drm_sched_entity entity;
+ struct drm_sched_entity *entity;
};
struct msm_gpu_state_bo {
@@ -421,6 +460,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+ struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+ kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+ struct msm_file_private *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
+
void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
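The entities[] table is sized NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS and indexed by flattening the (ring, scheduler-priority) pair, exactly as get_sched_entity() in msm_submitqueue.c does below; two submitqueues in one process with the same ring and priority therefore share one entity, which is what gives the FIFO ordering the comment describes. A sketch with illustrative constants:

    #include <stdio.h>

    #define NR_SCHED_PRIORITIES 3  /* illustrative; really 1 + HIGH - MIN */
    #define MSM_GPU_MAX_RINGS   4

    static unsigned entity_idx(unsigned ring_nr, unsigned sched_prio)
    {
        /* same flattening as get_sched_entity() */
        return ring_nr * NR_SCHED_PRIORITIES + sched_prio;
    }

    int main(void)
    {
        void *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS] = { 0 };

        printf("slots: %zu\n", sizeof(entities) / sizeof(entities[0]));
        printf("ring 1 / prio 2 -> idx %u, shared by every submitqueue\n"
               "with that ring+priority in this process\n",
               entity_idx(1, 2));
        return 0;
    }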
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 0a1ee20296a2..20006d060b5b 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
unsigned int idle_time;
unsigned long target_freq = df->idle_freq;
+ if (!df->devfreq)
+ return;
+
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
struct msm_gpu_devfreq *df = &gpu->devfreq;
unsigned long idle_freq, target_freq = 0;
+ if (!df->devfreq)
+ return;
+
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@@ -194,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
idle_freq = get_freq(gpu);
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ if (gpu->clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 32a55d81b58b..b8621c6e0554 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -7,6 +7,24 @@
#include "msm_gpu.h"
+void __msm_file_private_destroy(struct kref *kref)
+{
+ struct msm_file_private *ctx = container_of(kref,
+ struct msm_file_private, ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+ if (!ctx->entities[i])
+ continue;
+
+ drm_sched_entity_destroy(ctx->entities[i]);
+ kfree(ctx->entities[i]);
+ }
+
+ msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx);
+}
+
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- drm_sched_entity_destroy(&queue->entity);
-
msm_file_private_put(queue->ctx);
kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
}
}
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+ unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+ static DEFINE_MUTEX(entity_lock);
+ unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+ /* We should have already validated that the requested priority is
+ * valid by the time we get here.
+ */
+ if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&entity_lock);
+
+ if (!ctx->entities[idx]) {
+ struct drm_sched_entity *entity;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int ret;
+
+ entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+ if (!entity) {
+ mutex_unlock(&entity_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+ if (ret) {
+ mutex_unlock(&entity_lock);
+ kfree(entity);
+ return ERR_PTR(ret);
+ }
+
+ ctx->entities[idx] = entity;
+ }
+
+ mutex_unlock(&entity_lock);
+
+ return ctx->entities[idx];
+}
+
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
- struct msm_ringbuffer *ring;
- struct drm_gpu_scheduler *sched;
enum drm_sched_priority sched_prio;
unsigned ring_nr;
int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
queue->ring_nr = ring_nr;
- ring = priv->gpu->rb[ring_nr];
- sched = &ring->sched;
-
- ret = drm_sched_entity_init(&queue->entity,
- sched_prio, &sched, 1, NULL);
- if (ret) {
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ if (IS_ERR(queue->entity)) {
+ ret = PTR_ERR(queue->entity);
kfree(queue);
return ret;
}
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
*/
default_prio = DIV_ROUND_UP(max_priority, 2);
- INIT_LIST_HEAD(&ctx->submitqueues);
-
- rwlock_init(&ctx->queuelock);
-
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
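get_sched_entity() is lazy initialization under a lock: the file-scope entity_lock makes the check-then-create sequence atomic, so two threads creating submitqueues concurrently cannot both populate the same slot. A userspace reduction of the pattern:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_SLOTS 12

    static pthread_mutex_t entity_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *entities[NR_SLOTS];

    static int *get_entity(unsigned idx)
    {
        if (idx >= NR_SLOTS)           /* like the WARN_ON() guard */
            return NULL;

        pthread_mutex_lock(&entity_lock);

        if (!entities[idx]) {
            int *e = malloc(sizeof(*e));

            if (!e) {
                pthread_mutex_unlock(&entity_lock);
                return NULL;
            }
            *e = (int)idx;             /* stand-in for drm_sched_entity_init() */
            entities[idx] = e;
        }

        pthread_mutex_unlock(&entity_lock);

        return entities[idx];
    }

    int main(void)
    {
        /* two "submitqueues" asking for the same slot get one entity */
        printf("%p == %p\n", (void *)get_entity(3), (void *)get_entity(3));
        free(entities[3]);
        return 0;
    }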