Diffstat (limited to 'drivers/gpu/drm/msm')
31 files changed, 650 insertions, 550 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 78c9e5a5e793..9f2029eca39f 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -21,6 +21,11 @@ config DRM_MSM help DRM/KMS driver for MSM/snapdragon. +config DRM_MSM_GPU_STATE + bool + depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP) + default y + config DRM_MSM_REGISTER_LOGGING bool "MSM DRM register logging" depends on DRM_MSM diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 56a70c74af4e..7a05cbf2f820 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -Idrivers/gpu/drm/msm -ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1 -ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi +ccflags-y := -I $(srctree)/$(src) +ccflags-y += -I $(srctree)/$(src)/disp/dpu1 +ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi msm-y := \ adreno/adreno_device.o \ @@ -15,7 +15,6 @@ msm-y := \ adreno/a6xx_gpu.o \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ - adreno/a6xx_gpu_state.o \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ @@ -96,6 +95,8 @@ msm-y := \ msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o +msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index d5f5e56422f5..e5fcefa49f19 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -15,9 +15,6 @@ #include <linux/types.h> #include <linux/cpumask.h> #include <linux/qcom_scm.h> -#include <linux/dma-mapping.h> -#include <linux/of_address.h> -#include <linux/soc/qcom/mdt_loader.h> #include <linux/pm_opp.h> #include <linux/nvmem-consumer.h> #include <linux/slab.h> @@ -30,94 +27,6 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname) -{ - struct device *dev = &gpu->pdev->dev; - const struct firmware *fw; - struct device_node *np; - struct resource r; - phys_addr_t mem_phys; - ssize_t mem_size; - void *mem_region = NULL; - int ret; - - if (!IS_ENABLED(CONFIG_ARCH_QCOM)) - return -EINVAL; - - np = of_get_child_by_name(dev->of_node, "zap-shader"); - if (!np) - return -ENODEV; - - np = of_parse_phandle(np, "memory-region", 0); - if (!np) - return -EINVAL; - - ret = of_address_to_resource(np, 0, &r); - if (ret) - return ret; - - mem_phys = r.start; - mem_size = resource_size(&r); - - /* Request the MDT file for the firmware */ - fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); - if (IS_ERR(fw)) { - DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname); - return PTR_ERR(fw); - } - - /* Figure out how much memory we need */ - mem_size = qcom_mdt_get_size(fw); - if (mem_size < 0) { - ret = mem_size; - goto out; - } - - /* Allocate memory for the firmware image */ - mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); - if (!mem_region) { - ret = -ENOMEM; - goto out; - } - - /* - * Load the rest of the MDT - * - * Note that we could be dealing with two different paths, since - * with upstream linux-firmware it would be in a qcom/ subdir.. - * adreno_request_fw() handles this, but qcom_mdt_load() does - * not. 
But since we've already gotten thru adreno_request_fw() - * we know which of the two cases it is: - */ - if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) { - ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, - mem_region, mem_phys, mem_size, NULL); - } else { - char *newname; - - newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname); - - ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID, - mem_region, mem_phys, mem_size, NULL); - kfree(newname); - } - if (ret) - goto out; - - /* Send the image to the secure world */ - ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID); - if (ret) - DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); - -out: - if (mem_region) - memunmap(mem_region); - - release_firmware(fw); - - return ret; -} - static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -563,8 +472,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu) static int a5xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct platform_device *pdev = gpu->pdev; int ret; /* @@ -574,23 +481,9 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) if (loaded) return a5xx_zap_shader_resume(gpu); - /* We need SCM to be able to load the firmware */ - if (!qcom_scm_is_available()) { - DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); - return -EPROBE_DEFER; - } - - /* Each GPU has a target specific zap shader firmware name to use */ - if (!adreno_gpu->info->zapfw) { - DRM_DEV_ERROR(&pdev->dev, - "Zap shader firmware file not specified for this target\n"); - return -ENODEV; - } - - ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw); + ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); loaded = !ret; - return ret; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index d1662a75c7ec..9155dafae2a9 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -3,12 +3,31 @@ #include <linux/clk.h> #include <linux/interconnect.h> +#include <linux/pm_domain.h> #include <linux/pm_opp.h> #include <soc/qcom/cmd-db.h> #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" +static void a6xx_gmu_fault(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + + /* FIXME: add a banner here */ + gmu->hung = true; + + /* Turn off the hangcheck timer while we are resetting */ + del_timer(&gpu->hangcheck_timer); + + /* Queue the GPU handler because we need to treat this as a recovery */ + queue_work(priv->wq, &gpu->recover_work); +} + static irqreturn_t a6xx_gmu_irq(int irq, void *data) { struct a6xx_gmu *gmu = data; @@ -20,8 +39,7 @@ static irqreturn_t a6xx_gmu_irq(int irq, void *data) if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) { dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); - /* Temporary until we can recover safely */ - BUG(); + a6xx_gmu_fault(gmu); } if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR) @@ -45,8 +63,7 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data) if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); - /* Temporary until we can recover safely */ - BUG(); + a6xx_gmu_fault(gmu); } return IRQ_HANDLED; @@ -165,10 +182,8 @@ static bool a6xx_gmu_check_idle_level(struct 
a6xx_gmu *gmu) } /* Wait for the GMU to get to its most idle state */ -int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu) +int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - return spin_until(a6xx_gmu_check_idle_level(gmu)); } @@ -567,7 +582,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) if (!rpmh_init) { a6xx_gmu_rpmh_init(gmu); rpmh_init = true; - } else if (state != GMU_RESET) { + } else { ret = a6xx_rpmh_start(gmu); if (ret) return ret; @@ -633,20 +648,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) -static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu) -{ - gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); - gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); - - gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, - ~A6XX_GMU_IRQ_MASK); - gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, - ~A6XX_HFI_IRQ_MASK); - - enable_irq(gmu->gmu_irq); - enable_irq(gmu->hfi_irq); -} - static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) { disable_irq(gmu->gmu_irq); @@ -656,21 +657,10 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); } -int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu) +static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) { - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; - int ret; u32 val; - /* Flush all the queues */ - a6xx_hfi_stop(gmu); - - /* Stop the interrupts */ - a6xx_gmu_irq_disable(gmu); - - /* Force off SPTP in case the GMU is managing it */ - a6xx_sptprac_disable(gmu); - /* Make sure there are no outstanding RPMh votes */ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, (val & 1), 100, 10000); @@ -680,37 +670,22 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu) (val & 1), 100, 10000); gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, (val & 1), 100, 1000); +} - /* Force off the GX GSDC */ - regulator_force_disable(gmu->gx); - - /* Disable the resources */ - clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); - pm_runtime_put_sync(gmu->dev); - - /* Re-enable the resources */ - pm_runtime_get_sync(gmu->dev); - - /* Use a known rate to bring up the GMU */ - clk_set_rate(gmu->core_clk, 200000000); - ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); - if (ret) - goto out; - - a6xx_gmu_irq_enable(gmu); - - ret = a6xx_gmu_fw_start(gmu, GMU_RESET); - if (!ret) - ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT); +/* Force the GMU off in case it isn't responsive */ +static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) +{ + /* Flush all the queues */ + a6xx_hfi_stop(gmu); - /* Set the GPU back to the highest power frequency */ - __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); + /* Stop the interrupts */ + a6xx_gmu_irq_disable(gmu); -out: - if (ret) - a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); + /* Force off SPTP in case the GMU is managing it */ + a6xx_sptprac_disable(gmu); - return ret; + /* Make sure there are no outstanding RPMh votes */ + a6xx_gmu_rpmh_off(gmu); } int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) @@ -723,19 +698,26 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) if (WARN(!gmu->mmio, "The GMU is not set up yet\n")) return 0; + gmu->hung = false; + /* Turn on the resources */ pm_runtime_get_sync(gmu->dev); /* Use a known rate to bring up the GMU */ clk_set_rate(gmu->core_clk, 200000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); - if (ret) - goto out; + if 
(ret) { + pm_runtime_put(gmu->dev); + return ret; + } /* Set the bus quota to a reasonable value for boot */ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); - a6xx_gmu_irq_enable(gmu); + /* Enable the GMU interrupt */ + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); + enable_irq(gmu->gmu_irq); /* Check to see if we are doing a cold or warm boot */ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? @@ -746,14 +728,35 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) goto out; ret = a6xx_hfi_start(gmu, status); + if (ret) + goto out; + + /* + * Turn on the GMU firmware fault interrupt after we know the boot + * sequence is successful + */ + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); + enable_irq(gmu->hfi_irq); /* Set the GPU to the highest power frequency */ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); + /* + * "enable" the GX power domain which won't actually do anything but it + * will make sure that the refcounting is correct in case we need to + * bring down the GX after a GMU failure + */ + if (!IS_ERR(gmu->gxpd)) + pm_runtime_get(gmu->gxpd); + out: - /* Make sure to turn off the boot OOB request on error */ - if (ret) - a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); + /* On failure, shut down the GMU to leave it in a good state */ + if (ret) { + disable_irq(gmu->gmu_irq); + a6xx_rpmh_stop(gmu); + pm_runtime_put(gmu->dev); + } return ret; } @@ -773,11 +776,12 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) return true; } -int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) +/* Gracefully try to shut down the GMU and by extension the GPU */ +static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u32 val; /* @@ -787,10 +791,19 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); if (val != 0xf) { - int ret = a6xx_gmu_wait_for_idle(a6xx_gpu); + int ret = a6xx_gmu_wait_for_idle(gmu); - /* Temporary until we can recover safely */ - BUG_ON(ret); + /* If the GMU isn't responding assume it is hung */ + if (ret) { + a6xx_gmu_force_off(gmu); + return; + } + + /* Clear the VBIF pipe before shutting down */ + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) + == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); /* tell the GMU we want to slumber */ a6xx_gmu_notify_slumber(gmu); @@ -822,10 +835,37 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); +} + + +int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) +{ + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + struct msm_gpu *gpu = &a6xx_gpu->base.base; + + if (!pm_runtime_active(gmu->dev)) + return 0; + + /* + * Force the GMU off if we detected a hang, otherwise try to shut it + * down gracefully + */ + if (gmu->hung) + a6xx_gmu_force_off(gmu); + else + a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ icc_set_bw(gpu->icc_path, 0, 0); + /* + * Make sure the GX domain is off before turning off the GMU (CX) + * domain. 
Usually the GMU does this but only if the shutdown sequence + * was successful + */ + if (!IS_ERR(gmu->gxpd)) + pm_runtime_put_sync(gmu->gxpd); + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); pm_runtime_put_sync(gmu->dev); @@ -948,25 +988,20 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) } /* Return the 'arc-level' for the given frequency */ -static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) +static unsigned int a6xx_gmu_get_arc_level(struct device *dev, + unsigned long freq) { struct dev_pm_opp *opp; - struct device_node *np; - u32 val = 0; + unsigned int val; if (!freq) return 0; - opp = dev_pm_opp_find_freq_exact(dev, freq, true); + opp = dev_pm_opp_find_freq_exact(dev, freq, true); if (IS_ERR(opp)) return 0; - np = dev_pm_opp_get_of_node(opp); - - if (np) { - of_property_read_u32(np, "opp-level", &val); - of_node_put(np); - } + val = dev_pm_opp_get_level(opp); dev_pm_opp_put(opp); @@ -1002,7 +1037,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, /* Construct a vote for each frequency */ for (i = 0; i < freqs_count; i++) { u8 pindex = 0, sindex = 0; - u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]); + unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); /* Get the primary index that matches the arc level */ for (j = 0; j < pri_count; j++) { @@ -1195,9 +1230,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) if (IS_ERR_OR_NULL(gmu->mmio)) return; - pm_runtime_disable(gmu->dev); a6xx_gmu_stop(a6xx_gpu); + pm_runtime_disable(gmu->dev); + + if (!IS_ERR(gmu->gxpd)) { + pm_runtime_disable(gmu->gxpd); + dev_pm_domain_detach(gmu->gxpd, false); + } + a6xx_gmu_irq_disable(gmu); a6xx_gmu_memory_free(gmu, gmu->hfi); @@ -1223,7 +1264,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->idle_level = GMU_IDLE_STATE_ACTIVE; pm_runtime_enable(gmu->dev); - gmu->gx = devm_regulator_get(gmu->dev, "vdd"); /* Get the list of clocks */ ret = a6xx_gmu_clocks_probe(gmu); @@ -1257,6 +1297,12 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) goto err; + /* + * Get a link to the GX power domain to reset the GPU in case of GMU + * crash + */ + gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); + /* Get the power levels for the GMU and GPU */ a6xx_gmu_pwrlevels_probe(gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index c721d9165d8e..bedd8e6a63aa 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -27,9 +27,6 @@ struct a6xx_gmu_bo { /* the GMU is coming up for the first time or back from a power collapse */ #define GMU_COLD_BOOT 1 -/* The GMU is being soft reset after a fault */ -#define GMU_RESET 2 - /* * These define the level of control that the GMU has - the higher the number * the more things that the GMU hardware controls on its own. 
@@ -52,11 +49,11 @@ struct a6xx_gmu { int hfi_irq; int gmu_irq; - struct regulator *gx; - struct iommu_domain *domain; u64 uncached_iova_base; + struct device *gxpd; + int idle_level; struct a6xx_gmu_bo *hfi; @@ -78,7 +75,7 @@ struct a6xx_gmu { struct a6xx_hfi_queue queues[2]; - struct tasklet_struct hfi_tasklet; + bool hung; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index fefe773c989e..e74dce474250 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,6 +10,8 @@ #include <linux/devfreq.h> +#define GPU_PAS_ID 13 + static inline bool _a6xx_check_idle(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -343,6 +345,20 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) return 0; } +static int a6xx_zap_shader_init(struct msm_gpu *gpu) +{ + static bool loaded; + int ret; + + if (loaded) + return 0; + + ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); + + loaded = !ret; + return ret; +} + #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ @@ -491,7 +507,28 @@ static int a6xx_hw_init(struct msm_gpu *gpu) if (ret) goto out; - gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); + /* + * Try to load a zap shader into the secure world. If successful + * we can use the CP to switch out of secure mode. If not then we + * have no resource but to try to switch ourselves out manually. If we + * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will + * be blocked and a permissions violation will soon follow. + */ + ret = a6xx_zap_shader_init(gpu); + if (!ret) { + OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); + OUT_RING(gpu->rb[0], 0x00000000); + + a6xx_flush(gpu, gpu->rb[0]); + if (!a6xx_idle(gpu, gpu->rb[0])) + return -EINVAL; + } else { + /* Print a warning so if we die, we know why */ + dev_warn_once(gpu->dev->dev, + "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); + ret = 0; + } out: /* @@ -678,13 +715,15 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); int ret; - ret = a6xx_gmu_resume(a6xx_gpu); - gpu->needs_hw_init = true; + ret = a6xx_gmu_resume(a6xx_gpu); + if (ret) + return ret; + msm_gpu_resume_devfreq(gpu); - return ret; + return 0; } static int a6xx_pm_suspend(struct msm_gpu *gpu) @@ -694,18 +733,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) devfreq_suspend_device(gpu->devfreq.devfreq); - /* - * Make sure the GMU is idle before continuing (because some transitions - * may use VBIF - */ - a6xx_gmu_wait_for_idle(a6xx_gpu); - - /* Clear the VBIF pipe before shutting down */ - /* FIXME: This accesses the GPU - do we need to make sure it is on? 
*/ - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); - return a6xx_gmu_stop(a6xx_gpu); } @@ -781,14 +808,16 @@ static const struct adreno_gpu_funcs funcs = { .active_ring = a6xx_active_ring, .irq = a6xx_irq, .destroy = a6xx_destroy, -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) +#if defined(CONFIG_DRM_MSM_GPU_STATE) .show = a6xx_show, #endif .gpu_busy = a6xx_gpu_busy, .gpu_get_freq = a6xx_gmu_get_freq, .gpu_set_freq = a6xx_gmu_set_freq, +#if defined(CONFIG_DRM_MSM_GPU_STATE) .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, +#endif }, .get_timestamp = a6xx_get_timestamp, }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 528a4cfe07cd..b46279eb18c5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -46,9 +46,8 @@ struct a6xx_gpu { int a6xx_gmu_resume(struct a6xx_gpu *gpu); int a6xx_gmu_stop(struct a6xx_gpu *gpu); -int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu); +int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu); -int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu); bool a6xx_gmu_isidle(struct a6xx_gmu *gmu); int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 714ed6505e47..b907245d3d96 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -155,6 +155,7 @@ static const struct adreno_info gpulist[] = { .gmem = SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, + .zapfw = "a630_zap.mdt", }, }; @@ -229,6 +230,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { + pm_runtime_put_sync(&pdev->dev); DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret); return NULL; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 27898475cdf4..6f7f4114afcf 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -19,13 +19,148 @@ #include <linux/ascii85.h> #include <linux/interconnect.h> +#include <linux/qcom_scm.h> #include <linux/kernel.h> +#include <linux/of_address.h> #include <linux/pm_opp.h> #include <linux/slab.h> +#include <linux/soc/qcom/mdt_loader.h> #include "adreno_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" +static bool zap_available = true; + +static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, + u32 pasid) +{ + struct device *dev = &gpu->pdev->dev; + const struct firmware *fw; + struct device_node *np, *mem_np; + struct resource r; + phys_addr_t mem_phys; + ssize_t mem_size; + void *mem_region = NULL; + int ret; + + if (!IS_ENABLED(CONFIG_ARCH_QCOM)) { + zap_available = false; + return -EINVAL; + } + + np = of_get_child_by_name(dev->of_node, "zap-shader"); + if (!np) { + zap_available = false; + return -ENODEV; + } + + mem_np = of_parse_phandle(np, "memory-region", 0); + of_node_put(np); + if (!mem_np) { + zap_available = false; + return -EINVAL; + } + + ret = of_address_to_resource(mem_np, 0, &r); + of_node_put(mem_np); + if (ret) + return ret; + + mem_phys = r.start; + mem_size = resource_size(&r); + + /* Request the MDT file for the firmware */ + fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); + if (IS_ERR(fw)) { + DRM_DEV_ERROR(dev, "Unable 
to load %s\n", fwname); + return PTR_ERR(fw); + } + + /* Figure out how much memory we need */ + mem_size = qcom_mdt_get_size(fw); + if (mem_size < 0) { + ret = mem_size; + goto out; + } + + /* Allocate memory for the firmware image */ + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); + if (!mem_region) { + ret = -ENOMEM; + goto out; + } + + /* + * Load the rest of the MDT + * + * Note that we could be dealing with two different paths, since + * with upstream linux-firmware it would be in a qcom/ subdir.. + * adreno_request_fw() handles this, but qcom_mdt_load() does + * not. But since we've already gotten through adreno_request_fw() + * we know which of the two cases it is: + */ + if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) { + ret = qcom_mdt_load(dev, fw, fwname, pasid, + mem_region, mem_phys, mem_size, NULL); + } else { + char *newname; + + newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname); + + ret = qcom_mdt_load(dev, fw, newname, pasid, + mem_region, mem_phys, mem_size, NULL); + kfree(newname); + } + if (ret) + goto out; + + /* Send the image to the secure world */ + ret = qcom_scm_pas_auth_and_reset(pasid); + + /* + * If the scm call returns -EOPNOTSUPP we assume that this target + * doesn't need/support the zap shader so quietly fail + */ + if (ret == -EOPNOTSUPP) + zap_available = false; + else if (ret) + DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); + +out: + if (mem_region) + memunmap(mem_region); + + release_firmware(fw); + + return ret; +} + +int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct platform_device *pdev = gpu->pdev; + + /* Short cut if we determine the zap shader isn't available/needed */ + if (!zap_available) + return -ENODEV; + + /* We need SCM to be able to load the firmware */ + if (!qcom_scm_is_available()) { + DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); + return -EPROBE_DEFER; + } + + /* Each GPU has a target specific zap shader firmware name to use */ + if (!adreno_gpu->info->zapfw) { + zap_available = false; + DRM_DEV_ERROR(&pdev->dev, + "Zap shader firmware file not specified for this target\n"); + return -ENODEV; + } + + return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -63,6 +198,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) case MSM_PARAM_NR_RINGS: *value = gpu->nr_rings; return 0; + case MSM_PARAM_PP_PGTABLE: + *value = 0; + return 0; + case MSM_PARAM_FAULTS: + *value = gpu->global_faults; + return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 5db459bc28a7..0925606ec9b5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -252,6 +252,12 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state); int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state); int adreno_gpu_state_put(struct msm_gpu_state *state); +/* + * For a5xx and a6xx targets load the zap shader that is used to pull the GPU + * out of secure mode + */ +int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid); + /* ringbuffer helpers (the parts that are adreno specific) */ static inline void diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 
b776fca571f3..dfdfa766da8f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -46,6 +46,9 @@ #define LEFT_MIXER 0 #define RIGHT_MIXER 1 +/* timeout in ms waiting for frame done */ +#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60 + static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv = crtc->dev->dev_private; @@ -425,65 +428,6 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc, trace_dpu_crtc_complete_commit(DRMID(crtc)); } -static void _dpu_crtc_setup_mixer_for_encoder( - struct drm_crtc *crtc, - struct drm_encoder *enc) -{ - struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); - struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); - struct dpu_rm *rm = &dpu_kms->rm; - struct dpu_crtc_mixer *mixer; - struct dpu_hw_ctl *last_valid_ctl = NULL; - int i; - struct dpu_rm_hw_iter lm_iter, ctl_iter; - - dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM); - dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL); - - /* Set up all the mixers and ctls reserved by this encoder */ - for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) { - mixer = &cstate->mixers[i]; - - if (!dpu_rm_get_hw(rm, &lm_iter)) - break; - mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw; - - /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */ - if (!dpu_rm_get_hw(rm, &ctl_iter)) { - DPU_DEBUG("no ctl assigned to lm %d, using previous\n", - mixer->hw_lm->idx - LM_0); - mixer->lm_ctl = last_valid_ctl; - } else { - mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw; - last_valid_ctl = mixer->lm_ctl; - } - - /* Shouldn't happen, mixers are always >= ctls */ - if (!mixer->lm_ctl) { - DPU_ERROR("no valid ctls found for lm %d\n", - mixer->hw_lm->idx - LM_0); - return; - } - - cstate->num_mixers++; - DPU_DEBUG("setup mixer %d: lm %d\n", - i, mixer->hw_lm->idx - LM_0); - DPU_DEBUG("setup mixer %d: ctl %d\n", - i, mixer->lm_ctl->idx - CTL_0); - } -} - -static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc) -{ - struct drm_encoder *enc; - - WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); - - /* Check for mixers on all encoders attached to this crtc */ - drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask) - _dpu_crtc_setup_mixer_for_encoder(crtc, enc); -} - static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -533,10 +477,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, dev = crtc->dev; smmu_state = &dpu_crtc->smmu_state; - if (!cstate->num_mixers) { - _dpu_crtc_setup_mixers(crtc); - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); - } + _dpu_crtc_setup_lm_bounds(crtc, crtc->state); if (dpu_crtc->event) { WARN_ON(dpu_crtc->event); @@ -683,7 +624,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) DPU_ATRACE_BEGIN("frame done completion wait"); ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp, - msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT)); + msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS)); if (!ret) { DRM_ERROR("frame done wait timed out, ret:%d\n", ret); rc = -ETIMEDOUT; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 5aa3307f3f0c..82bf16d61a45 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -69,6 +69,9 @@ #define MAX_VDISPLAY_SPLIT 1080 +/* timeout in frames waiting for frame done */ +#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 + /** * enum dpu_enc_rc_events - events for 
resource control state machine * @DPU_ENC_RC_EVENT_KICKOFF: @@ -158,7 +161,7 @@ enum dpu_enc_rc_states { * Bit0 = phys_encs[0] etc. * @crtc_frame_event_cb: callback handler for frame event * @crtc_frame_event_cb_data: callback handler private data - * @frame_done_timeout: frame done timeout in Hz + * @frame_done_timeout_ms: frame done timeout in ms * @frame_done_timer: watchdog timer for frame done event * @vsync_event_timer: vsync timer * @disp_info: local copy of msm_display_info struct @@ -196,7 +199,7 @@ struct dpu_encoder_virt { void (*crtc_frame_event_cb)(void *, u32 event); void *crtc_frame_event_cb_data; - atomic_t frame_done_timeout; + atomic_t frame_done_timeout_ms; struct timer_list frame_done_timer; struct timer_list vsync_event_timer; @@ -520,8 +523,8 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector, list_for_each_entry(cur_mode, &connector->modes, head) { if (cur_mode->vdisplay == adj_mode->vdisplay && - cur_mode->hdisplay == adj_mode->hdisplay && - cur_mode->vrefresh == adj_mode->vrefresh) { + cur_mode->hdisplay == adj_mode->hdisplay && + drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) { adj_mode->private = cur_mode->private; adj_mode->private_flags |= cur_mode->private_flags; } @@ -959,10 +962,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct dpu_kms *dpu_kms; struct list_head *connector_list; struct drm_connector *conn = NULL, *conn_iter; - struct dpu_rm_hw_iter pp_iter, ctl_iter; + struct drm_crtc *drm_crtc; + struct dpu_crtc_state *cstate; + struct dpu_rm_hw_iter hw_iter; struct msm_display_topology topology; struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; - int i = 0, ret; + struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL }; + int num_lm = 0, num_ctl = 0; + int i, j, ret; if (!drm_enc) { DPU_ERROR("invalid encoder\n"); @@ -990,10 +997,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, return; } + drm_for_each_crtc(drm_crtc, drm_enc->dev) + if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc)) + break; + topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); /* Reserve dynamic resources now. 
Indicating non-AtomicTest phase */ - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state, + ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state, topology, false); if (ret) { DPU_ERROR_ENC(dpu_enc, @@ -1001,21 +1012,41 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, return; } - dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { dpu_enc->hw_pp[i] = NULL; - if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter)) + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) + break; + dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw; + } + + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL); + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) break; - dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw; + hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw; + num_ctl++; } - dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL); + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter)) + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) break; - hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw; + hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw; + num_lm++; } + cstate = to_dpu_crtc_state(drm_crtc->state); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = hw_lm[i]; + cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx]; + } + + cstate->num_mixers = num_lm; + for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; @@ -1023,18 +1054,38 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, if (!dpu_enc->hw_pp[i]) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned" "at idx: %d\n", i); - return; + goto error; } if (!hw_ctl[i]) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned" "at idx: %d\n", i); - return; + goto error; } phys->hw_pp = dpu_enc->hw_pp[i]; phys->hw_ctl = hw_ctl[i]; + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, + DPU_HW_BLK_INTF); + for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { + struct dpu_hw_intf *hw_intf; + + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) + break; + + hw_intf = (struct dpu_hw_intf *)hw_iter.hw; + if (hw_intf->idx == phys->intf_idx) + phys->hw_intf = hw_intf; + } + + if (!phys->hw_intf) { + DPU_ERROR_ENC(dpu_enc, + "no intf block assigned at idx: %d\n", + i); + goto error; + } + phys->connector = conn->state->connector; if (phys->ops.mode_set) phys->ops.mode_set(phys, mode, adj_mode); @@ -1042,6 +1093,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, } dpu_enc->mode_set_complete = true; + +error: + dpu_rm_release(&dpu_kms->rm, drm_enc); } static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) @@ -1182,7 +1236,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) } /* after phys waits for frame-done, should be no more frames pending */ - if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) { + if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); del_timer_sync(&dpu_enc->frame_done_timer); } @@ -1339,7 +1393,7 @@ static void dpu_encoder_frame_done_callback( } if (!dpu_enc->frame_busy_mask[0]) { - atomic_set(&dpu_enc->frame_done_timeout, 0); + atomic_set(&dpu_enc->frame_done_timeout_ms, 0); del_timer(&dpu_enc->frame_done_timer); 
dpu_encoder_resource_control(drm_enc, @@ -1547,8 +1601,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc, if (!ctl) continue; - if (phys->split_role != ENC_ROLE_SLAVE) + /* + * This is cleared in frame_done worker, which isn't invoked + * for async commits. So don't set this for async, since it'll + * roll over to the next commit. + */ + if (!async && phys->split_role != ENC_ROLE_SLAVE) set_bit(i, dpu_enc->frame_busy_mask); + if (!phys->ops.needs_single_flush || !phys->ops.needs_single_flush(phys)) _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0, @@ -1800,11 +1860,20 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async) trace_dpu_enc_kickoff(DRMID(drm_enc)); - atomic_set(&dpu_enc->frame_done_timeout, - DPU_FRAME_DONE_TIMEOUT * 1000 / - drm_enc->crtc->state->adjusted_mode.vrefresh); - mod_timer(&dpu_enc->frame_done_timer, jiffies + - ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000)); + /* + * Asynchronous frames don't handle FRAME_DONE events. As such, they + * shouldn't enable the frame_done watchdog since it will always time + * out. + */ + if (!async) { + unsigned long timeout_ms; + timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / + drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); + + atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); + mod_timer(&dpu_enc->frame_done_timer, + jiffies + msecs_to_jiffies(timeout_ms)); + } /* All phys encs are ready to go, trigger the kickoff */ _dpu_encoder_kickoff_phys(dpu_enc, async); @@ -2124,7 +2193,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t) DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); return; - } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) { + } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); return; } @@ -2170,7 +2239,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, spin_lock_init(&dpu_enc->enc_spinlock); - atomic_set(&dpu_enc->frame_done_timeout, 0); + atomic_set(&dpu_enc->frame_done_timeout_ms, 0); timer_setup(&dpu_enc->frame_done_timer, dpu_encoder_frame_done_timeout, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index db94f3d3bea3..97fb868a4ef6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -200,6 +200,7 @@ struct dpu_encoder_irq { * @hw_mdptop: Hardware interface to the top registers * @hw_ctl: Hardware interface to the ctl registers * @hw_pp: Hardware interface to the ping pong registers + * @hw_intf: Hardware interface to the intf registers * @dpu_kms: Pointer to the dpu_kms top level * @cached_mode: DRM mode cached at mode_set time, acted on in enable * @enabled: Whether the encoder has enabled and running a mode @@ -228,6 +229,7 @@ struct dpu_encoder_phys { struct dpu_hw_mdp *hw_mdptop; struct dpu_hw_ctl *hw_ctl; struct dpu_hw_pingpong *hw_pp; + struct dpu_hw_intf *hw_intf; struct dpu_kms *dpu_kms; struct drm_display_mode cached_mode; enum dpu_enc_split_role split_role; @@ -251,19 +253,6 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys) } /** - * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video - * mode specific operations - * @base: Baseclass physical encoder structure - * @hw_intf: Hardware interface to the intf registers - * @timing_params: Current timing parameter 
- */ -struct dpu_encoder_phys_vid { - struct dpu_encoder_phys base; - struct dpu_hw_intf *hw_intf; - struct intf_timing_params timing_params; -}; - -/** * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command * mode specific operations * @base: Baseclass physical encoder structure diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index a399e1edd313..973737fb5c9f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -404,7 +404,8 @@ static void dpu_encoder_phys_cmd_tearcheck_config( return; } - tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh); + tc_cfg.vsync_count = vsync_hz / + (mode->vtotal * drm_mode_vrefresh(mode)); /* enable external TE after kickoff to avoid premature autorefresh */ tc_cfg.hw_vsync_mode = 0; @@ -424,7 +425,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config( DPU_DEBUG_CMDENC(cmd_enc, "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n", phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz, - mode->vtotal, mode->vrefresh); + mode->vtotal, drm_mode_vrefresh(mode)); DPU_DEBUG_CMDENC(cmd_enc, "tc %d enable %u start_pos %u rd_ptr_irq %u\n", phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 3c4eb470a82c..1b7a335a6140 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -18,14 +18,14 @@ #include "dpu_trace.h" #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ - (e) && (e)->base.parent ? \ - (e)->base.parent->base.id : -1, \ + (e) && (e)->parent ? \ + (e)->parent->base.id : -1, \ (e) && (e)->hw_intf ? \ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__) #define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \ - (e) && (e)->base.parent ? \ - (e)->base.parent->base.id : -1, \ + (e) && (e)->parent ? \ + (e)->parent->base.id : -1, \ (e) && (e)->hw_intf ? \ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__) @@ -44,7 +44,7 @@ static bool dpu_encoder_phys_vid_is_master( } static void drm_mode_to_intf_timing_params( - const struct dpu_encoder_phys_vid *vid_enc, + const struct dpu_encoder_phys *phys_enc, const struct drm_display_mode *mode, struct intf_timing_params *timing) { @@ -92,7 +92,7 @@ static void drm_mode_to_intf_timing_params( timing->hsync_skew = mode->hskew; /* DSI controller cannot handle active-low sync signals. */ - if (vid_enc->hw_intf->cap->type == INTF_DSI) { + if (phys_enc->hw_intf->cap->type == INTF_DSI) { timing->hsync_polarity = 0; timing->vsync_polarity = 0; } @@ -143,11 +143,11 @@ static u32 get_vertical_total(const struct intf_timing_params *timing) * lines based on the chip worst case latencies. */ static u32 programmable_fetch_get_num_lines( - struct dpu_encoder_phys_vid *vid_enc, + struct dpu_encoder_phys *phys_enc, const struct intf_timing_params *timing) { u32 worst_case_needed_lines = - vid_enc->hw_intf->cap->prog_fetch_lines_worst_case; + phys_enc->hw_intf->cap->prog_fetch_lines_worst_case; u32 start_of_frame_lines = timing->v_back_porch + timing->vsync_pulse_width; u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines; @@ -155,26 +155,26 @@ static u32 programmable_fetch_get_num_lines( /* Fetch must be outside active lines, otherwise undefined. 
*/ if (start_of_frame_lines >= worst_case_needed_lines) { - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "prog fetch is not needed, large vbp+vsw\n"); actual_vfp_lines = 0; } else if (timing->v_front_porch < needed_vfp_lines) { /* Warn fetch needed, but not enough porch in panel config */ pr_warn_once ("low vbp+vfp may lead to perf issues in some cases\n"); - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "less vfp than fetch req, using entire vfp\n"); actual_vfp_lines = timing->v_front_porch; } else { - DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n"); + DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n"); actual_vfp_lines = needed_vfp_lines; } - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n", timing->v_front_porch, timing->v_back_porch, timing->vsync_pulse_width); - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n", worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines); @@ -194,8 +194,6 @@ static u32 programmable_fetch_get_num_lines( static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc, const struct intf_timing_params *timing) { - struct dpu_encoder_phys_vid *vid_enc = - to_dpu_encoder_phys_vid(phys_enc); struct intf_prog_fetch f = { 0 }; u32 vfp_fetch_lines = 0; u32 horiz_total = 0; @@ -203,10 +201,10 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc, u32 vfp_fetch_start_vsync_counter = 0; unsigned long lock_flags; - if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch)) + if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch)) return; - vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing); + vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing); if (vfp_fetch_lines) { vert_total = get_vertical_total(timing); horiz_total = get_horizontal_total(timing); @@ -216,12 +214,12 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc, f.fetch_start = vfp_fetch_start_vsync_counter; } - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n", vfp_fetch_lines, vfp_fetch_start_vsync_counter); spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f); + phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); } @@ -231,7 +229,7 @@ static bool dpu_encoder_phys_vid_mode_fixup( struct drm_display_mode *adj_mode) { if (phys_enc) - DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n"); + DPU_DEBUG_VIDENC(phys_enc, "\n"); /* * Modifying mode has consequences when the mode comes back to us @@ -242,7 +240,6 @@ static bool dpu_encoder_phys_vid_mode_fixup( static void dpu_encoder_phys_vid_setup_timing_engine( struct dpu_encoder_phys *phys_enc) { - struct dpu_encoder_phys_vid *vid_enc; struct drm_display_mode mode; struct intf_timing_params timing_params = { 0 }; const struct dpu_format *fmt = NULL; @@ -256,13 +253,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine( } mode = phys_enc->cached_mode; - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - if (!vid_enc->hw_intf->ops.setup_timing_gen) { + if (!phys_enc->hw_intf->ops.setup_timing_gen) { DPU_ERROR("timing engine setup is not supported\n"); return; } - DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n"); + DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n"); drm_mode_debug_printmodeline(&mode); if 
(phys_enc->split_role != ENC_ROLE_SOLO) { @@ -271,32 +267,30 @@ static void dpu_encoder_phys_vid_setup_timing_engine( mode.hsync_start >>= 1; mode.hsync_end >>= 1; - DPU_DEBUG_VIDENC(vid_enc, + DPU_DEBUG_VIDENC(phys_enc, "split_role %d, halve horizontal %d %d %d %d\n", phys_enc->split_role, mode.hdisplay, mode.htotal, mode.hsync_start, mode.hsync_end); } - drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params); + drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params); fmt = dpu_get_dpu_format(fmt_fourcc); - DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc); + DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc); - intf_cfg.intf = vid_enc->hw_intf->idx; + intf_cfg.intf = phys_enc->hw_intf->idx; intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID; intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf, + phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &timing_params, fmt); phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); programmable_fetch_config(phys_enc, &timing_params); - - vid_enc->timing_params = timing_params; } static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) @@ -353,22 +347,10 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) phys_enc); } -static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc) -{ - struct dpu_crtc_state *dpu_cstate; - - if (!phys_enc) - return false; - - dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state); - - return dpu_cstate->num_ctls > 1; -} - static bool dpu_encoder_phys_vid_needs_single_flush( struct dpu_encoder_phys *phys_enc) { - return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc)); + return phys_enc->split_role != ENC_ROLE_SOLO; } static void _dpu_encoder_phys_vid_setup_irq_hw_idx( @@ -396,19 +378,15 @@ static void dpu_encoder_phys_vid_mode_set( struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - struct dpu_encoder_phys_vid *vid_enc; - if (!phys_enc || !phys_enc->dpu_kms) { DPU_ERROR("invalid encoder/kms\n"); return; } - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - if (adj_mode) { phys_enc->cached_mode = *adj_mode; drm_mode_debug_printmodeline(adj_mode); - DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n"); + DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n"); } _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc); @@ -419,7 +397,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq( bool enable) { int ret = 0; - struct dpu_encoder_phys_vid *vid_enc; int refcount; if (!phys_enc) { @@ -428,7 +405,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq( } refcount = atomic_read(&phys_enc->vblank_refcount); - vid_enc = to_dpu_encoder_phys_vid(phys_enc); /* Slave encoders don't report vblank */ if (!dpu_encoder_phys_vid_is_master(phys_enc)) @@ -453,7 +429,7 @@ end: if (ret) { DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n", DRMID(phys_enc->parent), - vid_enc->hw_intf->idx - INTF_0, ret, enable, + phys_enc->hw_intf->idx - INTF_0, ret, enable, refcount); } return ret; @@ -461,43 +437,17 @@ end: static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) { - struct msm_drm_private *priv; - struct dpu_encoder_phys_vid *vid_enc; - struct dpu_rm_hw_iter iter; struct dpu_hw_ctl *ctl; u32 flush_mask = 0; - if (!phys_enc || !phys_enc->parent || 
!phys_enc->parent->dev || - !phys_enc->parent->dev->dev_private) { - DPU_ERROR("invalid encoder/device\n"); - return; - } - priv = phys_enc->parent->dev->dev_private; - - vid_enc = to_dpu_encoder_phys_vid(phys_enc); ctl = phys_enc->hw_ctl; - dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF); - while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) { - struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw; - - if (hw_intf->idx == phys_enc->intf_idx) { - vid_enc->hw_intf = hw_intf; - break; - } - } - - if (!vid_enc->hw_intf) { - DPU_ERROR("hw_intf not assigned\n"); - return; - } - - DPU_DEBUG_VIDENC(vid_enc, "\n"); + DPU_DEBUG_VIDENC(phys_enc, "\n"); - if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing)) + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) return; - dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx); + dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx); dpu_encoder_phys_vid_setup_timing_engine(phys_enc); @@ -510,12 +460,13 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) !dpu_encoder_phys_vid_is_master(phys_enc)) goto skip_flush; - ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx); + ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx); ctl->ops.update_pending_flush(ctl, flush_mask); skip_flush: - DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n", - ctl->idx - CTL_0, flush_mask); + DPU_DEBUG_VIDENC(phys_enc, + "update pending flush ctl %d flush_mask %x\n", + ctl->idx - CTL_0, flush_mask); /* ctl_flush & timing engine enable will be triggered by framework */ if (phys_enc->enable_state == DPU_ENC_DISABLED) @@ -524,16 +475,13 @@ skip_flush: static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) { - struct dpu_encoder_phys_vid *vid_enc; - if (!phys_enc) { DPU_ERROR("invalid encoder\n"); return; } - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - DPU_DEBUG_VIDENC(vid_enc, "\n"); - kfree(vid_enc); + DPU_DEBUG_VIDENC(phys_enc, "\n"); + kfree(phys_enc); } static void dpu_encoder_phys_vid_get_hw_resources( @@ -589,7 +537,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank( static void dpu_encoder_phys_vid_prepare_for_kickoff( struct dpu_encoder_phys *phys_enc) { - struct dpu_encoder_phys_vid *vid_enc; struct dpu_hw_ctl *ctl; int rc; @@ -597,7 +544,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( DPU_ERROR("invalid encoder/parameters\n"); return; } - vid_enc = to_dpu_encoder_phys_vid(phys_enc); ctl = phys_enc->hw_ctl; if (!ctl || !ctl->ops.wait_reset_status) @@ -609,7 +555,7 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( */ rc = ctl->ops.wait_reset_status(ctl); if (rc) { - DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n", + DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n", ctl->idx, rc); dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC); } @@ -618,7 +564,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) { struct msm_drm_private *priv; - struct dpu_encoder_phys_vid *vid_enc; unsigned long lock_flags; int ret; @@ -629,16 +574,13 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) } priv = phys_enc->parent->dev->dev_private; - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - if (!vid_enc->hw_intf || !phys_enc->hw_ctl) { + if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", - vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0); + phys_enc->hw_intf != 
0, phys_enc->hw_ctl != 0); return; } - DPU_DEBUG_VIDENC(vid_enc, "\n"); - - if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing)) + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) return; if (phys_enc->enable_state == DPU_ENC_DISABLED) { @@ -647,7 +589,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) } spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); if (dpu_encoder_phys_vid_is_master(phys_enc)) dpu_encoder_phys_inc_pending(phys_enc); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); @@ -666,7 +608,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) atomic_set(&phys_enc->pending_kickoff_cnt, 0); DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", DRMID(phys_enc->parent), - vid_enc->hw_intf->idx - INTF_0, ret); + phys_enc->hw_intf->idx - INTF_0, ret); } } @@ -677,25 +619,21 @@ static void dpu_encoder_phys_vid_handle_post_kickoff( struct dpu_encoder_phys *phys_enc) { unsigned long lock_flags; - struct dpu_encoder_phys_vid *vid_enc; if (!phys_enc) { DPU_ERROR("invalid encoder\n"); return; } - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state); - /* * Video mode must flush CTL before enabling timing engine * Video encoders need to turn on their interfaces now */ if (phys_enc->enable_state == DPU_ENC_ENABLING) { trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent), - vid_enc->hw_intf->idx - INTF_0); + phys_enc->hw_intf->idx - INTF_0); spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); phys_enc->enable_state = DPU_ENC_ENABLED; } @@ -704,16 +642,13 @@ static void dpu_encoder_phys_vid_handle_post_kickoff( static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, bool enable) { - struct dpu_encoder_phys_vid *vid_enc; int ret; if (!phys_enc) return; - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), - vid_enc->hw_intf->idx - INTF_0, + phys_enc->hw_intf->idx - INTF_0, enable, atomic_read(&phys_enc->vblank_refcount)); @@ -732,19 +667,16 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, static int dpu_encoder_phys_vid_get_line_count( struct dpu_encoder_phys *phys_enc) { - struct dpu_encoder_phys_vid *vid_enc; - if (!phys_enc) return -EINVAL; if (!dpu_encoder_phys_vid_is_master(phys_enc)) return -EINVAL; - vid_enc = to_dpu_encoder_phys_vid(phys_enc); - if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count) + if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count) return -EINVAL; - return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf); + return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf); } static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) @@ -771,7 +703,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( struct dpu_enc_phys_init_params *p) { struct dpu_encoder_phys *phys_enc = NULL; - struct dpu_encoder_phys_vid *vid_enc = NULL; struct dpu_encoder_irq *irq; int i, ret = 0; @@ -780,18 +711,16 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( goto fail; } - vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL); - if (!vid_enc) { + phys_enc = kzalloc(sizeof(*phys_enc), 
 		ret = -ENOMEM;
 		goto fail;
 	}

-	phys_enc = &vid_enc->base;
-
 	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
 	phys_enc->intf_idx = p->intf_idx;

-	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	DPU_DEBUG_VIDENC(phys_enc, "\n");

 	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
 	phys_enc->parent = p->parent;
@@ -825,13 +754,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 	phys_enc->enable_state = DPU_ENC_DISABLED;

-	DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+	DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);

 	return phys_enc;

 fail:
 	DPU_ERROR("failed to create encoder\n");
-	if (vid_enc)
+	if (phys_enc)
 		dpu_encoder_phys_vid_destroy(phys_enc);

 	return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index ac75cfc267f4..31e9ef96ca5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -73,9 +73,6 @@
 #define DPU_NAME_SIZE  12

-/* timeout in frames waiting for frame done */
-#define DPU_FRAME_DONE_TIMEOUT 60
-
 /*
  * struct dpu_irq_callback - IRQ callback handlers
  * @list: list to callback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b01183b309b9..da1f727d7495 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -387,7 +387,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
 	ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
 	ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
 	ot_params.is_wfd = !pdpu->is_rt_pipe;
-	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
 	ot_params.vbif_idx = VBIF_RT;
 	ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
 	ot_params.rd = true;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 9bf9d6065c55..7b9edc21bc2c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -59,10 +59,10 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 		return -EINVAL;
 	}

-	total_lines_x100 = mode->vtotal * mode->vrefresh;
+	total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
 	if (!total_lines_x100) {
 		DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
-			      __func__, mode->vtotal, mode->vrefresh);
+			      __func__, mode->vtotal, drm_mode_vrefresh(mode));
 		return -EINVAL;
 	}

diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index fb423d309e91..67ef300559cf 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -75,7 +75,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 	struct msm_gpu_show_priv *show_priv;
 	int ret;

-	if (!gpu)
+	if (!gpu || !gpu->funcs->gpu_state_get)
 		return -ENODEV;

 	show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
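
Both vrefresh conversions above share one motivation: mode->vrefresh is a field user space may hand in (and may leave at zero), while drm_mode_vrefresh() can derive the rate from the pixel clock and the blanking totals; later kernels dropped the stored field entirely and always compute it. Ignoring the interlace/doublescan/vscan corrections the real helper also applies, the core arithmetic looks roughly like the stand-alone sketch below (mode_vrefresh_hz() is an illustrative name, not a kernel symbol):

    #include <stdio.h>

    /*
     * Sketch of drm_mode_vrefresh()'s core math: the pixel clock is in kHz,
     * so clock * 1000 is pixels per second, and htotal * vtotal is the
     * number of pixel periods scanned out per frame.
     */
    static int mode_vrefresh_hz(int clock_khz, int htotal, int vtotal)
    {
    	long long num = (long long)clock_khz * 1000;
    	long long den = (long long)htotal * vtotal;

    	if (den == 0)
    		return 0;

    	return (int)((num + den / 2) / den);	/* round to nearest */
    }

    int main(void)
    {
    	/* 1080p60 CEA timing: 148500 kHz pixel clock, 2200 x 1125 total */
    	printf("%d Hz\n", mode_vrefresh_hz(148500, 2200, 1125));
    	return 0;
    }

For the mdp5 tear-check hunk this also means total_lines_x100 now depends only on the timings being sane, not on user space having filled in vrefresh.
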
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0bdd93648761..31deb87abfc6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -39,9 +39,10 @@
  *            MSM_GEM_INFO ioctl.
  * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
  *           GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	4
+#define MSM_VERSION_MINOR	5
 #define MSM_VERSION_PATCHLEVEL	0

 static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -457,6 +458,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	priv->wq = alloc_ordered_workqueue("msm", 0);

+	INIT_WORK(&priv->free_work, msm_gem_free_work);
+	init_llist_head(&priv->free_list);
+
 	INIT_LIST_HEAD(&priv->inactive_list);

 	drm_mode_config_init(ddev);
@@ -964,6 +968,11 @@ static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
 			args->flags, &args->id);
 }

+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	return msm_submitqueue_query(dev, file->driver_priv, data);
+}

 static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
 		struct drm_file *file)
@@ -984,6 +993,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
 };

 static const struct vm_operations_struct vm_ops = {
@@ -1019,7 +1029,7 @@ static struct drm_driver msm_driver = {
 	.irq_uninstall      = msm_irq_uninstall,
 	.enable_vblank      = msm_enable_vblank,
 	.disable_vblank     = msm_disable_vblank,
-	.gem_free_object    = msm_gem_free_object,
+	.gem_free_object_unlocked = msm_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.dumb_create        = msm_gem_dumb_create,
 	.dumb_map_offset    = msm_gem_dumb_map_offset,
@@ -1027,7 +1037,6 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
-	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c56dade2c1dc..eb33d2d00d77 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -185,6 +185,10 @@ struct msm_drm_private {
 	/* list of GEM objects: */
 	struct list_head inactive_list;

+	/* worker for delayed free of objects: */
+	struct work_struct free_work;
+	struct llist_head free_list;
+
 	struct workqueue_struct *wq;

 	unsigned int num_planes;
@@ -292,7 +296,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -325,6 +328,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 		struct msm_gem_address_space *aspace, bool locked);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_free_work(struct work_struct *work);
 __printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
@@ -420,6 +424,8 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
 		u32 id);
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 		u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+		struct drm_msm_submitqueue_query *args);
 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
 void msm_submitqueue_close(struct msm_file_private *ctx);

diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c03e860ba737..d088299babf3 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,13 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	fbdev->fb = fb;
 	helper->fb = fb;

-	fbi->par = helper;
 	fbi->fbops = &msm_fb_ops;

-	strcpy(fbi->fix.id, "msm");
-
-	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
-	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+	drm_fb_helper_fill_info(fbi, helper, sizes);

 	dev->mode_config.fb_base = paddr;

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18ca651ab942..31d5a744d84f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	int i, ret;

-	fobj = reservation_object_get_list(msm_obj->resv);
+	fobj = reservation_object_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(msm_obj->resv);
+		fence = reservation_object_get_excl(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,

 	for (i = 0; i < fobj->shared_count; i++) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(msm_obj->resv));
+						reservation_object_held(obj->resv));
 		if (fence->context != fctx->context) {
 			ret = dma_fence_wait(fence, true);
 			if (ret)
@@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	msm_obj->gpu = gpu;
 	if (exclusive)
-		reservation_object_add_excl_fence(msm_obj->resv, fence);
+		reservation_object_add_excl_fence(obj->resv, fence);
 	else
-		reservation_object_add_shared_fence(msm_obj->resv, fence);
+		reservation_object_add_shared_fence(obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)

 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	bool write = !!(op & MSM_PREP_WRITE);
 	unsigned long remain =
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;

-	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = msm_obj->resv;
+	struct reservation_object *robj = obj->resv;
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
@@ -853,8 +851,18 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 /* don't call directly!  Use drm_gem_object_put() and friends */
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (llist_add(&msm_obj->freed, &priv->free_list))
+		queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+	struct drm_gem_object *obj = &msm_obj->base;
+	struct drm_device *dev = obj->dev;

 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

@@ -883,15 +891,35 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 			put_pages(obj);
 	}

-	if (msm_obj->resv == &msm_obj->_resv)
-		reservation_object_fini(msm_obj->resv);
-
 	drm_gem_object_release(obj);

 	mutex_unlock(&msm_obj->lock);
 	kfree(msm_obj);
 }

+void msm_gem_free_work(struct work_struct *work)
+{
+	struct msm_drm_private *priv =
+		container_of(work, struct msm_drm_private, free_work);
+	struct drm_device *dev = priv->dev;
+	struct llist_node *freed;
+	struct msm_gem_object *msm_obj, *next;
+
+	while ((freed = llist_del_all(&priv->free_list))) {
+
+		mutex_lock(&dev->struct_mutex);
+
+		llist_for_each_entry_safe(msm_obj, next,
+				freed, freed)
+			free_object(msm_obj);
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (need_resched())
+			break;
+	}
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle,
@@ -945,12 +973,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;

-	if (resv) {
-		msm_obj->resv = resv;
-	} else {
-		msm_obj->resv = &msm_obj->_resv;
-		reservation_object_init(msm_obj->resv);
-	}
+	if (resv)
+		msm_obj->base.resv = resv;

 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1026,6 +1050,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
+		/*
+		 * Our buffers are kept pinned, so allocating them from the
+		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
+		 * See the comments above new_inode() for why this is required
+		 * _and_ expected if you're going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}

 	return obj;
 }
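
The deferred-free scheme above leans on two properties of the kernel's llist primitives: llist_add() is safe against concurrent producers without any lock, and it returns true only when the list was empty beforehand, which is why queue_work() fires once per batch rather than once per object. msm_gem_free_work() then detaches the whole batch atomically with llist_del_all() and frees everything under struct_mutex. A minimal user-space analogue of that push/drain pattern, using C11 atomics in place of llist (all names here are illustrative, not from the driver):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
    	struct node *next;
    	int payload;
    };

    /* Lock-free stack head, standing in for llist_head. */
    static _Atomic(struct node *) free_list;

    /* Like llist_add(): returns nonzero if the list was empty before. */
    static int push(struct node *n)
    {
    	struct node *head = atomic_load(&free_list);

    	do {
    		n->next = head;
    	} while (!atomic_compare_exchange_weak(&free_list, &head, n));

    	return head == NULL;	/* first entry: caller queues the worker */
    }

    /* Like llist_del_all(): detach the entire list in one atomic swap. */
    static struct node *del_all(void)
    {
    	return atomic_exchange(&free_list, NULL);
    }

    int main(void)
    {
    	for (int i = 0; i < 3; i++) {
    		struct node *n = malloc(sizeof(*n));

    		n->payload = i;
    		if (push(n))
    			printf("list went non-empty: queue work\n");
    	}

    	/* the "free_work" side: drain and free off the hot path */
    	for (struct node *n = del_all(), *next; n; n = next) {
    		next = n->next;
    		printf("freeing %d\n", n->payload);
    		free(n);
    	}
    	return 0;
    }

As with llist, entries come back in reverse insertion order, and the need_resched() break in the worker should be harmless: a later llist_add() to the refilled list returns true again and re-queues the work.
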
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2064fac871b8..c5ac781dffee 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -84,6 +84,8 @@ struct msm_gem_object {
 	struct list_head vmas;    /* list of msm_gem_vma */

+	struct llist_node freed;
+
 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
 	struct reservation_object _resv;
@@ -133,6 +135,7 @@ enum msm_gem_lock {
 void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_free_work(struct work_struct *work);

 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
@@ -163,7 +166,10 @@ struct msm_gem_submit {
 	} *cmd;  /* array of size nr_cmds */
 	struct {
 		uint32_t flags;
-		struct msm_gem_object *obj;
+		union {
+			struct msm_gem_object *obj;
+			uint32_t handle;
+		};
 		uint64_t iova;
 	} bos[0];
 };
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6da6c7..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	return msm_obj->resv;
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..1b681306aca3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -74,27 +74,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
 	kfree(submit);
 }

-static inline unsigned long __must_check
-copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(from, n))
-		return __copy_from_user_inatomic(to, from, n);
-	return -EFAULT;
-}
-
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
 	unsigned i;
 	int ret = 0;

-	spin_lock(&file->table_lock);
-	pagefault_disable();
-
 	for (i = 0; i < args->nr_bos; i++) {
 		struct drm_msm_gem_submit_bo submit_bo;
-		struct drm_gem_object *obj;
-		struct msm_gem_object *msm_obj;
 		void __user *userptr =
 			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

@@ -103,15 +90,10 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		submit->bos[i].flags = 0;

-		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
-			pagefault_enable();
-			spin_unlock(&file->table_lock);
-			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
-				ret = -EFAULT;
-				goto out;
-			}
-			spin_lock(&file->table_lock);
-			pagefault_disable();
+		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+			ret = -EFAULT;
+			i = 0;
+			goto out;
 		}

 		/* at least one of READ and/or WRITE flags should be set: */
@@ -121,19 +103,28 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		    !(submit_bo.flags & MANDATORY_FLAGS)) {
 			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 			ret = -EINVAL;
-			goto out_unlock;
+			i = 0;
+			goto out;
 		}

+		submit->bos[i].handle = submit_bo.handle;
 		submit->bos[i].flags = submit_bo.flags;
 		/* in validate_objects() we figure out if this is true: */
 		submit->bos[i].iova  = submit_bo.presumed;
+	}
+
+	spin_lock(&file->table_lock);
+
+	for (i = 0; i < args->nr_bos; i++) {
+		struct drm_gem_object *obj;
+		struct msm_gem_object *msm_obj;

 		/* normally use drm_gem_object_lookup(), but for bulk lookup
 		 * all under single table_lock just hit object_idr directly:
 		 */
-		obj = idr_find(&file->object_idr, submit_bo.handle);
+		obj = idr_find(&file->object_idr, submit->bos[i].handle);
 		if (!obj) {
-			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
 			ret = -EINVAL;
 			goto out_unlock;
 		}
@@ -142,7 +133,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,

 		if (!list_empty(&msm_obj->submit_entry)) {
 			DRM_ERROR("handle %u at index %u already on submit list\n",
-					submit_bo.handle, i);
+					submit->bos[i].handle, i);
 			ret = -EINVAL;
 			goto out_unlock;
 		}
@@ -155,7 +146,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	}

 out_unlock:
-	pagefault_enable();
 	spin_unlock(&file->table_lock);

 out:
@@ -173,7 +163,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);

 	if (submit->bos[i].flags & BO_LOCKED)
-		ww_mutex_unlock(&msm_obj->resv->lock);
+		ww_mutex_unlock(&msm_obj->base.resv->lock);

 	if (backoff && !(submit->bos[i].flags & BO_VALID))
 		submit->bos[i].iova = 0;
@@ -196,7 +186,7 @@ retry:
 			contended = i;

 		if (!(submit->bos[i].flags & BO_LOCKED)) {
-			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
 					&submit->ticket);
 			if (ret)
 				goto fail;
@@ -218,7 +208,7 @@ fail:
 	if (ret == -EDEADLK) {
 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
 		/* we lost out in a seqno race, lock and retry.. */
-		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
 				&submit->ticket);
 		if (!ret) {
 			submit->bos[contended].flags |= BO_LOCKED;
@@ -244,7 +234,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		 * strange place to call it.  OTOH this is a
 		 * convenient can-fail point to hook it in.
 		 */
-		ret = reservation_object_reserve_shared(msm_obj->resv,
+		ret = reservation_object_reserve_shared(msm_obj->base.resv,
 							1);
 		if (ret)
 			return ret;
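
The submit_lookup_objects() rework above is worth spelling out. Instead of copying each drm_msm_gem_submit_bo from user space while holding table_lock with page faults disabled, and bouncing through a slow path whenever a fault hit, the function now runs two plain passes: pass one copies and validates every entry with no locks held, stashing the handle; pass two takes table_lock once and resolves all handles through object_idr. That is also why bos[] can use a union of handle and obj: the handle is dead as soon as the object pointer replaces it. A condensed user-space sketch of the same shape (names are illustrative; the real code also rejects duplicate objects and unwinds on error):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TABLE_SIZE 16

    struct entry {
    	uint32_t flags;
    	union {			/* handle is consumed by pass 2 */
    		uint32_t handle;
    		void *obj;
    	};
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *object_table[TABLE_SIZE];	/* stand-in for the object idr */

    /* Pass-1 stand-in for copy_from_user(): may block, so no lock held. */
    static int copy_in(struct entry *dst, const struct entry *src)
    {
    	memcpy(dst, src, sizeof(*dst));
    	return 0;
    }

    static int lookup_objects(struct entry *bos, const struct entry *ubos, int n)
    {
    	int i, ret = 0;

    	for (i = 0; i < n; i++)		/* pass 1: copy + validate, unlocked */
    		if (copy_in(&bos[i], &ubos[i]))
    			return -1;	/* would be -EFAULT */

    	pthread_mutex_lock(&table_lock);
    	for (i = 0; i < n; i++) {	/* pass 2: resolve all handles at once */
    		void *obj = object_table[bos[i].handle % TABLE_SIZE];

    		if (!obj) {
    			ret = -2;	/* would be -EINVAL */
    			break;
    		}
    		bos[i].obj = obj;	/* overwrites handle: hence the union */
    	}
    	pthread_mutex_unlock(&table_lock);

    	return ret;
    }

    int main(void)
    {
    	static int some_object = 42;
    	struct entry ubos[1] = { { .flags = 1, .handle = 3 } };
    	struct entry bos[1];

    	object_table[3] = &some_object;
    	printf("lookup: %d\n", lookup_objects(bos, ubos, 1));
    	return 0;
    }

The design choice is the usual one: do all the faultable work before the lock, then make the locked section short, bounded, and fault-free.
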
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 49c04829cf34..fcf7a83f0e6f 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	vma->mapped = true;

-	if (aspace->mmu)
+	if (aspace && aspace->mmu)
 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
 				size, prot);

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 10babd18e286..bf4ee2766431 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -443,24 +443,15 @@ static void recover_worker(struct work_struct *work)
 	if (submit) {
 		struct task_struct *task;

+		/* Increment the fault counts */
+		gpu->global_faults++;
+		submit->queue->faults++;
+
 		task = get_pid_task(submit->pid, PIDTYPE_PID);
 		if (task) {
 			comm = kstrdup(task->comm, GFP_KERNEL);
-
-			/*
-			 * So slightly annoying, in other paths like
-			 * mmap'ing gem buffers, mmap_sem is acquired
-			 * before struct_mutex, which means we can't
-			 * hold struct_mutex across the call to
-			 * get_cmdline().  But submits are retired
-			 * from the same in-order workqueue, so we can
-			 * safely drop the lock here without worrying
-			 * about the submit going away.
-			 */
-			mutex_unlock(&dev->struct_mutex);
 			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
 			put_task_struct(task);
-			mutex_lock(&dev->struct_mutex);
 		}

 		if (comm && cmd) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6241986bab51..f2739cd97cea 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -104,6 +104,9 @@ struct msm_gpu {
 	/* does gpu need hw_init? */
 	bool needs_hw_init;

+	/* number of GPU hangs (for all contexts) */
+	int global_faults;
+
 	/* worker for handling active-list retiring: */
 	struct work_struct retire_work;

diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4d62790cd425..12bb54cefd46 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,13 +38,8 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
 			    int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int ret;

-	pm_runtime_get_sync(mmu->dev);
-	ret = iommu_attach_device(iommu->domain, mmu->dev);
-	pm_runtime_put_sync(mmu->dev);
-
-	return ret;
+	return iommu_attach_device(iommu->domain, mmu->dev);
 }

 static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
@@ -52,9 +47,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);

-	pm_runtime_get_sync(mmu->dev);
 	iommu_detach_device(iommu->domain, mmu->dev);
-	pm_runtime_put_sync(mmu->dev);
 }

 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
@@ -63,9 +56,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;

-//	pm_runtime_get_sync(mmu->dev);
 	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-//	pm_runtime_put_sync(mmu->dev);
 	WARN_ON(!ret);

 	return (ret == len) ? 0 : -EINVAL;
@@ -75,9 +66,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);

-	pm_runtime_get_sync(mmu->dev);
 	iommu_unmap(iommu->domain, iova, len);
-	pm_runtime_put_sync(mmu->dev);

 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75b5b7f..f160ec40a39b 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -120,6 +120,47 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
 	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
 }

+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+		struct drm_msm_submitqueue_query *args)
+{
+	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+	int ret;
+
+	/* If a zero length was passed in, return the data size we expect */
+	if (!args->len) {
+		args->len = sizeof(queue->faults);
+		return 0;
+	}
+
+	/* Set the length to the actual size of the data */
+	args->len = size;
+
+	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+	return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+		struct drm_msm_submitqueue_query *args)
+{
+	struct msm_gpu_submitqueue *queue;
+	int ret = -EINVAL;
+
+	if (args->pad)
+		return -EINVAL;
+
+	queue = msm_submitqueue_get(ctx, args->id);
+	if (!queue)
+		return -ENOENT;
+
+	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+		ret = msm_submitqueue_query_faults(queue, args);
+
+	msm_submitqueue_put(queue);
+
+	return ret;
+}
+
 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
 {
 	struct msm_gpu_submitqueue *entry;
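
User space is meant to drive this new ioctl in two steps: a call with len == 0 asks how many bytes MSM_SUBMITQUEUE_PARAM_FAULTS returns, and a second call fetches the counter itself. A hedged sketch of the consumer side follows; it assumes the matching uapi additions to include/uapi/drm/msm_drm.h from this series, libdrm's headers (build with `pkg-config --cflags --libs libdrm`), a render node at the usual path, and that the default queue created at open time has id 0:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <xf86drm.h>	/* libdrm's drmIoctl(), which retries on EINTR */
    #include <msm_drm.h>	/* struct drm_msm_submitqueue_query */

    int main(void)
    {
    	struct drm_msm_submitqueue_query q;
    	uint32_t faults = 0;
    	int fd = open("/dev/dri/renderD128", O_RDWR);

    	if (fd < 0)
    		return 1;

    	memset(&q, 0, sizeof(q));	/* pad must be zero: kernel returns -EINVAL */
    	q.id = 0;			/* default submitqueue */
    	q.param = MSM_SUBMITQUEUE_PARAM_FAULTS;
    	q.data = (uintptr_t)&faults;
    	q.len = sizeof(faults);

    	if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q))
    		perror("SUBMITQUEUE_QUERY");
    	else
    		printf("per-queue faults: %u (%u bytes)\n", faults, q.len);

    	close(fd);
    	return 0;
    }

Note the clamp on the kernel side: args->len is reduced to sizeof(queue->faults) before the copy_to_user(), so an oversized buffer is harmless, and together with gpu->global_faults this lets a context distinguish its own hangs from other contexts' hangs.
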