author    | Sergey Khimich <serghox@gmail.com>   | 2024-03-19 14:59:32 +0300
committer | Ulf Hansson <ulf.hansson@linaro.org> | 2024-04-02 12:21:39 +0200
commit    | 53ab7f7fe412abd294262e86459f17d965cd65b9 (patch)
tree      | 0346c0f6fec06d0dd733a0433ef1c661760b02c2 /drivers/mmc/host/sdhci-of-dwcmshc.c
parent    | 52bf134fca61f0cdb400f4b27766149ca6e1550c (diff)
mmc: sdhci-of-dwcmshc: Implement SDHCI CQE support
To enable CQE support, set the 'supports-cqe' property in the devicetree
node of the appropriate mmc controller.
Signed-off-by: Sergey Khimich <serghox@gmail.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20240319115932.4108904-3-serghox@gmail.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
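
The 'supports-cqe' flag referenced in the commit message is the generic MMC devicetree property that the new probe code checks with device_property_read_bool(). As a minimal sketch only — the node name, unit address, reg, interrupt and clock values below are hypothetical placeholders, not taken from this patch or from a binding example — an opted-in controller node could look like:

```dts
/* Hypothetical node; replace addresses, interrupts and clocks with the
 * values required by your SoC's snps,dwcmshc-sdhci binding. The line
 * relevant to this patch is "supports-cqe".
 */
mmc@aa000000 {
	compatible = "snps,dwcmshc-sdhci";
	reg = <0xaa000000 0x1000>;
	interrupts = <0 98 4>;
	clocks = <&emmc_core_clk>;
	clock-names = "core";
	bus-width = <8>;
	non-removable;
	supports-cqe;	/* lets probe call dwcmshc_cqhci_init() */
};
```

With the property present, probe reads the vendor area 2 pointer register and initializes CQHCI; without it, the driver behaves exactly as before.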
Diffstat (limited to 'drivers/mmc/host/sdhci-of-dwcmshc.c')
-rw-r--r-- | drivers/mmc/host/sdhci-of-dwcmshc.c | 191
1 file changed, 189 insertions(+), 2 deletions(-)
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1d8f5a76096a..f29edb71f052 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -21,6 +21,7 @@
 #include <linux/sizes.h>

 #include "sdhci-pltfm.h"
+#include "cqhci.h"

 #define SDHCI_DWCMSHC_ARG2_STUFF	GENMASK(31, 16)

@@ -52,6 +53,9 @@
 #define AT_CTRL_SWIN_TH_VAL_MASK	GENMASK(31, 24)	/* bits [31:24] */
 #define AT_CTRL_SWIN_TH_VAL		0x9	/* sampling window threshold */

+/* DWC IP vendor area 2 pointer */
+#define DWCMSHC_P_VENDOR_AREA2		0xea
+
 /* Sophgo CV18XX specific Registers */
 #define CV18XX_SDHCI_MSHC_CTRL		0x00
 #define CV18XX_EMMC_FUNC_EN		BIT(0)
@@ -181,6 +185,10 @@
 #define BOUNDARY_OK(addr, len) \
 	((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))

+#define DWCMSHC_SDHCI_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
+					 SDHCI_TRNS_BLK_CNT_EN | \
+					 SDHCI_TRNS_DMA)
+
 enum dwcmshc_rk_type {
 	DWCMSHC_RK3568,
 	DWCMSHC_RK3588,
@@ -196,7 +204,9 @@ struct rk35xx_priv {

 struct dwcmshc_priv {
 	struct clk	*bus_clk;
-	int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */
+	int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
+	int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
+
 	void *priv; /* pointer to SoC private stuff */
 	u16 delay_line;
 	u16 flags;
@@ -455,6 +465,90 @@ static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
 	sdhci_writel(host, vendor, reg);
 }

+static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+	int err = sdhci_execute_tuning(mmc, opcode);
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (err)
+		return err;
+
+	/*
+	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
+	 * set) which prevents the entry to low power states (i.e. S0i3). Data
+	 * reset will clear it.
+	 */
+	sdhci_reset(host, SDHCI_RESET_DATA);
+
+	return 0;
+}
+
+static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask)
+{
+	int cmd_error = 0;
+	int data_error = 0;
+
+	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+		return intmask;
+
+	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+	return 0;
+}
+
+static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u8 ctrl;
+
+	sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+
+	sdhci_cqe_enable(mmc);
+
+	/*
+	 * The "DesignWare Cores Mobile Storage Host Controller
+	 * DWC_mshc / DWC_mshc_lite Databook" says:
+	 * when Host Version 4 Enable" is 1 in Host Control 2 register,
+	 * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected.
+	 * Selection of 32-bit/64-bit System Addressing:
+	 * either 32-bit or 64-bit system addressing is selected by
+	 * 64-bit Addressing bit in Host Control 2 register.
+	 *
+	 * On the other hand the "DesignWare Cores Mobile Storage Host
+	 * Controller DWC_mshc / DWC_mshc_lite User Guide" says, that we have to
+	 * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register.
+	 */
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	ctrl &= ~SDHCI_CTRL_DMA_MASK;
+	ctrl |= SDHCI_CTRL_ADMA32;
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
+				  dma_addr_t addr, int len, bool end, bool dma64)
+{
+	int tmplen, offset;
+
+	if (likely(!len || BOUNDARY_OK(addr, len))) {
+		cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+		return;
+	}
+
+	offset = addr & (SZ_128M - 1);
+	tmplen = SZ_128M - offset;
+	cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64);
+
+	addr += tmplen;
+	len -= tmplen;
+	*desc += cq_host->trans_desc_len;
+	cqhci_set_tran_desc(*desc, addr, len, end, dma64);
+}
+
+static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc)
+{
+	sdhci_dumpregs(mmc_priv(mmc));
+}
+
 static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -692,6 +786,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
 	.get_max_clock		= dwcmshc_get_max_clock,
 	.reset			= sdhci_reset,
 	.adma_write_desc	= dwcmshc_adma_write_desc,
+	.irq			= dwcmshc_cqe_irq_handler,
 };

 static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
@@ -758,6 +853,73 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };

+static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
+	.enable		= dwcmshc_sdhci_cqe_enable,
+	.disable	= sdhci_cqe_disable,
+	.dumpregs	= dwcmshc_cqhci_dumpregs,
+	.set_tran_desc	= dwcmshc_set_tran_desc,
+};
+
+static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev)
+{
+	struct cqhci_host *cq_host;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+	bool dma64 = false;
+	u16 clk;
+	int err;
+
+	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+	if (!cq_host) {
+		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n");
+		goto dsbl_cqe_caps;
+	}
+
+	/*
+	 * For dwcmshc host controller we have to enable internal clock
+	 * before access to some registers from Vendor Specific Area 2.
+	 */
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+	clk |= SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+	if (!(clk & SDHCI_CLOCK_INT_EN)) {
+		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n");
+		goto free_cq_host;
+	}
+
+	cq_host->mmio = host->ioaddr + priv->vendor_specific_area2;
+	cq_host->ops = &dwcmshc_cqhci_ops;
+
+	/* Enable using of 128-bit task descriptors */
+	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+	if (dma64) {
+		dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n");
+		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+	}
+	err = cqhci_init(cq_host, host->mmc, dma64);
+	if (err) {
+		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err);
+		goto int_clock_disable;
+	}
+
+	dev_dbg(mmc_dev(host->mmc), "CQE init done\n");
+
+	return;
+
+int_clock_disable:
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+	clk &= ~SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+free_cq_host:
+	devm_kfree(&pdev->dev, cq_host);
+
+dsbl_cqe_caps:
+	host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
+}
+
 static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
 {
 	int err;
@@ -862,7 +1024,7 @@ static int dwcmshc_probe(struct platform_device *pdev)
 	struct rk35xx_priv *rk_priv = NULL;
 	const struct sdhci_pltfm_data *pltfm_data;
 	int err;
-	u32 extra;
+	u32 extra, caps;

 	pltfm_data = device_get_match_data(&pdev->dev);
 	if (!pltfm_data) {
@@ -913,6 +1075,7 @@ static int dwcmshc_probe(struct platform_device *pdev)

 	host->mmc_host_ops.request = dwcmshc_request;
 	host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
+	host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;

 	if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
 		rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
@@ -962,6 +1125,10 @@ static int dwcmshc_probe(struct platform_device *pdev)
 		sdhci_enable_v4_mode(host);
 #endif

+	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+	if (caps & SDHCI_CAN_64BIT_V4)
+		sdhci_enable_v4_mode(host);
+
 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

 	pm_runtime_get_noresume(dev);
@@ -972,6 +1139,14 @@ static int dwcmshc_probe(struct platform_device *pdev)
 	if (err)
 		goto err_rpm;

+	/* Setup Command Queue Engine if enabled */
+	if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+		priv->vendor_specific_area2 =
+			sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2);
+
+		dwcmshc_cqhci_init(host, pdev);
+	}
+
 	if (rk_priv)
 		dwcmshc_rk35xx_postinit(host, priv);

@@ -1044,6 +1219,12 @@ static int dwcmshc_suspend(struct device *dev)

 	pm_runtime_resume(dev);

+	if (host->mmc->caps2 & MMC_CAP2_CQE) {
+		ret = cqhci_suspend(host->mmc);
+		if (ret)
+			return ret;
+	}
+
 	ret = sdhci_suspend_host(host);
 	if (ret)
 		return ret;
@@ -1088,6 +1269,12 @@ static int dwcmshc_resume(struct device *dev)
 	if (ret)
 		goto disable_rockchip_clks;

+	if (host->mmc->caps2 & MMC_CAP2_CQE) {
+		ret = cqhci_resume(host->mmc);
+		if (ret)
+			goto disable_rockchip_clks;
+	}
+
 	return 0;

 disable_rockchip_clks: