author     Linus Torvalds <torvalds@linux-foundation.org>   2023-06-28 13:48:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-06-28 13:48:42 -0700
commit     84fccbba93103b22044617e419ba20e1403b4a65 (patch)
tree       d2b2ca8757e8183e3c8692cdcdb6f82b379c2fa8 /drivers/spi
parent     362067b6d5ca5b59a849a8e1183fb51d616fcf19 (diff)
parent     e884a133340a470070b2c59833c9ff87aa6517ba (diff)
Merge tag 'spi-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
 "One small core feature this time around but mostly driver improvements
  and additions for SPI:

   - Add support for controlling the idle state of MOSI, some systems
     can support this and depending on the system integration may need
     it to avoid glitching in some situations

   - Support for polling mode in the S3C64xx driver and DMA on the
     Qualcomm QSPI driver

   - Support for several Allwinner SoCs, AMD Pensando Elba, Intel Mount
     Evans, Renesas RZ/V2M, and ST STM32H7"

* tag 'spi-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (66 commits)
  spi: dt-bindings: atmel,at91rm9200-spi: fix broken sam9x7 compatible
  spi: dt-bindings: atmel,at91rm9200-spi: add sam9x7 compatible
  spi: Add support for Renesas CSI
  spi: dt-bindings: Add bindings for RZ/V2M CSI
  spi: sun6i: Use the new helper to derive the xfer timeout value
  spi: atmel: Prevent false timeouts on long transfers
  spi: dt-bindings: stm32: do not disable spi-slave property for stm32f4-f7
  spi: Create a helper to derive adaptive timeouts
  spi: spi-geni-qcom: correctly handle -EPROBE_DEFER from dma_request_chan()
  spi: stm32: disable spi-slave property for stm32f4-f7
  spi: stm32: introduction of stm32h7 SPI device mode support
  spi: stm32: use dmaengine_terminate_{a}sync instead of _all
  spi: stm32: renaming of spi_master into spi_controller
  spi: dw: Remove misleading comment for Mount Evans SoC
  spi: dt-bindings: snps,dw-apb-ssi: Add compatible for Intel Mount Evans SoC
  spi: dw: Add compatible for Intel Mount Evans SoC
  spi: s3c64xx: Use dev_err_probe()
  spi: s3c64xx: Use the managed spi master allocation function
  spi: spl022: Probe defer is no error
  spi: spi-imx: fix mixing of native and gpio chipselects for imx51/imx53/imx6 variants
  ...
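The MOSI idle-level feature called out above is exposed to client drivers as a new SPI mode flag. A minimal sketch of a client opting in (my_dev_configure() is a hypothetical name; it assumes the controller advertises the bit in its mode_bits, as spi-imx does in the hunks below):

    /*
     * Sketch only: request that MOSI be held low while the bus is idle.
     * spi_setup() rejects the mode if the controller does not advertise
     * SPI_MOSI_IDLE_LOW.
     */
    #include <linux/spi/spi.h>

    static int my_dev_configure(struct spi_device *spi)
    {
            spi->mode |= SPI_MOSI_IDLE_LOW;
            return spi_setup(spi);
    }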
Diffstat (limited to 'drivers/spi')
 -rw-r--r--  drivers/spi/Kconfig                |    7
 -rw-r--r--  drivers/spi/Makefile               |    1
 -rw-r--r--  drivers/spi/spi-atmel.c            |   18
 -rw-r--r--  drivers/spi/spi-cadence-quadspi.c  |   19
 -rw-r--r--  drivers/spi/spi-cadence.c          |    1
 -rw-r--r--  drivers/spi/spi-dw-core.c          |   14
 -rw-r--r--  drivers/spi/spi-dw-dma.c           |   76
 -rw-r--r--  drivers/spi/spi-dw-mmio.c          |   22
 -rw-r--r--  drivers/spi/spi-dw.h               |    1
 -rw-r--r--  drivers/spi/spi-fsl-lpspi.c        |    8
 -rw-r--r--  drivers/spi/spi-geni-qcom.c        |    2
 -rw-r--r--  drivers/spi/spi-hisi-kunpeng.c     |    2
 -rw-r--r--  drivers/spi/spi-imx.c              |   63
 -rw-r--r--  drivers/spi/spi-mt65xx.c           |   33
 -rw-r--r--  drivers/spi/spi-pl022.c            |    4
 -rw-r--r--  drivers/spi/spi-qcom-qspi.c        |  218
 -rw-r--r--  drivers/spi/spi-rzv2m-csi.c        |  667
 -rw-r--r--  drivers/spi/spi-s3c64xx.c          |  197
 -rw-r--r--  drivers/spi/spi-sc18is602.c        |    2
 -rw-r--r--  drivers/spi/spi-sn-f-ospi.c        |   17
 -rw-r--r--  drivers/spi/spi-stm32.c            |  274
 -rw-r--r--  drivers/spi/spi-sun6i.c            |  133
 -rw-r--r--  drivers/spi/spi-xcomm.c            |    2
 -rw-r--r--  drivers/spi/spidev.c               |    3
24 files changed, 1437 insertions, 347 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3de2ebe8294a..abbd1fb5fbc0 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -825,6 +825,12 @@ config SPI_RSPI
help
SPI driver for Renesas RSPI and QSPI blocks.
+config SPI_RZV2M_CSI
+ tristate "Renesas RZV2M CSI controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ SPI driver for Renesas RZ/V2M Clocked Serial Interface (CSI)
+
config SPI_QCOM_QSPI
tristate "QTI QSPI controller"
depends on ARCH_QCOM || COMPILE_TEST
@@ -936,6 +942,7 @@ config SPI_SPRD_ADI
config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
+ select SPI_SLAVE
help
SPI driver for STMicroelectronics STM32 SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 28c4817a8a74..080c2c1b3ec1 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
+obj-$(CONFIG_SPI_RZV2M_CSI) += spi-rzv2m-csi.o
obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o
obj-$(CONFIG_SPI_SH) += spi-sh.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 7f06305e16cb..152cd6773403 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -233,7 +233,8 @@
*/
#define DMA_MIN_BYTES 16
-#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define SPI_DMA_MIN_TIMEOUT (msecs_to_jiffies(1000))
+#define SPI_DMA_TIMEOUT_PER_10K (msecs_to_jiffies(4))
#define AUTOSUSPEND_TIMEOUT 2000
@@ -1279,7 +1280,8 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
struct atmel_spi_device *asd;
int timeout;
int ret;
- unsigned long dma_timeout;
+ unsigned int dma_timeout;
+ long ret_timeout;
as = spi_controller_get_devdata(host);
@@ -1333,11 +1335,13 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
atmel_spi_unlock(as);
}
- dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
- SPI_DMA_TIMEOUT);
- if (WARN_ON(dma_timeout == 0)) {
- dev_err(&spi->dev, "spi transfer timeout\n");
- as->done_status = -EIO;
+ dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
+ ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
+ dma_timeout);
+ if (ret_timeout <= 0) {
+ dev_err(&spi->dev, "spi transfer %s\n",
+ !ret_timeout ? "timeout" : "canceled");
+ as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
}
if (as->done_status)
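The spi-atmel hunk above replaces the fixed 1-second DMA wait with a value from the new spi_controller_xfer_timeout() helper added by this series ("spi: Create a helper to derive adaptive timeouts"). A rough sketch of how such an adaptive timeout can be derived from transfer size and clock speed; the scaling factor and floor below are illustrative assumptions, not the helper's actual constants:

    /*
     * Sketch, not the in-tree helper: scale the wait with the amount of
     * data and the bus speed, double it for slack, and never go below
     * half a second. Assumes speed_hz is at least 1 kHz.
     */
    static unsigned int example_xfer_timeout_ms(const struct spi_transfer *xfer)
    {
            unsigned int bits = xfer->len * BITS_PER_BYTE;
            unsigned int ms = DIV_ROUND_UP(2 * bits, xfer->speed_hz / 1000);

            return max(ms, 500U);
    }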
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 32449bef4415..abf10f92415d 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -40,6 +40,7 @@
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
#define CQSPI_SLOW_SRAM BIT(4)
+#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -90,6 +91,7 @@ struct cqspi_st {
u32 pd_dev_id;
bool wr_completion;
bool slow_sram;
+ bool apb_ahb_hazard;
};
struct cqspi_driver_platdata {
@@ -1027,6 +1029,13 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
if (cqspi->wr_delay)
ndelay(cqspi->wr_delay);
+ /*
+ * If a hazard exists between the APB and AHB interfaces, perform a
+ * dummy readback from the controller to ensure synchronization.
+ */
+ if (cqspi->apb_ahb_hazard)
+ readl(reg_base + CQSPI_REG_INDIRECTWR);
+
while (remaining > 0) {
size_t write_words, mod_bytes;
@@ -1754,6 +1763,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->wr_completion = false;
if (ddata->quirks & CQSPI_SLOW_SRAM)
cqspi->slow_sram = true;
+ if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
+ cqspi->apb_ahb_hazard = true;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0")) {
@@ -1888,6 +1899,10 @@ static const struct cqspi_driver_platdata jh7110_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
+static const struct cqspi_driver_platdata pensando_cdns_qspi = {
+ .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
@@ -1917,6 +1932,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "starfive,jh7110-qspi",
.data = &jh7110_qspi,
},
+ {
+ .compatible = "amd,pensando-elba-qspi",
+ .data = &pensando_cdns_qspi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 26e663369319..de8fe3c5becb 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -102,6 +102,7 @@
* @regs: Virtual address of the SPI controller registers
* @ref_clk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
+ * @clk_rate: Reference clock frequency, taken from @ref_clk
* @speed_hz: Current SPI bus clock speed in Hz
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ae3108c70f50..a8ba41ad4541 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -57,21 +57,17 @@ static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};
-static int dw_spi_debugfs_init(struct dw_spi *dws)
+static void dw_spi_debugfs_init(struct dw_spi *dws)
{
char name[32];
snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
dws->debugfs = debugfs_create_dir(name, NULL);
- if (!dws->debugfs)
- return -ENOMEM;
dws->regset.regs = dw_spi_dbgfs_regs;
dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
dws->regset.base = dws->regs;
debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);
-
- return 0;
}
static void dw_spi_debugfs_remove(struct dw_spi *dws)
@@ -80,9 +76,8 @@ static void dw_spi_debugfs_remove(struct dw_spi *dws)
}
#else
-static inline int dw_spi_debugfs_init(struct dw_spi *dws)
+static inline void dw_spi_debugfs_init(struct dw_spi *dws)
{
- return 0;
}
static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
@@ -426,7 +421,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
int ret;
dws->dma_mapped = 0;
- dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+ dws->n_bytes =
+ roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
+ BITS_PER_BYTE));
+
dws->tx = (void *)transfer->tx_buf;
dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index ababb910b391..df819652901a 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -72,12 +72,22 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}
-static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
+static int dw_spi_dma_caps_init(struct dw_spi *dws)
{
- struct dma_slave_caps tx = {0}, rx = {0};
+ struct dma_slave_caps tx, rx;
+ int ret;
+
+ ret = dma_get_slave_caps(dws->txchan, &tx);
+ if (ret)
+ return ret;
- dma_get_slave_caps(dws->txchan, &tx);
- dma_get_slave_caps(dws->rxchan, &rx);
+ ret = dma_get_slave_caps(dws->rxchan, &rx);
+ if (ret)
+ return ret;
+
+ if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
+ rx.directions & BIT(DMA_DEV_TO_MEM)))
+ return -ENXIO;
if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
@@ -87,6 +97,15 @@ static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
dws->dma_sg_burst = rx.max_sg_burst;
else
dws->dma_sg_burst = 0;
+
+ /*
+ * Assuming both channels belong to the same DMA controller hence the
+ * peripheral side address width capabilities most likely would be
+ * the same.
+ */
+ dws->dma_addr_widths = tx.dst_addr_widths & rx.src_addr_widths;
+
+ return 0;
}
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
@@ -95,6 +114,7 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
struct pci_dev *dma_dev;
dma_cap_mask_t mask;
+ int ret = -EBUSY;
/*
* Get pci device for DMA controller, currently it could only
@@ -124,20 +144,25 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
init_completion(&dws->dma_completion);
- dw_spi_dma_maxburst_init(dws);
+ ret = dw_spi_dma_caps_init(dws);
+ if (ret)
+ goto free_txchan;
- dw_spi_dma_sg_burst_init(dws);
+ dw_spi_dma_maxburst_init(dws);
pci_dev_put(dma_dev);
return 0;
+free_txchan:
+ dma_release_channel(dws->txchan);
+ dws->txchan = NULL;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
pci_dev_put(dma_dev);
- return -EBUSY;
+ return ret;
}
static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
@@ -163,12 +188,17 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
init_completion(&dws->dma_completion);
- dw_spi_dma_maxburst_init(dws);
+ ret = dw_spi_dma_caps_init(dws);
+ if (ret)
+ goto free_txchan;
- dw_spi_dma_sg_burst_init(dws);
+ dw_spi_dma_maxburst_init(dws);
return 0;
+free_txchan:
+ dma_release_channel(dws->txchan);
+ dws->txchan = NULL;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
@@ -198,22 +228,32 @@ static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
return IRQ_HANDLED;
}
+static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
+{
+ switch (n_bytes) {
+ case 1:
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ case 2:
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+ case 4:
+ return DMA_SLAVE_BUSWIDTH_4_BYTES;
+ default:
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ }
+}
+
static bool dw_spi_can_dma(struct spi_controller *master,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct dw_spi *dws = spi_controller_get_devdata(master);
+ enum dma_slave_buswidth dma_bus_width;
- return xfer->len > dws->fifo_len;
-}
+ if (xfer->len <= dws->fifo_len)
+ return false;
-static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
-{
- if (n_bytes == 1)
- return DMA_SLAVE_BUSWIDTH_1_BYTE;
- else if (n_bytes == 2)
- return DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma_bus_width = dw_spi_dma_convert_width(dws->n_bytes);
- return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ return dws->dma_addr_widths & BIT(dma_bus_width);
}
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
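Taken together, the spi-dw-core and spi-dw-dma hunks above round the per-word byte count up to a power of two and then only allow DMA when the DMA controller reports a matching peripheral address width. A small sketch of that decision (example_dw_dma_ok() is a hypothetical name; dw_spi_dma_convert_width() is the helper shown above):

    /*
     * Sketch: can a transfer with the given bits_per_word use DMA?
     * addr_widths stands in for dws->dma_addr_widths as computed in
     * dw_spi_dma_caps_init(). E.g. 24-bit words -> n_bytes = 4 ->
     * DMA_SLAVE_BUSWIDTH_4_BYTES must be supported by both channels.
     */
    static bool example_dw_dma_ok(u32 bits_per_word, u32 addr_widths)
    {
            u8 n_bytes = roundup_pow_of_two(DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE));
            enum dma_slave_buswidth w = dw_spi_dma_convert_width(n_bytes);

            return w != DMA_SLAVE_BUSWIDTH_UNDEFINED && (addr_widths & BIT(w));
    }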
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 15f5e9cb54ad..a963bc96c223 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -236,6 +236,24 @@ static int dw_spi_intel_init(struct platform_device *pdev,
return 0;
}
+/*
+ * DMA-based mem ops are not configured for this device and are not tested.
+ */
+static int dw_spi_mountevans_imc_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ /*
+ * The Intel Mount Evans SoC's Integrated Management Complex DW
+ * apb_ssi_v4.02a controller has an errata where a full TX FIFO can
+ * result in data corruption. The suggested workaround is to never
+ * completely fill the FIFO. The TX FIFO has a size of 32 so the
+ * fifo_len is set to 31.
+ */
+ dwsmmio->dws.fifo_len = 31;
+
+ return 0;
+}
+
static int dw_spi_canaan_k210_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
@@ -405,6 +423,10 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
{ .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
+ {
+ .compatible = "intel,mountevans-imc-ssi",
+ .data = dw_spi_mountevans_imc_init,
+ },
{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
{ .compatible = "amd,pensando-elba-spi", .data = dw_spi_elba_init},
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9e8eb2b52d5c..3962e6dcf880 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -190,6 +190,7 @@ struct dw_spi {
struct dma_chan *rxchan;
u32 rxburst;
u32 dma_sg_burst;
+ u32 dma_addr_widths;
unsigned long dma_chan_busy;
dma_addr_t dma_addr; /* phy address of the Data register */
const struct dw_spi_dma_ops *dma_ops;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 4b70038ceb6b..fb68c72df171 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -303,6 +303,12 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+ if (!config.speed_hz) {
+ dev_err(fsl_lpspi->dev,
+ "error: the transmission speed provided is 0!\n");
+ return -EINVAL;
+ }
+
if (config.speed_hz > perclk_rate / 2) {
dev_err(fsl_lpspi->dev,
"per-clk should be at least two times of transfer speed");
@@ -911,7 +917,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto out_pm_get;
if (ret < 0)
- dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+ dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
else
/*
* disable LPSPI module IRQ when enable DMA mode successfully,
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 135cdf394b76..26ce959d98df 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -35,7 +35,7 @@
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
-#define CS_TOGGLE BIT(0)
+#define CS_TOGGLE BIT(1)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 524eadbef87b..2b4b3d2a22b8 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -169,7 +169,7 @@ static int hisi_spi_debugfs_init(struct hisi_spi *hs)
master = container_of(hs->dev, struct spi_controller, dev);
snprintf(name, 32, "hisi_spi%d", master->bus_num);
hs->debugfs = debugfs_create_dir(name, NULL);
- if (!hs->debugfs)
+ if (IS_ERR(hs->debugfs))
return -ENOMEM;
hs->regset.regs = hisi_spi_regs;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 34e5f81ec431..528ae46c087f 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -281,6 +281,7 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
+#define MX51_ECSPI_CONFIG_DATACTL(cs) (1 << ((cs & 3) + 16))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
#define MX51_ECSPI_INT 0x10
@@ -516,6 +517,13 @@ static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
+static int mx51_ecspi_channel(const struct spi_device *spi)
+{
+ if (!spi_get_csgpiod(spi, 0))
+ return spi_get_chipselect(spi, 0);
+ return spi->controller->unused_native_cs;
+}
+
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
struct spi_message *msg)
{
@@ -526,6 +534,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
u32 testreg, delay;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
u32 current_cfg = cfg;
+ int channel = mx51_ecspi_channel(spi);
/* set Master or Slave mode */
if (spi_imx->slave_mode)
@@ -540,7 +549,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
/* set chip select to use */
- ctrl |= MX51_ECSPI_CTRL_CS(spi_get_chipselect(spi, 0));
+ ctrl |= MX51_ECSPI_CTRL_CS(channel);
/*
* The ctrl register must be written first, with the EN bit set other
@@ -561,22 +570,27 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
* BURST_LENGTH + 1 bits are received
*/
if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
- cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi_get_chipselect(spi, 0));
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
else
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi_get_chipselect(spi, 0));
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);
if (spi->mode & SPI_CPOL) {
- cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi_get_chipselect(spi, 0));
- cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi_get_chipselect(spi, 0));
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel);
+ cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel);
} else {
- cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi_get_chipselect(spi, 0));
- cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi_get_chipselect(spi, 0));
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel);
}
+ if (spi->mode & SPI_MOSI_IDLE_LOW)
+ cfg |= MX51_ECSPI_CONFIG_DATACTL(channel);
+ else
+ cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel);
+
if (spi->mode & SPI_CS_HIGH)
- cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi_get_chipselect(spi, 0));
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel);
else
- cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi_get_chipselect(spi, 0));
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel);
if (cfg == current_cfg)
return 0;
@@ -621,14 +635,15 @@ static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
bool cpha = (spi->mode & SPI_CPHA);
bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+ int channel = mx51_ecspi_channel(spi);
/* Flip cpha logical value iff flip_cpha */
cpha ^= flip_cpha;
if (cpha)
- cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi_get_chipselect(spi, 0));
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel);
else
- cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi_get_chipselect(spi, 0));
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
}
@@ -1737,20 +1752,21 @@ static int spi_imx_probe(struct platform_device *pdev)
else
controller->num_chipselect = 3;
- spi_imx->controller->transfer_one = spi_imx_transfer_one;
- spi_imx->controller->setup = spi_imx_setup;
- spi_imx->controller->cleanup = spi_imx_cleanup;
- spi_imx->controller->prepare_message = spi_imx_prepare_message;
- spi_imx->controller->unprepare_message = spi_imx_unprepare_message;
- spi_imx->controller->slave_abort = spi_imx_slave_abort;
- spi_imx->controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS;
+ controller->transfer_one = spi_imx_transfer_one;
+ controller->setup = spi_imx_setup;
+ controller->cleanup = spi_imx_cleanup;
+ controller->prepare_message = spi_imx_prepare_message;
+ controller->unprepare_message = spi_imx_unprepare_message;
+ controller->slave_abort = spi_imx_slave_abort;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS |
+ SPI_MOSI_IDLE_LOW;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
is_imx53_ecspi(spi_imx))
- spi_imx->controller->mode_bits |= SPI_LOOP | SPI_READY;
+ controller->mode_bits |= SPI_LOOP | SPI_READY;
if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
- spi_imx->controller->mode_bits |= SPI_RX_CPHA_FLIP;
+ controller->mode_bits |= SPI_RX_CPHA_FLIP;
if (is_imx51_ecspi(spi_imx) &&
device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
@@ -1759,7 +1775,12 @@ static int spi_imx_probe(struct platform_device *pdev)
* setting the burst length to the word size. This is
* considerably faster than manually controlling the CS.
*/
- spi_imx->controller->mode_bits |= SPI_CS_WORD;
+ controller->mode_bits |= SPI_CS_WORD;
+
+ if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) {
+ controller->max_native_cs = 4;
+ controller->flags |= SPI_MASTER_GPIO_SS;
+ }
spi_imx->spi_drctl = spi_drctl;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index d7432e2219d8..39272ad6641b 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1144,7 +1144,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
if (mdata->dev_comp->ipm_design)
- master->mode_bits |= SPI_LOOP;
+ master->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
+ SPI_RX_QUAD | SPI_TX_QUAD;
if (mdata->dev_comp->ipm_design) {
mdata->dev = dev;
@@ -1269,7 +1270,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_spi_remove(struct platform_device *pdev)
+static void mtk_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
@@ -1278,21 +1279,25 @@ static int mtk_spi_remove(struct platform_device *pdev)
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
-
- mtk_spi_reset(mdata);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
+ } else {
+ /*
+ * If pm runtime resume failed, clks are disabled and
+ * unprepared. So don't access the hardware and skip clk
+ * unpreparing.
+ */
+ mtk_spi_reset(mdata);
- if (mdata->dev_comp->no_need_unprepare) {
- clk_unprepare(mdata->spi_clk);
- clk_unprepare(mdata->spi_hclk);
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_unprepare(mdata->spi_clk);
+ clk_unprepare(mdata->spi_hclk);
+ }
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1311,7 +1316,7 @@ static int mtk_spi_suspend(struct device *dev)
clk_disable_unprepare(mdata->spi_hclk);
}
- return ret;
+ return 0;
}
static int mtk_spi_resume(struct device *dev)
@@ -1412,7 +1417,7 @@ static struct platform_driver mtk_spi_driver = {
.of_match_table = mtk_spi_of_match,
},
.probe = mtk_spi_probe,
- .remove = mtk_spi_remove,
+ .remove_new = mtk_spi_remove,
};
module_platform_driver(mtk_spi_driver);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 982407bc5d9f..1af75eff26b6 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2217,8 +2217,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl022);
status = devm_spi_register_master(&adev->dev, master);
if (status != 0) {
- dev_err(&adev->dev,
- "probe - problem registering spi master\n");
+ dev_err_probe(&adev->dev, status,
+ "problem registering spi master\n");
goto err_spi_register;
}
dev_dbg(dev, "probe succeeded\n");
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index fab155389999..a8a683d6145c 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -2,6 +2,8 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -62,6 +64,7 @@
#define WR_FIFO_FULL BIT(10)
#define WR_FIFO_OVERRUN BIT(11)
#define TRANSACTION_DONE BIT(16)
+#define DMA_CHAIN_DONE BIT(31)
#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
@@ -108,18 +111,34 @@
#define RD_FIFO_RESET 0x0030
#define RESET_FIFO BIT(0)
+#define NEXT_DMA_DESC_ADDR 0x0040
+#define CURRENT_DMA_DESC_ADDR 0x0044
+#define CURRENT_MEM_ADDR 0x0048
+
#define CUR_MEM_ADDR 0x0048
#define HW_VERSION 0x004c
#define RD_FIFO 0x0050
#define SAMPLING_CLK_CFG 0x0090
#define SAMPLING_CLK_STATUS 0x0094
+#define QSPI_ALIGN_REQ 32
enum qspi_dir {
QSPI_READ,
QSPI_WRITE,
};
+struct qspi_cmd_desc {
+ u32 data_address;
+ u32 next_descriptor;
+ u32 direction:1;
+ u32 multi_io_mode:3;
+ u32 reserved1:4;
+ u32 fragment:1;
+ u32 reserved2:7;
+ u32 length:16;
+};
+
struct qspi_xfer {
union {
const void *tx_buf;
@@ -137,11 +156,23 @@ enum qspi_clocks {
QSPI_NUM_CLKS
};
+/*
+ * Number of entries in the sgt returned from the spi framework that
+ * will be supported. Can be modified as required.
+ * In practice, given max_dma_len is 64KB, the number of
+ * entries is not expected to exceed 1.
+ */
+#define QSPI_MAX_SG 5
+
struct qcom_qspi {
void __iomem *base;
struct device *dev;
struct clk_bulk_data *clks;
struct qspi_xfer xfer;
+ struct dma_pool *dma_cmd_pool;
+ dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
+ void *virt_cmd_desc[QSPI_MAX_SG];
+ unsigned int n_cmd_desc;
struct icc_path *icc_path_cpu_to_qspi;
unsigned long last_speed;
/* Lock to protect data accessed by IRQs */
@@ -153,21 +184,22 @@ static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
{
switch (buswidth) {
case 1:
- return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ return SDR_1BIT;
case 2:
- return SDR_2BIT << MULTI_IO_MODE_SHFT;
+ return SDR_2BIT;
case 4:
- return SDR_4BIT << MULTI_IO_MODE_SHFT;
+ return SDR_4BIT;
default:
dev_warn_once(ctrl->dev,
"Unexpected bus width: %u\n", buswidth);
- return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ return SDR_1BIT;
}
}
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
u32 pio_xfer_cfg;
+ u32 iomode;
const struct qspi_xfer *xfer;
xfer = &ctrl->xfer;
@@ -179,7 +211,8 @@ static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
else
pio_xfer_cfg |= TRANSFER_FRAGMENT;
pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
- pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+ iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+ pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;
writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}
@@ -217,12 +250,22 @@ static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
static void qcom_qspi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
+ u32 int_status;
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
unsigned long flags;
+ int i;
spin_lock_irqsave(&ctrl->lock, flags);
writel(0, ctrl->base + MSTR_INT_EN);
+ int_status = readl(ctrl->base + MSTR_INT_STATUS);
+ writel(int_status, ctrl->base + MSTR_INT_STATUS);
ctrl->xfer.rem_bytes = 0;
+
+ /* free cmd descriptors if they are around (DMA mode) */
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -242,7 +285,7 @@ static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
}
/*
- * Set BW quota for CPU as driver supports FIFO mode only.
+ * Set BW quota for CPU.
* We don't have explicit peak requirement so keep it equal to avg_bw.
*/
avg_bw_cpu = Bps_to_icc(speed_hz);
@@ -258,6 +301,102 @@ static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
return 0;
}
+static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
+ uint32_t n_bytes)
+{
+ struct qspi_cmd_desc *virt_cmd_desc, *prev;
+ dma_addr_t dma_cmd_desc;
+
+ /* allocate for dma cmd descriptor */
+ virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_KERNEL | __GFP_ZERO, &dma_cmd_desc);
+ if (!virt_cmd_desc)
+ return -ENOMEM;
+
+ ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
+ ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
+ ctrl->n_cmd_desc++;
+
+ /* setup cmd descriptor */
+ virt_cmd_desc->data_address = dma_ptr;
+ virt_cmd_desc->direction = ctrl->xfer.dir;
+ virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
+ virt_cmd_desc->fragment = !ctrl->xfer.is_last;
+ virt_cmd_desc->length = n_bytes;
+
+ /* update previous descriptor */
+ if (ctrl->n_cmd_desc >= 2) {
+ prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
+ prev->next_descriptor = dma_cmd_desc;
+ prev->fragment = 1;
+ }
+
+ return 0;
+}
+
+static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
+ struct spi_transfer *xfer)
+{
+ int ret;
+ struct sg_table *sgt;
+ dma_addr_t dma_ptr_sg;
+ unsigned int dma_len_sg;
+ int i;
+
+ if (ctrl->n_cmd_desc) {
+ dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
+ return -EIO;
+ }
+
+ sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
+ if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
+ dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < sgt->nents; i++) {
+ dma_ptr_sg = sg_dma_address(sgt->sgl + i);
+ if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
+ dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
+ return -EAGAIN;
+ }
+ }
+
+ for (i = 0; i < sgt->nents; i++) {
+ dma_ptr_sg = sg_dma_address(sgt->sgl + i);
+ dma_len_sg = sg_dma_len(sgt->sgl + i);
+
+ ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
+ if (ret)
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
+ return ret;
+}
+
+static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
+{
+ /* Setup new interrupts */
+ writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);
+
+ /* kick off transfer */
+ writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
+}
+
+/* Switch to DMA if transfer length exceeds this */
+#define QSPI_MAX_BYTES_FIFO 64
+
+static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *slv, struct spi_transfer *xfer)
+{
+ return xfer->len > QSPI_MAX_BYTES_FIFO;
+}
+
static int qcom_qspi_transfer_one(struct spi_master *master,
struct spi_device *slv,
struct spi_transfer *xfer)
@@ -266,6 +405,7 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
int ret;
unsigned long speed_hz;
unsigned long flags;
+ u32 mstr_cfg;
speed_hz = slv->max_speed_hz;
if (xfer->speed_hz)
@@ -276,6 +416,7 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
return ret;
spin_lock_irqsave(&ctrl->lock, flags);
+ mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
/* We are half duplex, so either rx or tx will be set */
if (xfer->rx_buf) {
@@ -290,10 +431,36 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
&master->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
+
+ if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
+ /* do DMA transfer */
+ if (!(mstr_cfg & DMA_ENABLE)) {
+ mstr_cfg |= DMA_ENABLE;
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ }
+
+ ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ qcom_qspi_dma_xfer(ctrl);
+ goto exit;
+ }
+ dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
+ ret = 0; /* We'll retry w/ PIO */
+ }
+
+ if (mstr_cfg & DMA_ENABLE) {
+ mstr_cfg &= ~DMA_ENABLE;
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ }
qcom_qspi_pio_xfer(ctrl);
+exit:
spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ret)
+ return ret;
+
/* We'll call spi_finalize_current_transfer() when done */
return 1;
}
@@ -328,6 +495,16 @@ static int qcom_qspi_prepare_message(struct spi_master *master,
return 0;
}
+static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
+{
+ ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
+ ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
+ if (!ctrl->dma_cmd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
u32 rd_fifo_status;
@@ -426,6 +603,7 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
+ /* PIO mode handling */
if (ctrl->xfer.dir == QSPI_WRITE) {
if (int_status & WR_FIFO_EMPTY)
ret = pio_write(ctrl);
@@ -449,6 +627,22 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
+ /* DMA mode handling */
+ if (int_status & DMA_CHAIN_DONE) {
+ int i;
+
+ writel(0, ctrl->base + MSTR_INT_EN);
+ ctrl->xfer.rem_bytes = 0;
+
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
+
+ ret = IRQ_HANDLED;
+ spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
+ }
+
spin_unlock(&ctrl->lock);
return ret;
}
@@ -517,7 +711,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return ret;
}
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
+
master->max_speed_hz = 300000000;
+ master->max_dma_len = 65536; /* as per HPG */
+ master->dma_alignment = QSPI_ALIGN_REQ;
master->num_chipselect = QSPI_NUM_CS;
master->bus_num = -1;
master->dev.of_node = pdev->dev.of_node;
@@ -528,6 +728,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
master->prepare_message = qcom_qspi_prepare_message;
master->transfer_one = qcom_qspi_transfer_one;
master->handle_err = qcom_qspi_handle_err;
+ if (of_property_read_bool(pdev->dev.of_node, "iommus"))
+ master->can_dma = qcom_qspi_can_dma;
master->auto_runtime_pm = true;
ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
@@ -540,6 +742,10 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return ret;
}
+ ret = qcom_qspi_alloc_dma(ctrl);
+ if (ret)
+ return ret;
+
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
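The Qualcomm QSPI changes above add a descriptor-chain DMA path next to the existing PIO path. A brief summary of the decision flow, in comment form only (it restates the code above, nothing new):

    /*
     * 1. qcom_qspi_can_dma(): transfers longer than 64 bytes are handed
     *    to the core pre-mapped (can_dma is only installed when an
     *    "iommus" property is present).
     * 2. qcom_qspi_setup_dma_desc(): more than QSPI_MAX_SG segments, or
     *    any segment not 32-byte aligned, returns -EAGAIN and the driver
     *    falls back to PIO.
     * 3. Otherwise a dma_pool-backed qspi_cmd_desc chain is built, the
     *    first descriptor's address is written to NEXT_DMA_DESC_ADDR and
     *    completion is signalled by the DMA_CHAIN_DONE interrupt.
     */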
diff --git a/drivers/spi/spi-rzv2m-csi.c b/drivers/spi/spi-rzv2m-csi.c
new file mode 100644
index 000000000000..14ad65da930d
--- /dev/null
+++ b/drivers/spi/spi-rzv2m-csi.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Renesas RZ/V2M Clocked Serial Interface (CSI) driver
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/count_zeros.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+/* Registers */
+#define CSI_MODE 0x00 /* CSI mode control */
+#define CSI_CLKSEL 0x04 /* CSI clock select */
+#define CSI_CNT 0x08 /* CSI control */
+#define CSI_INT 0x0C /* CSI interrupt status */
+#define CSI_IFIFOL 0x10 /* CSI receive FIFO level display */
+#define CSI_OFIFOL 0x14 /* CSI transmit FIFO level display */
+#define CSI_IFIFO 0x18 /* CSI receive window */
+#define CSI_OFIFO 0x1C /* CSI transmit window */
+#define CSI_FIFOTRG 0x20 /* CSI FIFO trigger level */
+
+/* CSI_MODE */
+#define CSI_MODE_CSIE BIT(7)
+#define CSI_MODE_TRMD BIT(6)
+#define CSI_MODE_CCL BIT(5)
+#define CSI_MODE_DIR BIT(4)
+#define CSI_MODE_CSOT BIT(0)
+
+#define CSI_MODE_SETUP 0x00000040
+
+/* CSI_CLKSEL */
+#define CSI_CLKSEL_CKP BIT(17)
+#define CSI_CLKSEL_DAP BIT(16)
+#define CSI_CLKSEL_SLAVE BIT(15)
+#define CSI_CLKSEL_CKS GENMASK(14, 1)
+
+/* CSI_CNT */
+#define CSI_CNT_CSIRST BIT(28)
+#define CSI_CNT_R_TRGEN BIT(19)
+#define CSI_CNT_UNDER_E BIT(13)
+#define CSI_CNT_OVERF_E BIT(12)
+#define CSI_CNT_TREND_E BIT(9)
+#define CSI_CNT_CSIEND_E BIT(8)
+#define CSI_CNT_T_TRGR_E BIT(4)
+#define CSI_CNT_R_TRGR_E BIT(0)
+
+/* CSI_INT */
+#define CSI_INT_UNDER BIT(13)
+#define CSI_INT_OVERF BIT(12)
+#define CSI_INT_TREND BIT(9)
+#define CSI_INT_CSIEND BIT(8)
+#define CSI_INT_T_TRGR BIT(4)
+#define CSI_INT_R_TRGR BIT(0)
+
+/* CSI_FIFOTRG */
+#define CSI_FIFOTRG_R_TRG GENMASK(2, 0)
+
+#define CSI_FIFO_SIZE_BYTES 32
+#define CSI_FIFO_HALF_SIZE 16
+#define CSI_EN_DIS_TIMEOUT_US 100
+#define CSI_CKS_MAX 0x3FFF
+
+#define UNDERRUN_ERROR BIT(0)
+#define OVERFLOW_ERROR BIT(1)
+#define TX_TIMEOUT_ERROR BIT(2)
+#define RX_TIMEOUT_ERROR BIT(3)
+
+#define CSI_MAX_SPI_SCKO 8000000
+
+struct rzv2m_csi_priv {
+ void __iomem *base;
+ struct clk *csiclk;
+ struct clk *pclk;
+ struct device *dev;
+ struct spi_controller *controller;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int buffer_len;
+ int bytes_sent;
+ int bytes_received;
+ int bytes_to_transfer;
+ int words_to_transfer;
+ unsigned char bytes_per_word;
+ wait_queue_head_t wait;
+ u8 errors;
+ u32 status;
+};
+
+static const unsigned char x_trg[] = {
+ 0, 1, 1, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5
+};
+
+static const unsigned char x_trg_words[] = {
+ 1, 2, 2, 4, 4, 4, 4, 8,
+ 8, 8, 8, 8, 8, 8, 8, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 32
+};
+
+static void rzv2m_csi_reg_write_bit(const struct rzv2m_csi_priv *csi,
+ int reg_offs, int bit_mask, u32 value)
+{
+ int nr_zeros;
+ u32 tmp;
+
+ nr_zeros = count_trailing_zeros(bit_mask);
+ value <<= nr_zeros;
+
+ tmp = (readl(csi->base + reg_offs) & ~bit_mask) | value;
+ writel(tmp, csi->base + reg_offs);
+}
+
+static int rzv2m_csi_sw_reset(struct rzv2m_csi_priv *csi, int assert)
+{
+ u32 reg;
+
+ rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_CSIRST, assert);
+
+ if (assert) {
+ return readl_poll_timeout(csi->base + CSI_MODE, reg,
+ !(reg & CSI_MODE_CSOT), 0,
+ CSI_EN_DIS_TIMEOUT_US);
+ }
+
+ return 0;
+}
+
+static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
+ int enable, bool wait)
+{
+ u32 reg;
+
+ rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CSIE, enable);
+
+ if (!enable && wait)
+ return readl_poll_timeout(csi->base + CSI_MODE, reg,
+ !(reg & CSI_MODE_CSOT), 0,
+ CSI_EN_DIS_TIMEOUT_US);
+
+ return 0;
+}
+
+static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
+{
+ int i;
+
+ if (readl(csi->base + CSI_OFIFOL))
+ return -EIO;
+
+ if (csi->bytes_per_word == 2) {
+ u16 *buf = (u16 *)csi->txbuf;
+
+ for (i = 0; i < csi->words_to_transfer; i++)
+ writel(buf[i], csi->base + CSI_OFIFO);
+ } else {
+ u8 *buf = (u8 *)csi->txbuf;
+
+ for (i = 0; i < csi->words_to_transfer; i++)
+ writel(buf[i], csi->base + CSI_OFIFO);
+ }
+
+ csi->txbuf += csi->bytes_to_transfer;
+ csi->bytes_sent += csi->bytes_to_transfer;
+
+ return 0;
+}
+
+static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
+{
+ int i;
+
+ if (readl(csi->base + CSI_IFIFOL) != csi->bytes_to_transfer)
+ return -EIO;
+
+ if (csi->bytes_per_word == 2) {
+ u16 *buf = (u16 *)csi->rxbuf;
+
+ for (i = 0; i < csi->words_to_transfer; i++)
+ buf[i] = (u16)readl(csi->base + CSI_IFIFO);
+ } else {
+ u8 *buf = (u8 *)csi->rxbuf;
+
+ for (i = 0; i < csi->words_to_transfer; i++)
+ buf[i] = (u8)readl(csi->base + CSI_IFIFO);
+ }
+
+ csi->rxbuf += csi->bytes_to_transfer;
+ csi->bytes_received += csi->bytes_to_transfer;
+
+ return 0;
+}
+
+static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
+{
+ int bytes_transferred = max_t(int, csi->bytes_received, csi->bytes_sent);
+ int bytes_remaining = csi->buffer_len - bytes_transferred;
+ int to_transfer;
+
+ if (csi->txbuf)
+ /*
+ * Leaving a little bit of headroom in the FIFOs makes it very
+ * hard to raise an overflow error (which is only possible
+ * when IP transmits and receives at the same time).
+ */
+ to_transfer = min_t(int, CSI_FIFO_HALF_SIZE, bytes_remaining);
+ else
+ to_transfer = min_t(int, CSI_FIFO_SIZE_BYTES, bytes_remaining);
+
+ if (csi->bytes_per_word == 2)
+ to_transfer >>= 1;
+
+ /*
+ * We can only choose a trigger level from a predefined set of values.
+ * This will pick a value that is the greatest possible integer that's
+ * less than or equal to the number of bytes we need to transfer.
+ * This may result in multiple smaller transfers.
+ */
+ csi->words_to_transfer = x_trg_words[to_transfer - 1];
+
+ if (csi->bytes_per_word == 2)
+ csi->bytes_to_transfer = csi->words_to_transfer << 1;
+ else
+ csi->bytes_to_transfer = csi->words_to_transfer;
+}
+
+static inline void rzv2m_csi_set_rx_fifo_trigger_level(struct rzv2m_csi_priv *csi)
+{
+ rzv2m_csi_reg_write_bit(csi, CSI_FIFOTRG, CSI_FIFOTRG_R_TRG,
+ x_trg[csi->words_to_transfer - 1]);
+}
+
+static inline void rzv2m_csi_enable_rx_trigger(struct rzv2m_csi_priv *csi,
+ bool enable)
+{
+ rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_R_TRGEN, enable);
+}
+
+static void rzv2m_csi_disable_irqs(const struct rzv2m_csi_priv *csi,
+ u32 enable_bits)
+{
+ u32 cnt = readl(csi->base + CSI_CNT);
+
+ writel(cnt & ~enable_bits, csi->base + CSI_CNT);
+}
+
+static void rzv2m_csi_disable_all_irqs(struct rzv2m_csi_priv *csi)
+{
+ rzv2m_csi_disable_irqs(csi, CSI_CNT_R_TRGR_E | CSI_CNT_T_TRGR_E |
+ CSI_CNT_CSIEND_E | CSI_CNT_TREND_E |
+ CSI_CNT_OVERF_E | CSI_CNT_UNDER_E);
+}
+
+static inline void rzv2m_csi_clear_irqs(struct rzv2m_csi_priv *csi, u32 irqs)
+{
+ writel(irqs, csi->base + CSI_INT);
+}
+
+static void rzv2m_csi_clear_all_irqs(struct rzv2m_csi_priv *csi)
+{
+ rzv2m_csi_clear_irqs(csi, CSI_INT_UNDER | CSI_INT_OVERF |
+ CSI_INT_TREND | CSI_INT_CSIEND | CSI_INT_T_TRGR |
+ CSI_INT_R_TRGR);
+}
+
+static void rzv2m_csi_enable_irqs(struct rzv2m_csi_priv *csi, u32 enable_bits)
+{
+ u32 cnt = readl(csi->base + CSI_CNT);
+
+ writel(cnt | enable_bits, csi->base + CSI_CNT);
+}
+
+static int rzv2m_csi_wait_for_interrupt(struct rzv2m_csi_priv *csi,
+ u32 wait_mask, u32 enable_bits)
+{
+ int ret;
+
+ rzv2m_csi_enable_irqs(csi, enable_bits);
+
+ ret = wait_event_timeout(csi->wait,
+ ((csi->status & wait_mask) == wait_mask) ||
+ csi->errors, HZ);
+
+ rzv2m_csi_disable_irqs(csi, enable_bits);
+
+ if (csi->errors)
+ return -EIO;
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int rzv2m_csi_wait_for_tx_empty(struct rzv2m_csi_priv *csi)
+{
+ int ret;
+
+ if (readl(csi->base + CSI_OFIFOL) == 0)
+ return 0;
+
+ ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_TREND, CSI_CNT_TREND_E);
+
+ if (ret == -ETIMEDOUT)
+ csi->errors |= TX_TIMEOUT_ERROR;
+
+ return ret;
+}
+
+static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
+{
+ int ret;
+
+ if (readl(csi->base + CSI_IFIFOL) == csi->bytes_to_transfer)
+ return 0;
+
+ ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_R_TRGR,
+ CSI_CNT_R_TRGR_E);
+
+ if (ret == -ETIMEDOUT)
+ csi->errors |= RX_TIMEOUT_ERROR;
+
+ return ret;
+}
+
+static irqreturn_t rzv2m_csi_irq_handler(int irq, void *data)
+{
+ struct rzv2m_csi_priv *csi = (struct rzv2m_csi_priv *)data;
+
+ csi->status = readl(csi->base + CSI_INT);
+ rzv2m_csi_disable_irqs(csi, csi->status);
+
+ if (csi->status & CSI_INT_OVERF)
+ csi->errors |= OVERFLOW_ERROR;
+ if (csi->status & CSI_INT_UNDER)
+ csi->errors |= UNDERRUN_ERROR;
+
+ wake_up(&csi->wait);
+
+ return IRQ_HANDLED;
+}
+
+static void rzv2m_csi_setup_clock(struct rzv2m_csi_priv *csi, u32 spi_hz)
+{
+ unsigned long csiclk_rate = clk_get_rate(csi->csiclk);
+ unsigned long pclk_rate = clk_get_rate(csi->pclk);
+ unsigned long csiclk_rate_limit = pclk_rate >> 1;
+ u32 cks;
+
+ /*
+ * There is a restriction on the frequency of CSICLK, it has to be <=
+ * PCLK / 2.
+ */
+ if (csiclk_rate > csiclk_rate_limit) {
+ clk_set_rate(csi->csiclk, csiclk_rate >> 1);
+ csiclk_rate = clk_get_rate(csi->csiclk);
+ } else if ((csiclk_rate << 1) <= csiclk_rate_limit) {
+ clk_set_rate(csi->csiclk, csiclk_rate << 1);
+ csiclk_rate = clk_get_rate(csi->csiclk);
+ }
+
+ spi_hz = spi_hz > CSI_MAX_SPI_SCKO ? CSI_MAX_SPI_SCKO : spi_hz;
+
+ cks = DIV_ROUND_UP(csiclk_rate, spi_hz << 1);
+ if (cks > CSI_CKS_MAX)
+ cks = CSI_CKS_MAX;
+
+ dev_dbg(csi->dev, "SPI clk rate is %ldHz\n", csiclk_rate / (cks << 1));
+
+ rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKS, cks);
+}
+
+static void rzv2m_csi_setup_operating_mode(struct rzv2m_csi_priv *csi,
+ struct spi_transfer *t)
+{
+ if (t->rx_buf && !t->tx_buf)
+ /* Reception-only mode */
+ rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 0);
+ else
+ /* Send and receive mode */
+ rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 1);
+
+ csi->bytes_per_word = t->bits_per_word / 8;
+ rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CCL,
+ csi->bytes_per_word == 2);
+}
+
+static int rzv2m_csi_setup(struct spi_device *spi)
+{
+ struct rzv2m_csi_priv *csi = spi_controller_get_devdata(spi->controller);
+ int ret;
+
+ rzv2m_csi_sw_reset(csi, 0);
+
+ writel(CSI_MODE_SETUP, csi->base + CSI_MODE);
+
+ /* Setup clock polarity and phase timing */
+ rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKP,
+ !(spi->mode & SPI_CPOL));
+ rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_DAP,
+ !(spi->mode & SPI_CPHA));
+
+ /* Setup serial data order */
+ rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_DIR,
+ !!(spi->mode & SPI_LSB_FIRST));
+
+ /* Set the operation mode as master */
+ rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_SLAVE, 0);
+
+ /* Give the IP a SW reset */
+ ret = rzv2m_csi_sw_reset(csi, 1);
+ if (ret)
+ return ret;
+ rzv2m_csi_sw_reset(csi, 0);
+
+ /*
+ * We need to enable the communication so that the clock will settle
+ * for the right polarity before enabling the CS.
+ */
+ rzv2m_csi_start_stop_operation(csi, 1, false);
+ udelay(10);
+ rzv2m_csi_start_stop_operation(csi, 0, false);
+
+ return 0;
+}
+
+static int rzv2m_csi_pio_transfer(struct rzv2m_csi_priv *csi)
+{
+ bool tx_completed = csi->txbuf ? false : true;
+ bool rx_completed = csi->rxbuf ? false : true;
+ int ret = 0;
+
+ /* Make sure the TX FIFO is empty */
+ writel(0, csi->base + CSI_OFIFOL);
+
+ csi->bytes_sent = 0;
+ csi->bytes_received = 0;
+ csi->errors = 0;
+
+ rzv2m_csi_disable_all_irqs(csi);
+ rzv2m_csi_clear_all_irqs(csi);
+ rzv2m_csi_enable_rx_trigger(csi, true);
+
+ while (!tx_completed || !rx_completed) {
+ /*
+ * Decide how many words we are going to transfer during
+ * this cycle (for both TX and RX), then set the RX FIFO trigger
+ * level accordingly. No need to set a trigger level for the
+ * TX FIFO, as this IP comes with an interrupt that fires when
+ * the TX FIFO is empty.
+ */
+ rzv2m_csi_calc_current_transfer(csi);
+ rzv2m_csi_set_rx_fifo_trigger_level(csi);
+
+ rzv2m_csi_enable_irqs(csi, CSI_INT_OVERF | CSI_INT_UNDER);
+
+ /* Make sure the RX FIFO is empty */
+ writel(0, csi->base + CSI_IFIFOL);
+
+ writel(readl(csi->base + CSI_INT), csi->base + CSI_INT);
+ csi->status = 0;
+
+ rzv2m_csi_start_stop_operation(csi, 1, false);
+
+ /* TX */
+ if (csi->txbuf) {
+ ret = rzv2m_csi_fill_txfifo(csi);
+ if (ret)
+ break;
+
+ ret = rzv2m_csi_wait_for_tx_empty(csi);
+ if (ret)
+ break;
+
+ if (csi->bytes_sent == csi->buffer_len)
+ tx_completed = true;
+ }
+
+ /*
+ * Make sure the RX FIFO contains the desired number of words.
+ * We then either flush its content, or we copy it onto
+ * csi->rxbuf.
+ */
+ ret = rzv2m_csi_wait_for_rx_ready(csi);
+ if (ret)
+ break;
+
+ /* RX */
+ if (csi->rxbuf) {
+ rzv2m_csi_start_stop_operation(csi, 0, false);
+
+ ret = rzv2m_csi_read_rxfifo(csi);
+ if (ret)
+ break;
+
+ if (csi->bytes_received == csi->buffer_len)
+ rx_completed = true;
+ }
+
+ ret = rzv2m_csi_start_stop_operation(csi, 0, true);
+ if (ret)
+ goto pio_quit;
+
+ if (csi->errors) {
+ ret = -EIO;
+ goto pio_quit;
+ }
+ }
+
+ rzv2m_csi_start_stop_operation(csi, 0, true);
+
+pio_quit:
+ rzv2m_csi_disable_all_irqs(csi);
+ rzv2m_csi_enable_rx_trigger(csi, false);
+ rzv2m_csi_clear_all_irqs(csi);
+
+ return ret;
+}
+
+static int rzv2m_csi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct rzv2m_csi_priv *csi = spi_controller_get_devdata(controller);
+ struct device *dev = csi->dev;
+ int ret;
+
+ csi->txbuf = transfer->tx_buf;
+ csi->rxbuf = transfer->rx_buf;
+ csi->buffer_len = transfer->len;
+
+ rzv2m_csi_setup_operating_mode(csi, transfer);
+
+ rzv2m_csi_setup_clock(csi, transfer->speed_hz);
+
+ ret = rzv2m_csi_pio_transfer(csi);
+ if (ret) {
+ if (csi->errors & UNDERRUN_ERROR)
+ dev_err(dev, "Underrun error\n");
+ if (csi->errors & OVERFLOW_ERROR)
+ dev_err(dev, "Overflow error\n");
+ if (csi->errors & TX_TIMEOUT_ERROR)
+ dev_err(dev, "TX timeout error\n");
+ if (csi->errors & RX_TIMEOUT_ERROR)
+ dev_err(dev, "RX timeout error\n");
+ }
+
+ return ret;
+}
+
+static int rzv2m_csi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *controller;
+ struct device *dev = &pdev->dev;
+ struct rzv2m_csi_priv *csi;
+ struct reset_control *rstc;
+ int irq;
+ int ret;
+
+ controller = devm_spi_alloc_master(dev, sizeof(*csi));
+ if (!controller)
+ return -ENOMEM;
+
+ csi = spi_controller_get_devdata(controller);
+ platform_set_drvdata(pdev, csi);
+
+ csi->dev = dev;
+ csi->controller = controller;
+
+ csi->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->base))
+ return PTR_ERR(csi->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ csi->csiclk = devm_clk_get(dev, "csiclk");
+ if (IS_ERR(csi->csiclk))
+ return dev_err_probe(dev, PTR_ERR(csi->csiclk),
+ "could not get csiclk\n");
+
+ csi->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(csi->pclk))
+ return dev_err_probe(dev, PTR_ERR(csi->pclk),
+ "could not get pclk\n");
+
+ rstc = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n");
+
+ init_waitqueue_head(&csi->wait);
+
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ controller->setup = rzv2m_csi_setup;
+ controller->transfer_one = rzv2m_csi_transfer_one;
+ controller->use_gpio_descriptors = true;
+
+ ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0,
+ dev_name(dev), csi);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot request IRQ\n");
+
+ /*
+ * The reset also affects other HW that is not under the control
+ * of Linux. Therefore, all we can do is make sure the reset is
+ * deasserted.
+ */
+ reset_control_deassert(rstc);
+
+ /* Make sure the IP is in SW reset state */
+ ret = rzv2m_csi_sw_reset(csi, 1);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(csi->csiclk);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not enable csiclk\n");
+
+ ret = spi_register_controller(controller);
+ if (ret) {
+ clk_disable_unprepare(csi->csiclk);
+ return dev_err_probe(dev, ret, "register controller failed\n");
+ }
+
+ return 0;
+}
+
+static int rzv2m_csi_remove(struct platform_device *pdev)
+{
+ struct rzv2m_csi_priv *csi = platform_get_drvdata(pdev);
+
+ spi_unregister_controller(csi->controller);
+ rzv2m_csi_sw_reset(csi, 1);
+ clk_disable_unprepare(csi->csiclk);
+
+ return 0;
+}
+
+static const struct of_device_id rzv2m_csi_match[] = {
+ { .compatible = "renesas,rzv2m-csi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_csi_match);
+
+static struct platform_driver rzv2m_csi_drv = {
+ .probe = rzv2m_csi_probe,
+ .remove = rzv2m_csi_remove,
+ .driver = {
+ .name = "rzv2m_csi",
+ .of_match_table = rzv2m_csi_match,
+ },
+};
+module_platform_driver(rzv2m_csi_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>");
+MODULE_DESCRIPTION("Clocked Serial Interface Driver");
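The new RZ/V2M CSI driver above sizes each PIO chunk from the x_trg/x_trg_words lookup tables. A worked example of one loop iteration (values read straight from the tables; not driver code):

    /*
     * Full-duplex transfer, 8-bit words, 20 bytes still to move:
     *   to_transfer       = min(CSI_FIFO_HALF_SIZE, 20) = 16
     *   words_to_transfer = x_trg_words[16 - 1]         = 16
     *   RX trigger level  = x_trg[16 - 1]               = 4
     *                       (written to CSI_FIFOTRG.R_TRG)
     * so this pass moves 16 bytes and the RX trigger interrupt fires
     * once 16 words have been received.
     */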
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7ac17f0d18a9..fd55697144cc 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -19,7 +19,6 @@
#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 12
-#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
#define AUTOSUSPEND_TIMEOUT 2000
@@ -59,6 +58,8 @@
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_RX_RDY_LVL GENMASK(16, 11)
+#define S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT 11
#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
@@ -115,8 +116,10 @@
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+#define S3C64XX_SPI_POLLING_SIZE 32
+
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
+#define is_polling(x) (x->cntrlr_info->polling)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
@@ -553,7 +556,7 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
}
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer, bool use_irq)
{
void __iomem *regs = sdd->regs;
unsigned long val;
@@ -562,11 +565,24 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
u32 cpy_len;
u8 *buf;
int ms;
+ unsigned long time_us;
- /* millisecs to xfer 'len' bytes @ 'cur_speed' */
- ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ /* microsecs to xfer 'len' bytes @ 'cur_speed' */
+ time_us = (xfer->len * 8 * 1000 * 1000) / sdd->cur_speed;
+ ms = (time_us / 1000);
ms += 10; /* some tolerance */
+ /* sleep during signal transfer time */
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(status, sdd) < xfer->len)
+ usleep_range(time_us / 2, time_us);
+
+ if (use_irq) {
+ val = msecs_to_jiffies(ms);
+ if (!wait_for_completion_timeout(&sdd->xfer_completion, val))
+ return -EIO;
+ }
+
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
@@ -729,10 +745,13 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
int use_dma = 0;
+ bool use_irq = false;
int status;
u32 speed;
u8 bpw;
unsigned long flags;
+ u32 rdy_lv;
+ u32 val;
reinit_completion(&sdd->xfer_completion);
@@ -753,17 +772,46 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
- } else if (xfer->len > fifo_len) {
+ } else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
origin_len = xfer->len;
-
target_len = xfer->len;
- if (xfer->len > fifo_len)
- xfer->len = fifo_len;
+ xfer->len = fifo_len - 1;
}
do {
+ /* transfer size is greater than 32, change to IRQ mode */
+ if (!use_dma && xfer->len > S3C64XX_SPI_POLLING_SIZE)
+ use_irq = true;
+
+ if (use_irq) {
+ reinit_completion(&sdd->xfer_completion);
+
+ rdy_lv = xfer->len;
+ /* Setup RDY_FIFO trigger Level
+ * RDY_LVL =
+ * fifo_lvl up to 64 byte -> N bytes
+ * 128 byte -> RDY_LVL * 2 bytes
+ * 256 byte -> RDY_LVL * 4 bytes
+ */
+ if (fifo_len == 128)
+ rdy_lv /= 2;
+ else if (fifo_len == 256)
+ rdy_lv /= 4;
+
+ val = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_RX_RDY_LVL;
+ val |= (rdy_lv << S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT);
+ writel(val, sdd->regs + S3C64XX_SPI_MODE_CFG);
+
+ /* Enable FIFO_RDY_EN IRQ */
+ val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
+ writel((val | S3C64XX_SPI_INT_RX_FIFORDY_EN),
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
+ }
+
spin_lock_irqsave(&sdd->lock, flags);
/* Pending only which is to be done */
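[Editor's note] As the comment in the hunk above describes, the RX ready-level field counts in units that grow with the FIFO size, so the byte count is scaled down before being written into MODE_CFG. A small standalone sketch of that scaling, plain C with hypothetical names:

    #include <stdio.h>

    /* FIFOs up to 64 bytes count single bytes, a 128-byte FIFO counts
     * pairs, and a 256-byte FIFO counts groups of four. */
    static unsigned int rx_rdy_lvl(unsigned int xfer_len, unsigned int fifo_len)
    {
        unsigned int rdy_lv = xfer_len;

        if (fifo_len == 128)
            rdy_lv /= 2;
        else if (fifo_len == 256)
            rdy_lv /= 4;

        return rdy_lv;
    }

    int main(void)
    {
        printf("%u\n", rx_rdy_lvl(63, 64));    /* 63 */
        printf("%u\n", rx_rdy_lvl(100, 128));  /* 50 */
        printf("%u\n", rx_rdy_lvl(200, 256));  /* 50 */
        return 0;
    }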
@@ -785,7 +833,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
- status = s3c64xx_wait_for_pio(sdd, xfer);
+ status = s3c64xx_wait_for_pio(sdd, xfer, use_irq);
if (status) {
dev_err(&spi->dev,
@@ -824,8 +872,8 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (xfer->rx_buf)
xfer->rx_buf += xfer->len;
- if (target_len > fifo_len)
- xfer->len = fifo_len;
+ if (target_len >= fifo_len)
+ xfer->len = fifo_len - 1;
else
xfer->len = target_len;
}
@@ -995,6 +1043,14 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
dev_err(&spi->dev, "TX underrun\n");
}
+ if (val & S3C64XX_SPI_ST_RX_FIFORDY) {
+ complete(&sdd->xfer_completion);
+ /* No pending clear irq, turn-off INT_EN_RX_FIFO_RDY */
+ val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
+ writel((val & ~S3C64XX_SPI_INT_RX_FIFORDY_EN),
+ sdd->regs + S3C64XX_SPI_INT_EN);
+ }
+
/* Clear the pending irq by setting and then clearing it */
writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
@@ -1068,6 +1124,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
}
sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
+ sci->polling = !of_property_present(dev->of_node, "dmas");
return sci;
}
@@ -1103,29 +1160,23 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
return PTR_ERR(sci);
}
- if (!sci) {
- dev_err(&pdev->dev, "platform_data missing!\n");
- return -ENODEV;
- }
+ if (!sci)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Platform_data missing!\n");
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem_res == NULL) {
- dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
- return -ENXIO;
- }
+ if (!mem_res)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "Unable to get SPI MEM resource\n");
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct s3c64xx_spi_driver_data));
- if (master == NULL) {
- dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
- return -ENOMEM;
- }
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*sdd));
+ if (!master)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "Unable to allocate SPI Master\n");
platform_set_drvdata(pdev, master);
@@ -1137,11 +1188,9 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->sfr_start = mem_res->start;
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "spi");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
- ret);
- goto err_deref_master;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get alias id\n");
sdd->port_id = ret;
} else {
sdd->port_id = pdev->id;
@@ -1175,59 +1224,31 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->can_dma = s3c64xx_spi_can_dma;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
- if (IS_ERR(sdd->regs)) {
- ret = PTR_ERR(sdd->regs);
- goto err_deref_master;
- }
+ if (IS_ERR(sdd->regs))
+ return PTR_ERR(sdd->regs);
- if (sci->cfg_gpio && sci->cfg_gpio()) {
- dev_err(&pdev->dev, "Unable to config gpio\n");
- ret = -EBUSY;
- goto err_deref_master;
- }
+ if (sci->cfg_gpio && sci->cfg_gpio())
+ return dev_err_probe(&pdev->dev, -EBUSY,
+ "Unable to config gpio\n");
/* Setup clocks */
- sdd->clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(sdd->clk)) {
- dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
- ret = PTR_ERR(sdd->clk);
- goto err_deref_master;
- }
-
- ret = clk_prepare_enable(sdd->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
- goto err_deref_master;
- }
+ sdd->clk = devm_clk_get_enabled(&pdev->dev, "spi");
+ if (IS_ERR(sdd->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sdd->clk),
+ "Unable to acquire clock 'spi'\n");
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
- sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(sdd->src_clk)) {
- dev_err(&pdev->dev,
- "Unable to acquire clock '%s'\n", clk_name);
- ret = PTR_ERR(sdd->src_clk);
- goto err_disable_clk;
- }
-
- ret = clk_prepare_enable(sdd->src_clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
- goto err_disable_clk;
- }
+ sdd->src_clk = devm_clk_get_enabled(&pdev->dev, clk_name);
+ if (IS_ERR(sdd->src_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sdd->src_clk),
+ "Unable to acquire clock '%s'\n",
+ clk_name);
if (sdd->port_conf->clk_ioclk) {
- sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
- if (IS_ERR(sdd->ioclk)) {
- dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
- ret = PTR_ERR(sdd->ioclk);
- goto err_disable_src_clk;
- }
-
- ret = clk_prepare_enable(sdd->ioclk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
- goto err_disable_src_clk;
- }
+ sdd->ioclk = devm_clk_get_enabled(&pdev->dev, "spi_ioclk");
+ if (IS_ERR(sdd->ioclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sdd->ioclk),
+ "Unable to acquire 'ioclk'\n");
}
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
@@ -1275,14 +1296,6 @@ err_pm_put:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- clk_disable_unprepare(sdd->ioclk);
-err_disable_src_clk:
- clk_disable_unprepare(sdd->src_clk);
-err_disable_clk:
- clk_disable_unprepare(sdd->clk);
-err_deref_master:
- spi_master_put(master);
-
return ret;
}
@@ -1300,12 +1313,6 @@ static void s3c64xx_spi_remove(struct platform_device *pdev)
dma_release_channel(sdd->tx_dma.ch);
}
- clk_disable_unprepare(sdd->ioclk);
-
- clk_disable_unprepare(sdd->src_clk);
-
- clk_disable_unprepare(sdd->clk);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
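[Editor's note] The probe conversion above replaces hand-rolled clk_prepare_enable()/clk_disable_unprepare() pairs and goto-based unwinding with managed helpers, so the error paths can simply return. A condensed sketch of the resulting idiom, kernel context assumed, driver name hypothetical:

    #include <linux/clk.h>
    #include <linux/dev_printk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* devm_clk_get_enabled() leaves the clock prepared and enabled for the
     * device's lifetime; dev_err_probe() logs, stays quiet on -EPROBE_DEFER,
     * and returns the error code in one step. */
    static int example_probe(struct platform_device *pdev)
    {
        struct clk *clk;

        clk = devm_clk_get_enabled(&pdev->dev, "spi");
        if (IS_ERR(clk))
            return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                 "Unable to acquire clock 'spi'\n");

        /* no clk_disable_unprepare() or error-label unwinding needed */
        return 0;
    }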
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 70012333020b..d52ed67243f7 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -337,7 +337,7 @@ static struct i2c_driver sc18is602_driver = {
.name = "sc18is602",
.of_match_table = of_match_ptr(sc18is602_of_match),
},
- .probe_new = sc18is602_probe,
+ .probe = sc18is602_probe,
.id_table = sc18is602_id,
};
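[Editor's note] The .probe_new to .probe rename tracks the I2C core change in which the preferred probe callback takes only the client; the same function is now simply registered under .probe. A hedged sketch of the signature involved, with a hypothetical driver name:

    /* New-style I2C probe: only the client is passed in; the matched
     * i2c_device_id, when needed, is fetched with i2c_client_get_device_id(). */
    static int example_probe(struct i2c_client *client)
    {
        /* driver setup goes here */
        return 0;
    }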
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index a2bd9dcde075..d64d3f75c726 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -526,7 +526,7 @@ static int f_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static bool f_ospi_supports_op_width(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- u8 width_available[] = { 0, 1, 2, 4, 8 };
+ static const u8 width_available[] = { 0, 1, 2, 4, 8 };
u8 width_op[] = { op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth };
bool is_match_found;
@@ -566,7 +566,7 @@ static bool f_ospi_supports_op(struct spi_mem *mem,
static int f_ospi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- op->data.nbytes = min((int)op->data.nbytes, (int)(OSPI_DAT_SIZE_MAX));
+ op->data.nbytes = min_t(int, op->data.nbytes, OSPI_DAT_SIZE_MAX);
return 0;
}
@@ -634,18 +634,12 @@ static int f_ospi_probe(struct platform_device *pdev)
goto err_put_ctlr;
}
- ospi->clk = devm_clk_get(dev, NULL);
+ ospi->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ospi->clk)) {
ret = PTR_ERR(ospi->clk);
goto err_put_ctlr;
}
- ret = clk_prepare_enable(ospi->clk);
- if (ret) {
- dev_err(dev, "Failed to enable the clock\n");
- goto err_disable_clk;
- }
-
mutex_init(&ospi->mlock);
ret = f_ospi_init(ospi);
@@ -661,9 +655,6 @@ static int f_ospi_probe(struct platform_device *pdev)
err_destroy_mutex:
mutex_destroy(&ospi->mlock);
-err_disable_clk:
- clk_disable_unprepare(ospi->clk);
-
err_put_ctlr:
spi_controller_put(ctlr);
@@ -674,8 +665,6 @@ static void f_ospi_remove(struct platform_device *pdev)
{
struct f_ospi *ospi = platform_get_drvdata(pdev);
- clk_disable_unprepare(ospi->clk);
-
mutex_destroy(&ospi->mlock);
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index d6598e4116bd..6d10fa4ab783 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
//
-// STMicroelectronics STM32 SPI Controller driver (master mode only)
+// STMicroelectronics STM32 SPI Controller driver
//
// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
@@ -117,6 +117,7 @@
#define STM32H7_SPI_CFG2_CPHA BIT(24)
#define STM32H7_SPI_CFG2_CPOL BIT(25)
#define STM32H7_SPI_CFG2_SSM BIT(26)
+#define STM32H7_SPI_CFG2_SSIOP BIT(28)
#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
/* STM32H7_SPI_IER bit fields */
@@ -170,6 +171,10 @@
*/
#define SPI_DMA_MIN_BYTES 16
+/* STM32 SPI driver helpers */
+#define STM32_SPI_MASTER_MODE(stm32_spi) (!(stm32_spi)->device_mode)
+#define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode)
+
/**
* struct stm32_spi_reg - stm32 SPI register & bitfield desc
* @reg: register offset
@@ -190,6 +195,7 @@ struct stm32_spi_reg {
* @cpol: clock polarity register and polarity bit
* @cpha: clock phase register and phase bit
* @lsb_first: LSB transmitted first register and bit
+ * @cs_high: chips select active value
* @br: baud rate register and bitfields
* @rx: SPI RX data register
* @tx: SPI TX data register
@@ -201,6 +207,7 @@ struct stm32_spi_regspec {
const struct stm32_spi_reg cpol;
const struct stm32_spi_reg cpha;
const struct stm32_spi_reg lsb_first;
+ const struct stm32_spi_reg cs_high;
const struct stm32_spi_reg br;
const struct stm32_spi_reg rx;
const struct stm32_spi_reg tx;
@@ -258,7 +265,7 @@ struct stm32_spi_cfg {
/**
* struct stm32_spi - private data of the SPI controller
* @dev: driver model representation of the controller
- * @master: controller master interface
+ * @ctrl: controller interface
* @cfg: compatible configuration data
* @base: virtual memory area
* @clk: hw kernel clock feeding the SPI clock generator
@@ -280,10 +287,11 @@ struct stm32_spi_cfg {
* @dma_tx: dma channel for TX transfer
* @dma_rx: dma channel for RX transfer
* @phys_addr: SPI registers physical base address
+ * @device_mode: the controller is configured as SPI device
*/
struct stm32_spi {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *ctrl;
const struct stm32_spi_cfg *cfg;
void __iomem *base;
struct clk *clk;
@@ -307,6 +315,8 @@ struct stm32_spi {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
dma_addr_t phys_addr;
+
+ bool device_mode;
};
static const struct stm32_spi_regspec stm32f4_spi_regspec = {
@@ -318,6 +328,7 @@ static const struct stm32_spi_regspec stm32f4_spi_regspec = {
.cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
.cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
.lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .cs_high = {},
.br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
.rx = { STM32F4_SPI_DR },
@@ -336,6 +347,7 @@ static const struct stm32_spi_regspec stm32h7_spi_regspec = {
.cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
.cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
.lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .cs_high = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_SSIOP },
.br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
STM32H7_SPI_CFG1_MBR_SHIFT },
@@ -437,9 +449,9 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/*
- * SPI framework set xfer->speed_hz to master->max_speed_hz if
- * xfer->speed_hz is greater than master->max_speed_hz, and it returns
- * an error when xfer->speed_hz is lower than master->min_speed_hz, so
+ * SPI framework set xfer->speed_hz to ctrl->max_speed_hz if
+ * xfer->speed_hz is greater than ctrl->max_speed_hz, and it returns
+ * an error when xfer->speed_hz is lower than ctrl->min_speed_hz, so
* no need to check it there.
* However, we need to ensure the following calculations.
*/
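[Editor's note] Because the framework clamps xfer->speed_hz into the controller's advertised range, prepare_mbr only has to round the kernel-clock/speed ratio and keep it within local divider bounds. A self-contained illustration of that rounding-and-clamping step (plain C; the divider bounds here are illustrative assumptions, and the real STM32 prescaler granularity differs):

    #include <stdio.h>

    /* Round clk_rate / speed_hz to the nearest integer, clamp it into an
     * assumed [div_min, div_max] window, and report the resulting rate. */
    static unsigned int effective_hz(unsigned int clk_rate, unsigned int speed_hz,
                                     unsigned int div_min, unsigned int div_max)
    {
        unsigned int div = (clk_rate + speed_hz / 2) / speed_hz;

        if (div < div_min)
            div = div_min;
        if (div > div_max)
            div = div_max;

        return clk_rate / div;
    }

    int main(void)
    {
        /* e.g. 100 MHz kernel clock, 9 MHz requested, dividers 2..256 */
        printf("%u Hz\n", effective_hz(100000000, 9000000, 2, 256)); /* ~9.09 MHz */
        return 0;
    }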
@@ -657,9 +669,9 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
}
if (spi->cur_usedma && spi->dma_tx)
- dmaengine_terminate_all(spi->dma_tx);
+ dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
- dmaengine_terminate_all(spi->dma_rx);
+ dmaengine_terminate_async(spi->dma_rx);
stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
@@ -696,9 +708,9 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
}
if (spi->cur_usedma && spi->dma_tx)
- dmaengine_terminate_all(spi->dma_tx);
+ dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
- dmaengine_terminate_all(spi->dma_rx);
+ dmaengine_terminate_async(spi->dma_rx);
stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
@@ -714,19 +726,19 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
- * @master: controller master interface
+ * @ctrl: controller interface
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*
* If driver has fifo and the current transfer size is greater than fifo size,
* use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
*/
-static bool stm32_spi_can_dma(struct spi_master *master,
+static bool stm32_spi_can_dma(struct spi_controller *ctrl,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
unsigned int dma_size;
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
if (spi->cfg->has_fifo)
dma_size = spi->fifo_size;
@@ -742,12 +754,12 @@ static bool stm32_spi_can_dma(struct spi_master *master,
/**
* stm32f4_spi_irq_event - Interrupt handler for SPI controller events
* @irq: interrupt line
- * @dev_id: SPI controller master interface
+ * @dev_id: SPI controller ctrl interface
*/
static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
u32 sr, mask = 0;
bool end = false;
@@ -830,14 +842,14 @@ end_irq:
/**
* stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
- * @dev_id: SPI controller master interface
+ * @dev_id: SPI controller interface
*/
static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctrl);
stm32f4_spi_disable(spi);
return IRQ_HANDLED;
@@ -846,12 +858,12 @@ static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
/**
* stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
- * @dev_id: SPI controller master interface
+ * @dev_id: SPI controller interface
*/
static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
u32 sr, ier, mask;
unsigned long flags;
bool end = false;
@@ -931,7 +943,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
if (end) {
stm32h7_spi_disable(spi);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(ctrl);
}
return IRQ_HANDLED;
@@ -939,13 +951,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
- * @master: controller master interface
+ * @ctrl: controller interface
* @msg: pointer to spi message
*/
-static int stm32_spi_prepare_msg(struct spi_master *master,
+static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
struct spi_message *msg)
{
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
struct spi_device *spi_dev = msg->spi;
struct device_node *np = spi_dev->dev.of_node;
unsigned long flags;
@@ -971,6 +983,11 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
else
clrb |= spi->cfg->regs->lsb_first.mask;
+ if (STM32_SPI_DEVICE_MODE(spi) && spi_dev->mode & SPI_CS_HIGH)
+ setb |= spi->cfg->regs->cs_high.mask;
+ else
+ clrb |= spi->cfg->regs->cs_high.mask;
+
dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
!!(spi_dev->mode & SPI_CPOL),
!!(spi_dev->mode & SPI_CPHA),
@@ -984,9 +1001,9 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
if (spi->cfg->set_number_of_data) {
int ret;
- ret = spi_split_transfers_maxwords(master, msg,
- STM32H7_SPI_TSIZE_MAX,
- GFP_KERNEL | GFP_DMA);
+ ret = spi_split_transfers_maxsize(ctrl, msg,
+ STM32H7_SPI_TSIZE_MAX,
+ GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
}
@@ -1016,7 +1033,7 @@ static void stm32f4_spi_dma_tx_cb(void *data)
struct stm32_spi *spi = data;
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
- spi_finalize_current_transfer(spi->master);
+ spi_finalize_current_transfer(spi->ctrl);
stm32f4_spi_disable(spi);
}
}
@@ -1031,7 +1048,7 @@ static void stm32_spi_dma_rx_cb(void *data)
{
struct stm32_spi *spi = data;
- spi_finalize_current_transfer(spi->master);
+ spi_finalize_current_transfer(spi->ctrl);
spi->cfg->disable(spi);
}
@@ -1161,7 +1178,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
- stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+ if (STM32_SPI_MASTER_MODE(spi))
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
@@ -1208,7 +1226,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_enable(spi);
- stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+ if (STM32_SPI_MASTER_MODE(spi))
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
/**
@@ -1302,7 +1321,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
dma_submit_error:
if (spi->dma_rx)
- dmaengine_terminate_all(spi->dma_rx);
+ dmaengine_terminate_sync(spi->dma_rx);
dma_desc_error:
stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
@@ -1536,16 +1555,18 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cfg->set_bpw(spi);
/* Update spi->cur_speed with real clock speed */
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
- spi->cfg->baud_rate_div_min,
- spi->cfg->baud_rate_div_max);
- if (mbr < 0) {
- ret = mbr;
- goto out;
- }
+ if (STM32_SPI_MASTER_MODE(spi)) {
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
+ }
- transfer->speed_hz = spi->cur_speed;
- stm32_spi_set_mbr(spi, mbr);
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
+ }
comm_type = stm32_spi_communication_type(spi_dev, transfer);
ret = spi->cfg->set_mode(spi, comm_type);
@@ -1554,7 +1575,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cur_comm = comm_type;
- if (spi->cfg->set_data_idleness)
+ if (STM32_SPI_MASTER_MODE(spi) && spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
@@ -1575,7 +1596,8 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
dev_dbg(spi->dev,
"data frame of %d-bit, data packet of %d data frames\n",
spi->cur_bpw, spi->cur_fthlv);
- dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
+ if (STM32_SPI_MASTER_MODE(spi))
+ dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
spi->cur_xferlen, nb_words);
dev_dbg(spi->dev, "dma %s\n",
@@ -1589,18 +1611,18 @@ out:
/**
* stm32_spi_transfer_one - transfer a single spi_transfer
- * @master: controller master interface
+ * @ctrl: controller interface
* @spi_dev: pointer to the spi device
* @transfer: pointer to spi transfer
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
-static int stm32_spi_transfer_one(struct spi_master *master,
+static int stm32_spi_transfer_one(struct spi_controller *ctrl,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
spi->tx_buf = transfer->tx_buf;
@@ -1608,8 +1630,8 @@ static int stm32_spi_transfer_one(struct spi_master *master,
spi->tx_len = spi->tx_buf ? transfer->len : 0;
spi->rx_len = spi->rx_buf ? transfer->len : 0;
- spi->cur_usedma = (master->can_dma &&
- master->can_dma(master, spi_dev, transfer));
+ spi->cur_usedma = (ctrl->can_dma &&
+ ctrl->can_dma(ctrl, spi_dev, transfer));
ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
if (ret) {
@@ -1625,13 +1647,13 @@ static int stm32_spi_transfer_one(struct spi_master *master,
/**
* stm32_spi_unprepare_msg - relax the hardware
- * @master: controller master interface
+ * @ctrl: controller interface
* @msg: pointer to the spi message
*/
-static int stm32_spi_unprepare_msg(struct spi_master *master,
+static int stm32_spi_unprepare_msg(struct spi_controller *ctrl,
struct spi_message *msg)
{
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi->cfg->disable(spi);
@@ -1670,12 +1692,13 @@ static int stm32f4_spi_config(struct stm32_spi *spi)
}
/**
- * stm32h7_spi_config - Configure SPI controller as SPI master
+ * stm32h7_spi_config - Configure SPI controller
* @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
+ u32 cr1 = 0, cfg2 = 0;
spin_lock_irqsave(&spi->lock, flags);
@@ -1683,24 +1706,28 @@ static int stm32h7_spi_config(struct stm32_spi *spi)
stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
STM32H7_SPI_I2SCFGR_I2SMOD);
- /*
- * - SS input value high
- * - transmitter half duplex direction
- * - automatic communication suspend when RX-Fifo is full
- */
- stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
- STM32H7_SPI_CR1_HDDIR |
- STM32H7_SPI_CR1_MASRX);
+ if (STM32_SPI_DEVICE_MODE(spi)) {
+ /* Use native device select */
+ cfg2 &= ~STM32H7_SPI_CFG2_SSM;
+ } else {
+ /*
+ * - Transmitter half duplex direction
+ * - Automatic communication suspend when RX-Fifo is full
+ * - SS input value high
+ */
+ cr1 |= STM32H7_SPI_CR1_HDDIR | STM32H7_SPI_CR1_MASRX | STM32H7_SPI_CR1_SSI;
- /*
- * - Set the master mode (default Motorola mode)
- * - Consider 1 master/n slaves configuration and
- * SS input value is determined by the SSI bit
- * - keep control of all associated GPIOs
- */
- stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
- STM32H7_SPI_CFG2_SSM |
- STM32H7_SPI_CFG2_AFCNTR);
+ /*
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n devices configuration and
+ * SS input value is determined by the SSI bit
+ * - keep control of all associated GPIOs
+ */
+ cfg2 |= STM32H7_SPI_CFG2_MASTER | STM32H7_SPI_CFG2_SSM | STM32H7_SPI_CFG2_AFCNTR;
+ }
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, cr1);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, cfg2);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -1756,24 +1783,38 @@ static const struct of_device_id stm32_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
+static int stm32h7_spi_device_abort(struct spi_controller *ctrl)
+{
+ spi_finalize_current_transfer(ctrl);
+ return 0;
+}
+
static int stm32_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *ctrl;
struct stm32_spi *spi;
struct resource *res;
struct reset_control *rst;
+ struct device_node *np = pdev->dev.of_node;
+ bool device_mode;
int ret;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
- if (!master) {
- dev_err(&pdev->dev, "spi master allocation failed\n");
+ device_mode = of_property_read_bool(np, "spi-slave");
+
+ if (device_mode)
+ ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi));
+ else
+ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ if (!ctrl) {
+ dev_err(&pdev->dev, "spi controller allocation failed\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, ctrl);
- spi = spi_master_get_devdata(master);
+ spi = spi_controller_get_devdata(ctrl);
spi->dev = &pdev->dev;
- spi->master = master;
+ spi->ctrl = ctrl;
+ spi->device_mode = device_mode;
spin_lock_init(&spi->lock);
spi->cfg = (const struct stm32_spi_cfg *)
@@ -1794,7 +1835,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
spi->cfg->irq_handler_event,
spi->cfg->irq_handler_thread,
- IRQF_ONESHOT, pdev->name, master);
+ IRQF_ONESHOT, pdev->name, ctrl);
if (ret) {
dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
ret);
@@ -1843,19 +1884,21 @@ static int stm32_spi_probe(struct platform_device *pdev)
goto err_clk_disable;
}
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
- SPI_3WIRE;
- master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
- master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
- master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
- master->use_gpio_descriptors = true;
- master->prepare_message = stm32_spi_prepare_msg;
- master->transfer_one = stm32_spi_transfer_one;
- master->unprepare_message = stm32_spi_unprepare_msg;
- master->flags = spi->cfg->flags;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->auto_runtime_pm = true;
+ ctrl->bus_num = pdev->id;
+ ctrl->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_3WIRE;
+ ctrl->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
+ ctrl->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
+ ctrl->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->prepare_message = stm32_spi_prepare_msg;
+ ctrl->transfer_one = stm32_spi_transfer_one;
+ ctrl->unprepare_message = stm32_spi_unprepare_msg;
+ ctrl->flags = spi->cfg->flags;
+ if (STM32_SPI_DEVICE_MODE(spi))
+ ctrl->slave_abort = stm32h7_spi_device_abort;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
@@ -1866,7 +1909,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
} else {
- master->dma_tx = spi->dma_tx;
+ ctrl->dma_tx = spi->dma_tx;
}
spi->dma_rx = dma_request_chan(spi->dev, "rx");
@@ -1878,11 +1921,11 @@ static int stm32_spi_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
} else {
- master->dma_rx = spi->dma_rx;
+ ctrl->dma_rx = spi->dma_rx;
}
if (spi->dma_tx || spi->dma_rx)
- master->can_dma = stm32_spi_can_dma;
+ ctrl->can_dma = stm32_spi_can_dma;
pm_runtime_set_autosuspend_delay(&pdev->dev,
STM32_SPI_AUTOSUSPEND_DELAY);
@@ -1891,9 +1934,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = spi_register_master(master);
+ ret = spi_register_controller(ctrl);
if (ret) {
- dev_err(&pdev->dev, "spi master registration failed: %d\n",
+ dev_err(&pdev->dev, "spi controller registration failed: %d\n",
ret);
goto err_pm_disable;
}
@@ -1901,7 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
- dev_info(&pdev->dev, "driver initialized\n");
+ dev_info(&pdev->dev, "driver initialized (%s mode)\n",
+ STM32_SPI_MASTER_MODE(spi) ? "master" : "device");
return 0;
@@ -1923,12 +1967,12 @@ err_clk_disable:
static void stm32_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = platform_get_drvdata(pdev);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
pm_runtime_get_sync(&pdev->dev);
- spi_unregister_master(master);
+ spi_unregister_controller(ctrl);
spi->cfg->disable(spi);
pm_runtime_disable(&pdev->dev);
@@ -1936,10 +1980,10 @@ static void stm32_spi_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (ctrl->dma_tx)
+ dma_release_channel(ctrl->dma_tx);
+ if (ctrl->dma_rx)
+ dma_release_channel(ctrl->dma_rx);
clk_disable_unprepare(spi->clk);
@@ -1949,8 +1993,8 @@ static void stm32_spi_remove(struct platform_device *pdev)
static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
clk_disable_unprepare(spi->clk);
@@ -1959,8 +2003,8 @@ static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev)
static int __maybe_unused stm32_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
ret = pinctrl_pm_select_default_state(dev);
@@ -1972,10 +2016,10 @@ static int __maybe_unused stm32_spi_runtime_resume(struct device *dev)
static int __maybe_unused stm32_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(ctrl);
if (ret)
return ret;
@@ -1984,15 +2028,15 @@ static int __maybe_unused stm32_spi_suspend(struct device *dev)
static int __maybe_unused stm32_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(ctrl);
if (ret) {
clk_disable_unprepare(spi->clk);
return ret;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 7532c85a352c..30d541612253 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -42,7 +42,9 @@
#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_SDC BIT(11)
#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_SDM BIT(13)
#define SUN6I_TFR_CTL_XCH BIT(31)
#define SUN6I_INT_CTL_REG 0x10
@@ -85,6 +87,11 @@
#define SUN6I_TXDATA_REG 0x200
#define SUN6I_RXDATA_REG 0x300
+struct sun6i_spi_cfg {
+ unsigned long fifo_depth;
+ bool has_clk_ctl;
+};
+
struct sun6i_spi {
struct spi_master *master;
void __iomem *base_addr;
@@ -99,7 +106,7 @@ struct sun6i_spi {
const u8 *tx_buf;
u8 *rx_buf;
int len;
- unsigned long fifo_depth;
+ const struct sun6i_spi_cfg *cfg;
};
static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
@@ -156,7 +163,7 @@ static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
u8 byte;
/* See how much data we can fit */
- cnt = sspi->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);
+ cnt = sspi->cfg->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);
len = min((int)cnt, sspi->len);
@@ -256,7 +263,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
- unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
+ unsigned int div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
unsigned int tx_len = 0, rx_len = 0;
@@ -289,14 +296,14 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
* the hardcoded value used in old generation of Allwinner
* SPI controller. (See spi-sun4i.c)
*/
- trig_level = sspi->fifo_depth / 4 * 3;
+ trig_level = sspi->cfg->fifo_depth / 4 * 3;
} else {
/*
* Setup FIFO DMA request trigger level
* We choose 1/2 of the full fifo depth, that value will
* be used as DMA burst length.
*/
- trig_level = sspi->fifo_depth / 2;
+ trig_level = sspi->cfg->fifo_depth / 2;
if (tfr->tx_buf)
reg |= SUN6I_FIFO_CTL_TF_DRQ_EN;
@@ -346,39 +353,65 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
- /* Ensure that we have a parent clock fast enough */
- mclk_rate = clk_get_rate(sspi->mclk);
- if (mclk_rate < (2 * tfr->speed_hz)) {
- clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
- mclk_rate = clk_get_rate(sspi->mclk);
- }
+ if (sspi->cfg->has_clk_ctl) {
+ unsigned int mclk_rate = clk_get_rate(sspi->mclk);
- /*
- * Setup clock divider.
- *
- * We have two choices there. Either we can use the clock
- * divide rate 1, which is calculated thanks to this formula:
- * SPI_CLK = MOD_CLK / (2 ^ cdr)
- * Or we can use CDR2, which is calculated with the formula:
- * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
- * Wether we use the former or the latter is set through the
- * DRS bit.
- *
- * First try CDR2, and if we can't reach the expected
- * frequency, fall back to CDR1.
- */
- div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
- div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
- if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
- reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
- tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
+ /* Ensure that we have a parent clock fast enough */
+ if (mclk_rate < (2 * tfr->speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * tfr->speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated thanks to this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ cdr)
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+		 * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
+ div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
+ if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
+ tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
+ } else {
+ div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ tfr->effective_speed_hz = mclk_rate / (1 << div);
+ }
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
} else {
- div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
- reg = SUN6I_CLK_CTL_CDR1(div);
- tfr->effective_speed_hz = mclk_rate / (1 << div);
+ clk_set_rate(sspi->mclk, tfr->speed_hz);
+ tfr->effective_speed_hz = clk_get_rate(sspi->mclk);
+
+ /*
+ * Configure work mode.
+ *
+ * There are three work modes depending on the controller clock
+ * frequency:
+ * - normal sample mode : CLK <= 24MHz SDM=1 SDC=0
+ * - delay half-cycle sample mode : CLK <= 40MHz SDM=0 SDC=0
+ * - delay one-cycle sample mode : CLK >= 80MHz SDM=0 SDC=1
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~(SUN6I_TFR_CTL_SDM | SUN6I_TFR_CTL_SDC);
+
+ if (tfr->effective_speed_hz <= 24000000)
+ reg |= SUN6I_TFR_CTL_SDM;
+ else if (tfr->effective_speed_hz >= 80000000)
+ reg |= SUN6I_TFR_CTL_SDC;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
}
- sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
/* Finally enable the bus - doing so before might raise SCK to HIGH */
reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
reg |= SUN6I_GBL_CTL_BUS_ENABLE;
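[Editor's note] With has_clk_ctl set, the divider is chosen from the two formulas quoted in the comment above (CDR2 first, CDR1 as fallback); without it, the module clock is set directly and only the sample mode is picked from the resulting frequency. A standalone sketch of the CDR selection, plain C; the 8-bit CDR2 and 4-bit CDR1 field widths are assumptions taken from the driver's usual masks:

    #include <stdio.h>

    #define CDR2_MASK 0xff   /* assumed 8-bit CDR2 field */
    #define CDR1_MASK 0xf    /* assumed 4-bit CDR1 field */

    /* Try CDR2 (mclk / (2 * (n + 1))) first; fall back to CDR1
     * (mclk / 2^n) when CDR2 cannot divide far enough. */
    static unsigned int pick_spi_clk(unsigned int mclk, unsigned int target)
    {
        unsigned int div_cdr1 = (mclk + target - 1) / target;  /* DIV_ROUND_UP */
        unsigned int div_cdr2 = (div_cdr1 + 1) / 2;
        unsigned int n = 0;

        if (div_cdr2 <= CDR2_MASK + 1)
            return mclk / (2 * div_cdr2);

        /* order_base_2(div_cdr1), capped to the CDR1 field width */
        while ((1u << n) < div_cdr1 && n < CDR1_MASK)
            n++;
        return mclk / (1u << n);
    }

    int main(void)
    {
        printf("%u Hz\n", pick_spi_clk(24000000, 1000000));  /* CDR2: 24 MHz / 24 = 1 MHz */
        printf("%u Hz\n", pick_spi_clk(200000000, 100000));  /* CDR1 fallback: ~97.7 kHz */
        return 0;
    }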
@@ -410,9 +443,9 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = SUN6I_INT_CTL_TC;
if (!use_dma) {
- if (rx_len > sspi->fifo_depth)
+ if (rx_len > sspi->cfg->fifo_depth)
reg |= SUN6I_INT_CTL_RF_RDY;
- if (tx_len > sspi->fifo_depth)
+ if (tx_len > sspi->cfg->fifo_depth)
reg |= SUN6I_INT_CTL_TF_ERQ;
}
@@ -422,7 +455,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
- tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ tx_time = spi_controller_xfer_timeout(master, tfr);
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
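[Editor's note] tx_time is now derived by the new core helper introduced in this series (spi_controller_xfer_timeout()) instead of an ad-hoc per-driver formula. The helper's exact formula is not shown in this hunk; the sketch below is illustrative only, capturing the idea of scaling the on-the-wire time with headroom and a floor:

    #include <stdio.h>

    /* Illustrative only -- not the core helper's actual formula. */
    static unsigned int xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
    {
        unsigned long ms = (unsigned long)len * 8 * 1000 / speed_hz;

        ms *= 2;          /* headroom for CS setup, delays between words, etc. */
        if (ms < 500)
            ms = 500;     /* never time out below a sane floor */
        return ms;
    }

    int main(void)
    {
        printf("%u ms\n", xfer_timeout_ms(4096, 100000));   /* slow bus: ~650 ms */
        printf("%u ms\n", xfer_timeout_ms(64, 50000000));   /* fast bus: floor of 500 ms */
        return 0;
    }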
@@ -543,7 +576,7 @@ static bool sun6i_spi_can_dma(struct spi_master *master,
* the fifo length we can just fill the fifo and wait for a single
* irq, so don't bother setting up dma
*/
- return xfer->len > sspi->fifo_depth;
+ return xfer->len > sspi->cfg->fifo_depth;
}
static int sun6i_spi_probe(struct platform_device *pdev)
@@ -582,7 +615,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
sspi->master = master;
- sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);
+ sspi->cfg = of_device_get_match_data(&pdev->dev);
master->max_speed_hz = 100 * 1000 * 1000;
master->min_speed_hz = 3 * 1000;
@@ -695,9 +728,27 @@ static void sun6i_spi_remove(struct platform_device *pdev)
dma_release_channel(master->dma_rx);
}
+static const struct sun6i_spi_cfg sun6i_a31_spi_cfg = {
+ .fifo_depth = SUN6I_FIFO_DEPTH,
+ .has_clk_ctl = true,
+};
+
+static const struct sun6i_spi_cfg sun8i_h3_spi_cfg = {
+ .fifo_depth = SUN8I_FIFO_DEPTH,
+ .has_clk_ctl = true,
+};
+
+static const struct sun6i_spi_cfg sun50i_r329_spi_cfg = {
+ .fifo_depth = SUN8I_FIFO_DEPTH,
+};
+
static const struct of_device_id sun6i_spi_match[] = {
- { .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
- { .compatible = "allwinner,sun8i-h3-spi", .data = (void *)SUN8I_FIFO_DEPTH },
+ { .compatible = "allwinner,sun6i-a31-spi", .data = &sun6i_a31_spi_cfg },
+ { .compatible = "allwinner,sun8i-h3-spi", .data = &sun8i_h3_spi_cfg },
+ {
+ .compatible = "allwinner,sun50i-r329-spi",
+ .data = &sun50i_r329_spi_cfg
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun6i_spi_match);
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 5d23411f2a3e..ae6218bcd02a 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -241,7 +241,7 @@ static struct i2c_driver spi_xcomm_driver = {
.name = "spi-xcomm",
},
.id_table = spi_xcomm_ids,
- .probe_new = spi_xcomm_probe,
+ .probe = spi_xcomm_probe,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 8d009275a59d..d13dc15cc191 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -64,7 +64,8 @@ static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
| SPI_RX_QUAD | SPI_RX_OCTAL \
- | SPI_RX_CPHA_FLIP)
+ | SPI_RX_CPHA_FLIP | SPI_3WIRE_HIZ \
+ | SPI_MOSI_IDLE_LOW)
struct spidev_data {
dev_t devt;
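[Editor's note] Adding SPI_MOSI_IDLE_LOW (and SPI_3WIRE_HIZ) to the accepted spidev mode bits lets userspace request the new MOSI idle behaviour through the 32-bit mode ioctl. A hedged userspace sketch, assuming a kernel with this series, uapi headers that expose the flag, and an existing /dev/spidev0.0 node:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int main(void)
    {
        int fd = open("/dev/spidev0.0", O_RDWR);
        uint32_t mode;

        if (fd < 0) { perror("open"); return 1; }

        /* Read the current mode, then ask for MOSI to idle low. */
        if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0) { perror("rd mode"); return 1; }
        mode |= SPI_MOSI_IDLE_LOW;
        if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) < 0)
            perror("wr mode");   /* the controller may not support it */

        return 0;
    }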