Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c     7
-rw-r--r--  drivers/spi/Kconfig                 31
-rw-r--r--  drivers/spi/Makefile                 3
-rw-r--r--  drivers/spi/atmel-quadspi.c         34
-rw-r--r--  drivers/spi/spi-aspeed-smc.c         6
-rw-r--r--  drivers/spi/spi-bcm-qspi.c           5
-rw-r--r--  drivers/spi/spi-bcm-qspi.h           2
-rw-r--r--  drivers/spi/spi-bcm63xx.c            3
-rw-r--r--  drivers/spi/spi-brcmstb-qspi.c       4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c   15
-rw-r--r--  drivers/spi/spi-cadence-xspi.c       4
-rw-r--r--  drivers/spi/spi-fsl-cpm.c            2
-rw-r--r--  drivers/spi/spi-fsl-dspi.c          36
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c          5
-rw-r--r--  drivers/spi/spi-gpio.c              16
-rw-r--r--  drivers/spi/spi-hisi-sfc-v3xx.c      2
-rw-r--r--  drivers/spi/spi-img-spfi.c           3
-rw-r--r--  drivers/spi/spi-imx.c                8
-rw-r--r--  drivers/spi/spi-intel.c             78
-rw-r--r--  drivers/spi/spi-iproc-qspi.c         4
-rw-r--r--  drivers/spi/spi-meson-spicc.c       39
-rw-r--r--  drivers/spi/spi-microchip-core.c     9
-rw-r--r--  drivers/spi/spi-mt65xx.c            12
-rw-r--r--  drivers/spi/spi-mtk-nor.c           69
-rw-r--r--  drivers/spi/spi-mtk-snfi.c          29
-rw-r--r--  drivers/spi/spi-mxic.c               3
-rw-r--r--  drivers/spi/spi-npcm-fiu.c           4
-rw-r--r--  drivers/spi/spi-nxp-fspi.c           2
-rw-r--r--  drivers/spi/spi-pci1xxxx.c         475
-rw-r--r--  drivers/spi/spi-pxa2xx.c           195
-rw-r--r--  drivers/spi/spi-sc18is602.c          6
-rw-r--r--  drivers/spi/spi-sn-f-ospi.c        703
-rw-r--r--  drivers/spi/spi-wpcm-fiu.c         508
-rw-r--r--  drivers/spi/spi-xcomm.c              5
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c     191
-rw-r--r--  drivers/spi/spi.c                   70
-rw-r--r--  drivers/spi/spidev.c                21
37 files changed, 2282 insertions, 327 deletions
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index ff5cabe70a2b..6bc6b6c84241 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -843,6 +843,13 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
+
/**
* zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
* @value: Status value to be written
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d1bb62f7368b..3b1c0878bb85 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -241,7 +241,7 @@ config SPI_CADENCE_QUADSPI
config SPI_CADENCE_XSPI
tristate "Cadence XSPI controller"
- depends on (OF || COMPILE_TEST) && HAS_IOMEM
+ depends on OF && HAS_IOMEM
depends on SPI_MEM
help
Enable support for the Cadence XSPI Flash controller.
@@ -635,6 +635,17 @@ config SPI_MTK_SNFI
is implemented as a SPI-MEM controller with pipelined ECC
capability.
+config SPI_WPCM_FIU
+ tristate "Nuvoton WPCM450 Flash Interface Unit"
+ depends on ARCH_NPCM || COMPILE_TEST
+ select REGMAP
+ help
+ This enables support for the Flash Interface Unit SPI controller
+ present in the Nuvoton WPCM450 SoC.
+
+ This driver does not support generic SPI. The implementation only
+ supports the spi-mem interface.
+
config SPI_NPCM_FIU
tristate "Nuvoton NPCM FLASH Interface Unit"
depends on ARCH_NPCM || COMPILE_TEST
@@ -710,6 +721,15 @@ config SPI_ORION
This enables using the SPI master controller on the Orion
and MVEBU chips.
+config SPI_PCI1XXXX
+ tristate "PCI1XXXX SPI Bus support"
+ depends on PCI
+ help
+ Say "yes" to Enable the SPI Bus support for the PCI1xxxx card
+ This is a PCI to SPI Bus driver
+ This driver can be built as module. If so, the module will be
+ called as spi-pci1xxxx.
+
config SPI_PIC32
tristate "Microchip PIC32 series SPI"
depends on MACH_PIC32 || COMPILE_TEST
@@ -897,6 +917,15 @@ config SPI_SLAVE_MT27XX
say Y or M here. If you are not sure, say N.
SPI slave drivers for Mediatek MT27XX series ARM SoCs.
+config SPI_SN_F_OSPI
+ tristate "Socionext F_OSPI SPI flash controller"
+ depends on OF && HAS_IOMEM
+ depends on SPI_MEM
+ help
+ This enables support for the Socionext F_OSPI controller
+ for connecting a SPI flash memory over a bus up to 8 bits wide.
+ It supports indirect access mode only.
+
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 4b34e855c841..be9ba40ef8d0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+obj-$(CONFIG_SPI_WPCM_FIU) += spi-wpcm-fiu.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
@@ -94,6 +95,7 @@ obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PCI1XXXX) += spi-pci1xxxx.o
obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
@@ -120,6 +122,7 @@ obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
+obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 976a217e356d..70637e46290a 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -510,6 +510,39 @@ static int atmel_qspi_setup(struct spi_device *spi)
return 0;
}
+static int atmel_qspi_set_cs_timing(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ unsigned long clk_rate;
+ u32 cs_setup;
+ int delay;
+ int ret;
+
+ delay = spi_delay_to_ns(&spi->cs_setup, NULL);
+ if (delay <= 0)
+ return delay;
+
+ clk_rate = clk_get_rate(aq->pclk);
+ if (!clk_rate)
+ return -EINVAL;
+
+ cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
+ 1000);
+
+ ret = pm_runtime_resume_and_get(ctrl->dev.parent);
+ if (ret < 0)
+ return ret;
+
+ aq->scr |= QSPI_SCR_DLYBS(cs_setup);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+ pm_runtime_mark_last_busy(ctrl->dev.parent);
+ pm_runtime_put_autosuspend(ctrl->dev.parent);
+
+ return 0;
+}
+
static void atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
@@ -555,6 +588,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctrl->setup = atmel_qspi_setup;
+ ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
ctrl->bus_num = -1;
ctrl->mem_ops = &atmel_qspi_mem_ops;
ctrl->num_chipselect = 1;
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index b90571396a60..873ff2cf72c9 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -734,13 +734,11 @@ static int aspeed_spi_probe(struct platform_device *pdev)
aspi->data = data;
aspi->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- aspi->regs = devm_ioremap_resource(dev, res);
+ aspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aspi->regs))
return PTR_ERR(aspi->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- aspi->ahb_base = devm_ioremap_resource(dev, res);
+ aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(aspi->ahb_base)) {
dev_err(dev, "missing AHB mapping window\n");
return PTR_ERR(aspi->ahb_base);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index cad2d55dcd3d..0eee574d3e1f 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1682,7 +1682,7 @@ qspi_probe_err:
/* probe function to be called by SoC specific platform driver probe */
EXPORT_SYMBOL_GPL(bcm_qspi_probe);
-int bcm_qspi_remove(struct platform_device *pdev)
+void bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
@@ -1690,9 +1690,8 @@ int bcm_qspi_remove(struct platform_device *pdev)
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
-
- return 0;
}
+
/* function to be called by SoC specific platform driver remove() */
EXPORT_SYMBOL_GPL(bcm_qspi_remove);
diff --git a/drivers/spi/spi-bcm-qspi.h b/drivers/spi/spi-bcm-qspi.h
index 01aec6460108..3d7c359c0239 100644
--- a/drivers/spi/spi-bcm-qspi.h
+++ b/drivers/spi/spi-bcm-qspi.h
@@ -96,7 +96,7 @@ static inline u32 get_qspi_mask(int type)
/* The common driver functions to be called by the SoC platform driver */
int bcm_qspi_probe(struct platform_device *pdev,
struct bcm_qspi_soc_intc *soc_intc);
-int bcm_qspi_remove(struct platform_device *pdev);
+void bcm_qspi_remove(struct platform_device *pdev);
/* pm_ops used by the SoC platform driver called on PM suspend/resume */
extern const struct dev_pm_ops bcm_qspi_pm_ops;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 80fa0ef8909c..3686d78c44a6 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -547,8 +547,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bs->regs = devm_ioremap_resource(&pdev->dev, r);
+ bs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(bs->regs)) {
ret = PTR_ERR(bs->regs);
goto out_err;
diff --git a/drivers/spi/spi-brcmstb-qspi.c b/drivers/spi/spi-brcmstb-qspi.c
index 75e9b76dab1e..de362b35718f 100644
--- a/drivers/spi/spi-brcmstb-qspi.c
+++ b/drivers/spi/spi-brcmstb-qspi.c
@@ -23,7 +23,9 @@ static int brcmstb_qspi_probe(struct platform_device *pdev)
static int brcmstb_qspi_remove(struct platform_device *pdev)
{
- return bcm_qspi_remove(pdev);
+ bcm_qspi_remove(pdev);
+
+ return 0;
}
static struct platform_driver brcmstb_qspi_driver = {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 447230547945..676313e1bdad 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1119,6 +1119,14 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
/* Recalculate the baudrate divisor based on QSPI specification. */
div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+ /* Maximum baud divisor */
+ if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
+ div = CQSPI_REG_CONFIG_BAUD_MASK;
+ dev_warn(&cqspi->pdev->dev,
+ "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
+ cqspi->sclk, ref_clk_hz/((div+1)*2));
+ }
+
reg = readl(reg_base + CQSPI_REG_CONFIG);
reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
@@ -1580,7 +1588,6 @@ static int cqspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct resource *res_ahb;
struct cqspi_st *cqspi;
- struct resource *res;
int ret;
int irq;
@@ -1616,8 +1623,7 @@ static int cqspi_probe(struct platform_device *pdev)
}
/* Obtain and remap controller address. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cqspi->iobase = devm_ioremap_resource(dev, res);
+ cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
ret = PTR_ERR(cqspi->iobase);
@@ -1625,8 +1631,7 @@ static int cqspi_probe(struct platform_device *pdev)
}
/* Obtain and remap AHB address. */
- res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+ cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
ret = PTR_ERR(cqspi->ahb_base);
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 9e187f9c6c95..520b4cc69cdc 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -607,7 +607,6 @@ static int cdns_xspi_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id cdns_xspi_of_match[] = {
{
.compatible = "cdns,xspi-nor",
@@ -615,9 +614,6 @@ static const struct of_device_id cdns_xspi_of_match[] = {
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
-#else
-#define cdns_xspi_of_match NULL
-#endif /* CONFIG_OF */
static struct platform_driver cdns_xspi_platform_driver = {
.probe = cdns_xspi_probe,
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index ee905880769e..17a44d4f5021 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -333,7 +333,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
goto err_bds;
}
- mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ mspi->dma_dummy_tx = dma_map_single(dev, ZERO_PAGE(0), PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
dev_err(dev, "unable to map dummy tx buffer\n");
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a33e547b7d39..e419642eb10e 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -900,12 +900,31 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void dspi_assert_cs(struct spi_device *spi, bool *cs)
+{
+ if (!spi->cs_gpiod || *cs)
+ return;
+
+ gpiod_set_value_cansleep(spi->cs_gpiod, true);
+ *cs = true;
+}
+
+static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
+{
+ if (!spi->cs_gpiod || !*cs)
+ return;
+
+ gpiod_set_value_cansleep(spi->cs_gpiod, false);
+ *cs = false;
+}
+
static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *message)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
struct spi_transfer *transfer;
+ bool cs = false;
int status = 0;
message->actual_length = 0;
@@ -914,9 +933,14 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
dspi->cur_chip = spi_get_ctldata(spi);
+
+ dspi_assert_cs(spi, &cs);
+
/* Prepare command word for CMD FIFO */
- dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
- SPI_PUSHR_CMD_PCS(spi->chip_select);
+ dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
+ if (!spi->cs_gpiod)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi->chip_select);
+
if (list_is_last(&dspi->cur_transfer->transfer_list,
&dspi->cur_msg->transfers)) {
/* Leave PCS activated after last transfer when
@@ -964,6 +988,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
break;
spi_transfer_delay_exec(transfer);
+
+ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
+ dspi_deassert_cs(spi, &cs);
}
message->status = status;
@@ -981,6 +1008,7 @@ static int dspi_setup(struct spi_device *spi)
unsigned char pasc = 0, asc = 0;
struct chip_data *chip;
unsigned long clkrate;
+ bool cs = true;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -1030,6 +1058,9 @@ static int dspi_setup(struct spi_device *spi)
chip->ctar_val |= SPI_CTAR_LSBFE;
}
+ gpiod_direction_output(spi->cs_gpiod, false);
+ dspi_deassert_cs(spi, &cs);
+
spi_set_ctldata(spi, chip);
return 0;
@@ -1248,6 +1279,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->cleanup = dspi_cleanup;
ctlr->slave_abort = dspi_slave_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ ctlr->use_gpio_descriptors = true;
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index e8c1c8a4c6c8..34488de55587 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -98,6 +98,7 @@ struct fsl_lpspi_data {
struct clk *clk_ipg;
struct clk *clk_per;
bool is_slave;
+ u32 num_cs;
bool is_only_cs1;
bool is_first_byte;
@@ -840,6 +841,9 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->is_slave = is_slave;
fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
"fsl,spi-only-use-cs1-sel");
+ if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
+ &fsl_lpspi->num_cs))
+ fsl_lpspi->num_cs = 1;
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->transfer_one = fsl_lpspi_transfer_one;
@@ -849,6 +853,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
controller->dev.of_node = pdev->dev.of_node;
controller->bus_num = pdev->id;
+ controller->num_chipselect = fsl_lpspi->num_cs;
controller->slave_abort = fsl_lpspi_slave_abort;
if (!fsl_lpspi->is_slave)
controller->use_gpio_descriptors = true;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 4b12c4964a66..9c8c7948044e 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -268,9 +268,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output)
if (output)
return gpiod_direction_output(spi_gpio->mosi, 1);
- ret = gpiod_direction_input(spi_gpio->mosi);
- if (ret)
- return ret;
+ /*
+ * Only change MOSI to an input if using 3WIRE mode.
+ * Otherwise, MOSI could be left floating if there is
+ * no pull resistor connected to the I/O pin, or could
+ * be left logic high if there is a pull-up. Transmitting
+ * logic high when only clocking MISO data in can put some
+ * SPI devices into a bad state.
+ */
+ if (spi->mode & SPI_3WIRE) {
+ ret = gpiod_direction_input(spi_gpio->mosi);
+ if (ret)
+ return ret;
+ }
/*
* Send a turnaround high impedance cycle when switching
* from output to input. Theoretically there should be
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index d3a23b1c2a4c..f07d1045a30a 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -165,7 +165,7 @@ static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
}
/*
- * The controller only supports Standard SPI mode, Duall mode and
+ * The controller only supports Standard SPI mode, Dual mode and
* Quad mode. Double sanitize the ops here to avoid OOB access.
*/
static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index bfd12247f173..257046f843ff 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -540,8 +540,7 @@ static int img_spfi_probe(struct platform_device *pdev)
spfi->master = master;
spin_lock_init(&spfi->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spfi->regs = devm_ioremap_resource(spfi->dev, res);
+ spfi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spfi->regs)) {
ret = PTR_ERR(spfi->regs);
goto put_spi;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d209930069cf..e4ccd0c329d0 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -77,7 +77,6 @@ struct spi_imx_devtype_data {
void (*reset)(struct spi_imx_data *spi_imx);
void (*setup_wml)(struct spi_imx_data *spi_imx);
void (*disable)(struct spi_imx_data *spi_imx);
- void (*disable_dma)(struct spi_imx_data *spi_imx);
bool has_dmamode;
bool has_slavemode;
unsigned int fifo_size;
@@ -496,11 +495,6 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
-static void mx51_disable_dma(struct spi_imx_data *spi_imx)
-{
- writel(0, spi_imx->base + MX51_ECSPI_DMA);
-}
-
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
u32 ctrl;
@@ -1042,7 +1036,6 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.setup_wml = mx51_setup_wml,
- .disable_dma = mx51_disable_dma,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
@@ -1057,7 +1050,6 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
- .disable_dma = mx51_disable_dma,
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 3ac73691fbb5..f619212b0d5c 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -33,6 +33,7 @@
#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSFDP (0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
@@ -352,34 +353,21 @@ static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi,
+ const struct intel_spi_mem_op *iop, size_t len)
{
u32 val, status;
int ret;
- val = readl(ispi->base + HSFSTS_CTL);
- val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
-
- switch (opcode) {
- case SPINOR_OP_RDID:
- val |= HSFSTS_CTL_FCYCLE_RDID;
- break;
- case SPINOR_OP_WRSR:
- val |= HSFSTS_CTL_FCYCLE_WRSR;
- break;
- case SPINOR_OP_RDSR:
- val |= HSFSTS_CTL_FCYCLE_RDSR;
- break;
- default:
- return -EINVAL;
- }
-
- if (len > INTEL_SPI_FIFO_SZ)
+ if (!iop->replacement_op)
return -EINVAL;
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
+ val |= iop->replacement_op;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
@@ -406,9 +394,6 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
if (ret < 0)
return ret;
- if (len > INTEL_SPI_FIFO_SZ)
- return -EINVAL;
-
/*
* Always clear it after each SW sequencer operation regardless
* of whether it is successful or not.
@@ -473,17 +458,18 @@ static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t nbytes = op->data.nbytes;
u8 opcode = op->cmd.opcode;
int ret;
- writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
if (ispi->swseq_reg)
ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
+ ret = intel_spi_hw_cycle(ispi, iop, nbytes);
if (ret)
return ret;
@@ -495,6 +481,7 @@ static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t nbytes = op->data.nbytes;
u8 opcode = op->cmd.opcode;
int ret;
@@ -538,7 +525,7 @@ static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem
if (opcode == SPINOR_OP_WRDI)
return 0;
- writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
+ writel(addr, ispi->base + FADDR);
/* Write the value beforehand */
ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
@@ -548,7 +535,7 @@ static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem
if (ispi->swseq_reg)
return intel_spi_sw_cycle(ispi, opcode, nbytes,
OPTYPE_WRITE_NO_ADDR);
- return intel_spi_hw_cycle(ispi, opcode, nbytes);
+ return intel_spi_hw_cycle(ispi, iop, nbytes);
}
static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
@@ -713,6 +700,12 @@ static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
return 0;
}
+static int intel_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, INTEL_SPI_FIFO_SZ);
+ return 0;
+}
+
static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
@@ -853,6 +846,7 @@ static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs
}
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
+ .adjust_op_size = intel_spi_adjust_op_size,
.supports_op = intel_spi_supports_mem_op,
.exec_op = intel_spi_exec_mem_op,
.get_name = intel_spi_get_name,
@@ -912,18 +906,26 @@ static const struct spi_controller_mem_ops intel_spi_mem_ops = {
*/
#define INTEL_SPI_GENERIC_OPS \
/* Status register operations */ \
- INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
- SPI_MEM_OP_NO_ADDR, \
- INTEL_SPI_OP_DATA_IN(1), \
- intel_spi_read_reg), \
- INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
- SPI_MEM_OP_NO_ADDR, \
- INTEL_SPI_OP_DATA_IN(1), \
- intel_spi_read_reg), \
- INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
- SPI_MEM_OP_NO_ADDR, \
- INTEL_SPI_OP_DATA_OUT(1), \
- intel_spi_write_reg), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg, \
+ HSFSTS_CTL_FCYCLE_RDID), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg, \
+ HSFSTS_CTL_FCYCLE_RDSR), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ INTEL_SPI_OP_DATA_OUT(1), \
+ intel_spi_write_reg, \
+ HSFSTS_CTL_FCYCLE_WRSR), \
+ INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1), \
+ INTEL_SPI_OP_ADDR(3), \
+ INTEL_SPI_OP_DATA_IN(1), \
+ intel_spi_read_reg, \
+ HSFSTS_CTL_FCYCLE_RDSFDP), \
/* Normal read */ \
INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
INTEL_SPI_OP_ADDR(3), \
diff --git a/drivers/spi/spi-iproc-qspi.c b/drivers/spi/spi-iproc-qspi.c
index de297dacfd57..91cf8eb7213c 100644
--- a/drivers/spi/spi-iproc-qspi.c
+++ b/drivers/spi/spi-iproc-qspi.c
@@ -129,7 +129,9 @@ static int bcm_iproc_probe(struct platform_device *pdev)
static int bcm_iproc_remove(struct platform_device *pdev)
{
- return bcm_qspi_remove(pdev);
+ bcm_qspi_remove(pdev);
+
+ return 0;
}
static const struct of_device_id bcm_iproc_of_match[] = {
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 1b4195c54ee2..d47f2623a60f 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
/*
* The Meson SPICC controller could support DMA based transfers, but is not
@@ -168,6 +169,9 @@ struct meson_spicc_device {
unsigned long tx_remain;
unsigned long rx_remain;
unsigned long xfer_remain;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_idle_high;
+ struct pinctrl_state *pins_idle_low;
};
#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
@@ -176,8 +180,22 @@ static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
- if (!spicc->data->has_oen)
+ if (!spicc->data->has_oen) {
+ /* Try to get pinctrl states for idle high/low */
+ spicc->pins_idle_high = pinctrl_lookup_state(spicc->pinctrl,
+ "idle-high");
+ if (IS_ERR(spicc->pins_idle_high)) {
+ dev_warn(&spicc->pdev->dev, "can't get idle-high pinctrl\n");
+ spicc->pins_idle_high = NULL;
+ }
+ spicc->pins_idle_low = pinctrl_lookup_state(spicc->pinctrl,
+ "idle-low");
+ if (IS_ERR(spicc->pins_idle_low)) {
+ dev_warn(&spicc->pdev->dev, "can't get idle-low pinctrl\n");
+ spicc->pins_idle_low = NULL;
+ }
return;
+ }
conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) |
SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN;
@@ -459,6 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
else
conf &= ~SPICC_POL;
+ if (!spicc->data->has_oen) {
+ if (spi->mode & SPI_CPOL) {
+ if (spicc->pins_idle_high)
+ pinctrl_select_state(spicc->pinctrl, spicc->pins_idle_high);
+ } else {
+ if (spicc->pins_idle_low)
+ pinctrl_select_state(spicc->pinctrl, spicc->pins_idle_low);
+ }
+ }
+
if (spi->mode & SPI_CPHA)
conf |= SPICC_PHA;
else
@@ -505,6 +533,9 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
/* Set default configuration, keeping datarate field */
writel_relaxed(conf, spicc->base + SPICC_CONREG);
+ if (!spicc->data->has_oen)
+ pinctrl_select_default_state(&spicc->pdev->dev);
+
return 0;
}
@@ -818,6 +849,12 @@ static int meson_spicc_probe(struct platform_device *pdev)
goto out_core_clk;
}
+ spicc->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(spicc->pinctrl)) {
+ ret = PTR_ERR(spicc->pinctrl);
+ goto out_clk;
+ }
+
device_reset_optional(&pdev->dev);
master->num_chipselect = 4;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index d352844c798c..aeaa1da88f39 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -119,15 +119,6 @@ static inline void mchp_corespi_write(struct mchp_corespi *spi, unsigned int reg
writel(val, spi->regs + reg);
}
-static inline void mchp_corespi_enable(struct mchp_corespi *spi)
-{
- u32 control = mchp_corespi_read(spi, REG_CONTROL);
-
- control |= CONTROL_ENABLE;
-
- mchp_corespi_write(spi, REG_CONTROL, control);
-}
-
static inline void mchp_corespi_disable(struct mchp_corespi *spi)
{
u32 control = mchp_corespi_read(spi, REG_CONTROL);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index d6aff909fc36..6de8360e5c2a 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1192,11 +1192,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
else
dma_set_max_seg_size(dev, SZ_256K);
- ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
- IRQF_TRIGGER_NONE, dev_name(dev), master);
- if (ret)
- return dev_err_probe(dev, ret, "failed to register irq\n");
-
mdata->parent_clk = devm_clk_get(dev, "parent-clk");
if (IS_ERR(mdata->parent_clk))
return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
@@ -1266,6 +1261,13 @@ static int mtk_spi_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "failed to register master\n");
}
+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(dev), master);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return dev_err_probe(dev, ret, "failed to register irq\n");
+ }
+
return 0;
}
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index d167699a1a96..aad92a58c4b8 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -80,6 +80,9 @@
#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724
+#define MTK_NOR_REG_CG_DIS 0x728
+#define MTK_NOR_SFC_SW_RST BIT(2)
+
#define MTK_NOR_REG_DMA_DADR_HB 0x738
#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c
@@ -147,6 +150,15 @@ static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
return ret;
}
+static void mtk_nor_reset(struct mtk_nor *sp)
+{
+ mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, 0, MTK_NOR_SFC_SW_RST);
+ mb(); /* flush previous writes */
+ mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, MTK_NOR_SFC_SW_RST, 0);
+ mb(); /* flush previous writes */
+ writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
+}
+
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
u32 addr = op->addr.val;
@@ -354,7 +366,7 @@ static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
dma_addr_t dma_addr)
{
int ret = 0;
- ulong delay;
+ u32 delay, timeout;
u32 reg;
writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
@@ -376,15 +388,16 @@ static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
+ timeout = (delay + 1) * 100;
if (sp->has_irq) {
if (!wait_for_completion_timeout(&sp->op_done,
- (delay + 1) * 100))
+ usecs_to_jiffies(max(timeout, 10000U))))
ret = -ETIMEDOUT;
} else {
ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
!(reg & MTK_NOR_DMA_START), delay / 3,
- (delay + 1) * 100);
+ timeout);
}
if (ret < 0)
@@ -443,36 +456,28 @@ static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
return ret;
}
-static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
+static int mtk_nor_setup_write_buffer(struct mtk_nor *sp, bool on)
{
int ret;
u32 val;
- if (sp->wbuf_en)
+ if (!(sp->wbuf_en ^ on))
return 0;
val = readl(sp->base + MTK_NOR_REG_CFG2);
- writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
- ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
- val & MTK_NOR_WR_BUF_EN, 0, 10000);
- if (!ret)
- sp->wbuf_en = true;
- return ret;
-}
-
-static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
-{
- int ret;
- u32 val;
+ if (on) {
+ writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ val & MTK_NOR_WR_BUF_EN, 0, 10000);
+ } else {
+ writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
+ }
- if (!sp->wbuf_en)
- return 0;
- val = readl(sp->base + MTK_NOR_REG_CFG2);
- writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
- ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
- !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
if (!ret)
- sp->wbuf_en = false;
+ sp->wbuf_en = on;
+
return ret;
}
@@ -482,7 +487,7 @@ static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
u32 val;
int ret, i;
- ret = mtk_nor_write_buffer_enable(sp);
+ ret = mtk_nor_setup_write_buffer(sp, true);
if (ret < 0)
return ret;
@@ -501,7 +506,7 @@ static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
const u8 *buf = op->data.buf.out;
int ret;
- ret = mtk_nor_write_buffer_disable(sp);
+ ret = mtk_nor_setup_write_buffer(sp, false);
if (ret < 0)
return ret;
writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
@@ -608,7 +613,7 @@ static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
- ret = mtk_nor_write_buffer_disable(sp);
+ ret = mtk_nor_setup_write_buffer(sp, false);
if (ret < 0)
return ret;
mtk_nor_setup_bus(sp, op);
@@ -616,7 +621,15 @@ static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
mtk_nor_set_addr(sp, op);
return mtk_nor_read_pio(sp, op);
} else {
- return mtk_nor_read_dma(sp, op);
+ ret = mtk_nor_read_dma(sp, op);
+ if (unlikely(ret)) {
+ /* Handle rare bus glitch */
+ mtk_nor_reset(sp);
+ mtk_nor_setup_bus(sp, op);
+ return mtk_nor_read_dma(sp, op);
+ }
+
+ return ret;
}
}
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index d66bf9762557..fa8412ba20e2 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -126,7 +126,8 @@
#define STR_DATA BIT(0)
#define NFI_STA 0x060
-#define NFI_NAND_FSM GENMASK(28, 24)
+#define NFI_NAND_FSM_7622 GENMASK(28, 24)
+#define NFI_NAND_FSM_7986 GENMASK(29, 23)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)
@@ -158,6 +159,7 @@
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
+#define NFI_MASTERSTA_MASK_7986 3
// SNFI registers
#define SNF_MAC_CTL 0x500
@@ -220,6 +222,11 @@
static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };
+static const u8 mt7986_spare_sizes[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
+ 74
+};
+
struct mtk_snand_caps {
u16 sector_size;
u16 max_sectors;
@@ -230,6 +237,7 @@ struct mtk_snand_caps {
bool bbm_swap;
bool empty_page_check;
u32 mastersta_mask;
+ u32 nandfsm_mask;
const u8 *spare_sizes;
u32 num_spare_size;
@@ -244,6 +252,7 @@ static const struct mtk_snand_caps mt7622_snand_caps = {
.bbm_swap = false,
.empty_page_check = false,
.mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .nandfsm_mask = NFI_NAND_FSM_7622,
.spare_sizes = mt7622_spare_sizes,
.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};
@@ -257,10 +266,25 @@ static const struct mtk_snand_caps mt7629_snand_caps = {
.bbm_swap = true,
.empty_page_check = false,
.mastersta_mask = NFI_MASTERSTA_MASK_7622,
+ .nandfsm_mask = NFI_NAND_FSM_7622,
.spare_sizes = mt7622_spare_sizes,
.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};
+static const struct mtk_snand_caps mt7986_snand_caps = {
+ .sector_size = 1024,
+ .max_sectors = 8,
+ .fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .fifo_size = 64,
+ .bbm_swap = true,
+ .empty_page_check = true,
+ .mastersta_mask = NFI_MASTERSTA_MASK_7986,
+ .nandfsm_mask = NFI_NAND_FSM_7986,
+ .spare_sizes = mt7986_spare_sizes,
+ .num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
+};
+
struct mtk_snand_conf {
size_t page_size;
size_t oob_size;
@@ -360,7 +384,7 @@ static int mtk_nfi_reset(struct mtk_snand *snf)
}
ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
- !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
+ !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
SNFI_POLL_INTERVAL);
if (ret) {
dev_err(snf->dev, "Failed to reset NFI\n");
@@ -1295,6 +1319,7 @@ static irqreturn_t mtk_snand_irq(int irq, void *id)
static const struct of_device_id mtk_snand_ids[] = {
{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
+ { .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
{},
};
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 65be8e085ab8..a3dba17390eb 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -772,8 +772,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
if (IS_ERR(mxic->send_dly_clk))
return PTR_ERR(mxic->send_dly_clk);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- mxic->regs = devm_ioremap_resource(&pdev->dev, res);
+ mxic->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(mxic->regs))
return PTR_ERR(mxic->regs);
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 49f6424e35af..559d3a5b4062 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -699,7 +699,6 @@ static int npcm_fiu_probe(struct platform_device *pdev)
struct spi_controller *ctrl;
struct npcm_fiu_spi *fiu;
void __iomem *regbase;
- struct resource *res;
int id, ret;
ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
@@ -725,8 +724,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fiu);
fiu->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
- regbase = devm_ioremap_resource(dev, res);
+ regbase = devm_platform_ioremap_resource_byname(pdev, "control");
if (IS_ERR(regbase))
return PTR_ERR(regbase);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index d6a65a989ef8..1c1991a26c15 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -924,7 +924,7 @@ static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
static void erratum_err050568(struct nxp_fspi *f)
{
- const struct soc_device_attribute ls1028a_soc_attr[] = {
+ static const struct soc_device_attribute ls1028a_soc_attr[] = {
{ .family = "QorIQ LS1028A" },
{ /* sentinel */ }
};
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
new file mode 100644
index 000000000000..a31c3b612a43
--- /dev/null
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+// PCI1xxxx SPI driver
+// Copyright (C) 2022 Microchip Technology Inc.
+// Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
+// Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#define DRV_NAME "spi-pci1xxxx"
+
+#define SYS_FREQ_DEFAULT (62500000)
+
+#define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000)
+#define PCI1XXXX_SPI_CLK_20MHZ (20000000)
+#define PCI1XXXX_SPI_CLK_15MHZ (15000000)
+#define PCI1XXXX_SPI_CLK_12MHZ (12000000)
+#define PCI1XXXX_SPI_CLK_10MHZ (10000000)
+#define PCI1XXXX_SPI_MIN_CLOCK_HZ (2000000)
+
+#define PCI1XXXX_SPI_BUFFER_SIZE (320)
+
+#define SPI_MST_CTL_DEVSEL_MASK (GENMASK(27, 25))
+#define SPI_MST_CTL_CMD_LEN_MASK (GENMASK(16, 8))
+#define SPI_MST_CTL_SPEED_MASK (GENMASK(7, 5))
+#define SPI_MSI_VECTOR_SEL_MASK (GENMASK(4, 4))
+
+#define SPI_MST_CTL_FORCE_CE (BIT(4))
+#define SPI_MST_CTL_MODE_SEL (BIT(2))
+#define SPI_MST_CTL_GO (BIT(0))
+
+#define SPI_MST1_ADDR_BASE (0x800)
+
+/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
+
+#define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
+#define SPI_MST_RSP_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x200)
+#define SPI_MST_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x400)
+#define SPI_MST_EVENT_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x420)
+#define SPI_MST_EVENT_MASK_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x424)
+#define SPI_MST_PAD_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x460)
+#define SPIALERT_MST_DB_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x464)
+#define SPIALERT_MST_VAL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x468)
+#define SPI_PCI_CTRL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x480)
+
+#define PCI1XXXX_IRQ_FLAGS (IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
+#define SPI_MAX_DATA_LEN 320
+
+#define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
+
+#define SPI_INTR BIT(8)
+#define SPI_FORCE_CE BIT(4)
+
+#define SPI_CHIP_SEL_COUNT 7
+#define VENDOR_ID_MCHP 0x1055
+
+#define SPI_SUSPEND_CONFIG 0x101
+#define SPI_RESUME_CONFIG 0x303
+
+struct pci1xxxx_spi_internal {
+ u8 hw_inst;
+ bool spi_xfer_in_progress;
+ int irq;
+ struct completion spi_xfer_done;
+ struct spi_master *spi_host;
+ struct pci1xxxx_spi *parent;
+ struct {
+ unsigned int dev_sel : 3;
+ unsigned int msi_vector_sel : 1;
+ } prev_val;
+};
+
+struct pci1xxxx_spi {
+ struct pci_dev *dev;
+ u8 total_hw_instances;
+ void __iomem *reg_base;
+ struct pci1xxxx_spi_internal *spi_int[];
+};
+
+static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
+ { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
+
+static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
+ struct pci1xxxx_spi *par = p->parent;
+ u32 regval;
+
+ /* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
+ regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ if (enable) {
+ regval &= ~SPI_MST_CTL_DEVSEL_MASK;
+ regval |= (spi->chip_select << 25);
+ writel(regval,
+ par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ } else {
+ regval &= ~(spi->chip_select << 25);
+ writel(regval,
+ par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+
+ }
+}
+
+static u8 pci1xxxx_get_clock_div(u32 hz)
+{
+ u8 val = 0;
+
+ if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
+ val = 2;
+ else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
+ val = 3;
+ else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
+ val = 4;
+ else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
+ val = 5;
+ else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
+ val = 6;
+ else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
+ val = 7;
+ else
+ val = 2;
+
+ return val;
+}
+
+static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
+ int mode, len, loop_iter, transfer_len;
+ struct pci1xxxx_spi *par = p->parent;
+ unsigned long bytes_transfered;
+ unsigned long bytes_recvd;
+ unsigned long loop_count;
+ u8 *rx_buf, result;
+ const u8 *tx_buf;
+ u32 regval;
+ u8 clkdiv;
+
+ p->spi_xfer_in_progress = true;
+ mode = spi->mode;
+ clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ transfer_len = xfer->len;
+ regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+ if (tx_buf) {
+ bytes_transfered = 0;
+ bytes_recvd = 0;
+ loop_count = transfer_len / SPI_MAX_DATA_LEN;
+ if (transfer_len % SPI_MAX_DATA_LEN != 0)
+ loop_count += 1;
+
+ for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
+ len = SPI_MAX_DATA_LEN;
+ if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
+ (loop_iter == loop_count - 1))
+ len = transfer_len % SPI_MAX_DATA_LEN;
+
+ reinit_completion(&p->spi_xfer_done);
+ memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
+ &tx_buf[bytes_transfered], len);
+ bytes_transfered += len;
+ regval = readl(par->reg_base +
+ SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
+ SPI_MST_CTL_SPEED_MASK);
+
+ if (mode == SPI_MODE_3)
+ regval |= SPI_MST_CTL_MODE_SEL;
+ else
+ regval &= ~SPI_MST_CTL_MODE_SEL;
+
+ regval |= ((clkdiv << 5) | SPI_FORCE_CE | (len << 8));
+ regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
+ writel(regval, par->reg_base +
+ SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ regval = readl(par->reg_base +
+ SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ regval |= SPI_MST_CTL_GO;
+ writel(regval, par->reg_base +
+ SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+
+ /* Wait for DMA_TERM interrupt */
+ result = wait_for_completion_timeout(&p->spi_xfer_done,
+ PCI1XXXX_SPI_TIMEOUT);
+ if (!result)
+ return -ETIMEDOUT;
+
+ if (rx_buf) {
+ memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
+ SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
+ bytes_recvd += len;
+ }
+ }
+ }
+
+ regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ regval &= ~SPI_FORCE_CE;
+ writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ p->spi_xfer_in_progress = false;
+
+ return 0;
+}
+
+static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
+{
+ struct pci1xxxx_spi_internal *p = dev;
+ irqreturn_t spi_int_fired = IRQ_NONE;
+ u32 regval;
+
+ /* Clear the SPI GO_BIT Interrupt */
+ regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+ if (regval & SPI_INTR) {
+ /* Clear xfer_done */
+ complete(&p->spi_xfer_done);
+ spi_int_fired = IRQ_HANDLED;
+ }
+
+ writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
+
+ return spi_int_fired;
+}
+
+static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ u8 hw_inst_cnt, iter, start, only_sec_inst;
+ struct pci1xxxx_spi_internal *spi_sub_ptr;
+ struct device *dev = &pdev->dev;
+ struct pci1xxxx_spi *spi_bus;
+ struct spi_master *spi_host;
+ u32 regval;
+ int ret;
+
+ hw_inst_cnt = ent->driver_data & 0x0f;
+ start = (ent->driver_data & 0xf0) >> 4;
+ if (start == 1)
+ only_sec_inst = 1;
+ else
+ only_sec_inst = 0;
+
+ spi_bus = devm_kzalloc(&pdev->dev,
+ struct_size(spi_bus, spi_int, hw_inst_cnt),
+ GFP_KERNEL);
+ if (!spi_bus)
+ return -ENOMEM;
+
+ spi_bus->dev = pdev;
+ spi_bus->total_hw_instances = hw_inst_cnt;
+ pci_set_master(pdev);
+
+ for (iter = 0; iter < hw_inst_cnt; iter++) {
+ spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
+ sizeof(struct pci1xxxx_spi_internal),
+ GFP_KERNEL);
+ spi_sub_ptr = spi_bus->spi_int[iter];
+ spi_sub_ptr->spi_host = devm_spi_alloc_master(dev, sizeof(struct spi_master));
+ if (!spi_sub_ptr->spi_host)
+ return -ENOMEM;
+
+ spi_sub_ptr->parent = spi_bus;
+ spi_sub_ptr->spi_xfer_in_progress = false;
+
+ if (!iter) {
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return -ENOMEM;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ return -ENOMEM;
+
+ spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!spi_bus->reg_base) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
+ PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error allocating MSI vectors\n");
+ goto error;
+ }
+
+ init_completion(&spi_sub_ptr->spi_xfer_done);
+ /* Initialize Interrupts - SPI_INT */
+ regval = readl(spi_bus->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
+ regval &= ~SPI_INTR;
+ writel(regval, spi_bus->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
+ spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
+
+ ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+ pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
+ pci_name(pdev), spi_sub_ptr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to request irq : %d",
+ spi_sub_ptr->irq);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* This register is only applicable for 1st instance */
+ regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
+ if (!only_sec_inst)
+ regval |= (BIT(4));
+ else
+ regval &= ~(BIT(4));
+
+ writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
+ }
+
+ spi_sub_ptr->hw_inst = start++;
+
+ if (iter == 1) {
+ init_completion(&spi_sub_ptr->spi_xfer_done);
+ /* Initialize Interrupts - SPI_INT */
+ regval = readl(spi_bus->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
+ regval &= ~SPI_INTR;
+ writel(regval, spi_bus->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
+ spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
+ ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+ pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
+ pci_name(pdev), spi_sub_ptr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to request irq : %d",
+ spi_sub_ptr->irq);
+ ret = -ENODEV;
+ goto error;
+ }
+ }
+
+ spi_host = spi_sub_ptr->spi_host;
+ spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
+ spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
+ SPI_TX_DUAL | SPI_LOOP;
+ spi_host->transfer_one = pci1xxxx_spi_transfer_one;
+ spi_host->set_cs = pci1xxxx_spi_set_cs;
+ spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
+ spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
+ spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
+ spi_host->flags = SPI_MASTER_MUST_TX;
+ spi_master_set_devdata(spi_host, spi_sub_ptr);
+ ret = devm_spi_register_master(dev, spi_host);
+ if (ret)
+ goto error;
+ }
+ pci_set_drvdata(pdev, spi_bus);
+
+ return 0;
+
+error:
+ pci_release_regions(pdev);
+ return ret;
+}
+
+static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
+ struct pci1xxxx_spi_internal *spi_sub_ptr,
+ u8 inst, bool store)
+{
+ u32 regval;
+
+ if (store) {
+ regval = readl(spi_ptr->reg_base +
+ SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
+ regval &= SPI_MST_CTL_DEVSEL_MASK;
+ spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
+ regval = readl(spi_ptr->reg_base +
+ SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
+ regval &= SPI_MSI_VECTOR_SEL_MASK;
+ spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
+ } else {
+ regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
+ regval &= ~SPI_MST_CTL_DEVSEL_MASK;
+ regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
+ writel(regval,
+ spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
+ writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
+ spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
+ }
+}
+
+static int pci1xxxx_spi_resume(struct device *dev)
+{
+ struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
+ struct pci1xxxx_spi_internal *spi_sub_ptr;
+ u32 regval = SPI_RESUME_CONFIG;
+ u8 iter;
+
+ for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
+ spi_sub_ptr = spi_ptr->spi_int[iter];
+ spi_master_resume(spi_sub_ptr->spi_host);
+ writel(regval, spi_ptr->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(iter));
+
+ /* Restore config at resume */
+ store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
+ }
+
+ return 0;
+}
+
+static int pci1xxxx_spi_suspend(struct device *dev)
+{
+ struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
+ struct pci1xxxx_spi_internal *spi_sub_ptr;
+ u32 reg1 = SPI_SUSPEND_CONFIG;
+ u8 iter;
+
+ for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
+ spi_sub_ptr = spi_ptr->spi_int[iter];
+
+ while (spi_sub_ptr->spi_xfer_in_progress)
+ msleep(20);
+
+ /* Store existing config before suspend */
+ store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
+ spi_master_suspend(spi_sub_ptr->spi_host);
+ writel(reg1, spi_ptr->reg_base +
+ SPI_MST_EVENT_MASK_REG_OFFSET(iter));
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
+ pci1xxxx_spi_resume);
+
+static struct pci_driver pci1xxxx_spi_driver = {
+ .name = DRV_NAME,
+ .id_table = pci1xxxx_spi_pci_id_table,
+ .probe = pci1xxxx_spi_probe,
+ .driver = {
+ .pm = pm_sleep_ptr(&spi_pm_ops),
+ },
+};
+
+module_pci_driver(pci1xxxx_spi_driver);
+
+MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
+MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
+MODULE_AUTHOR("Kumaravel Thiagarajan<kumaravel.thiagarajan@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2bf21c2e7a52..32cc82a89ec1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -1322,134 +1321,11 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
- { "INT33C0", LPSS_LPT_SSP },
- { "INT33C1", LPSS_LPT_SSP },
- { "INT3430", LPSS_LPT_SSP },
- { "INT3431", LPSS_LPT_SSP },
- { "80860F0E", LPSS_BYT_SSP },
- { "8086228E", LPSS_BSW_SSP },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
-#endif
-
-/*
- * PCI IDs of compound devices that integrate both host controller and private
- * integrated DMA engine. Please note these are not used in module
- * autoloading and probing in this module but matching the LPSS SSP type.
- */
-static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
- /* SPT-LP */
- { PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
- { PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
- /* SPT-H */
- { PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
- { PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
- /* KBL-H */
- { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
- { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
- /* CML-V */
- { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
- { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
- /* BXT A-Step */
- { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
- /* BXT B-Step */
- { PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
- /* GLK */
- { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
- /* ICL-LP */
- { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
- /* EHL */
- { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
- /* JSL */
- { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
- /* TGL-H */
- { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
- /* ADL-P */
- { PCI_VDEVICE(INTEL, 0x51aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x51ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x51fb), LPSS_CNL_SSP },
- /* ADL-M */
- { PCI_VDEVICE(INTEL, 0x54aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x54ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x54fb), LPSS_CNL_SSP },
- /* APL */
- { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
- { PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
- /* RPL-S */
- { PCI_VDEVICE(INTEL, 0x7a2a), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7a2b), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7a79), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7a7b), LPSS_CNL_SSP },
- /* ADL-S */
- { PCI_VDEVICE(INTEL, 0x7aaa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7aab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7af9), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7afb), LPSS_CNL_SSP },
- /* MTL-P */
- { PCI_VDEVICE(INTEL, 0x7e27), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7e30), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x7e46), LPSS_CNL_SSP },
- /* CNL-LP */
- { PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x9dfb), LPSS_CNL_SSP },
- /* CNL-H */
- { PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
- /* CML-LP */
- { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
- /* CML-H */
- { PCI_VDEVICE(INTEL, 0x06aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x06ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0x06fb), LPSS_CNL_SSP },
- /* TGL-LP */
- { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
- { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
- { },
-};
-
-static const struct of_device_id pxa2xx_spi_of_match[] = {
- { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
- {},
-};
-MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
-
-#ifdef CONFIG_PCI
-
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
return param == chan->device->dev;
}
-#endif /* CONFIG_PCI */
-
static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
@@ -1458,46 +1334,51 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
struct device *parent = dev->parent;
struct ssp_device *ssp;
struct resource *res;
- struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
- const struct pci_device_id *pcidev_id = NULL;
- enum pxa_ssp_type type;
+ enum pxa_ssp_type type = SSP_UNDEFINED;
const void *match;
+ bool is_lpss_priv;
int status;
u64 uid;
- if (pcidev)
- pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
+ is_lpss_priv = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpss_priv");
- match = device_get_match_data(&pdev->dev);
+ match = device_get_match_data(dev);
if (match)
type = (enum pxa_ssp_type)match;
- else if (pcidev_id)
- type = (enum pxa_ssp_type)pcidev_id->driver_data;
- else
+ else if (is_lpss_priv) {
+ u32 value;
+
+ status = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &value);
+ if (status)
+ return ERR_PTR(status);
+
+ type = (enum pxa_ssp_type)value;
+ }
+
+ /* Validate the SSP type correctness */
+ if (!(type > SSP_UNDEFINED && type < SSP_MAX))
return ERR_PTR(-EINVAL);
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
ssp = &pdata->ssp;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ ssp->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ssp->mmio_base))
return ERR_CAST(ssp->mmio_base);
ssp->phys_base = res->start;
-#ifdef CONFIG_PCI
- if (pcidev_id) {
+ /* Platforms with iDMA 64-bit */
+ if (is_lpss_priv) {
pdata->tx_param = parent;
pdata->rx_param = parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
-#endif
- ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ ssp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ssp->clk))
return ERR_CAST(ssp->clk);
@@ -1506,7 +1387,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return ERR_PTR(ssp->irq);
ssp->type = type;
- ssp->dev = &pdev->dev;
+ ssp->dev = dev;
status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
if (status)
@@ -1514,7 +1395,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
else
ssp->port_id = uid;
- pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
+ pdata->is_slave = device_property_read_bool(dev, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
@@ -1807,7 +1688,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
@@ -1842,9 +1722,7 @@ static int pxa2xx_spi_resume(struct device *dev)
/* Start the queue running */
return spi_controller_resume(drv_data->controller);
}
-#endif
-#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
@@ -1859,18 +1737,35 @@ static int pxa2xx_spi_runtime_resume(struct device *dev)
return clk_prepare_enable(drv_data->ssp->clk);
}
-#endif
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
- SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
- pxa2xx_spi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
+ RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, pxa2xx_spi_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "80860F0E", LPSS_BYT_SSP },
+ { "8086228E", LPSS_BSW_SSP },
+ { "INT33C0", LPSS_LPT_SSP },
+ { "INT33C1", LPSS_LPT_SSP },
+ { "INT3430", LPSS_LPT_SSP },
+ { "INT3431", LPSS_LPT_SSP },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#endif
+
+static const struct of_device_id pxa2xx_spi_of_match[] = {
+ { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
+ {}
};
+MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
- .pm = &pxa2xx_spi_pm_ops,
+ .pm = pm_ptr(&pxa2xx_spi_pm_ops),
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
.of_match_table = of_match_ptr(pxa2xx_spi_of_match),
},
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 5d27ee482237..983b3621bc2a 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -235,9 +235,9 @@ static int sc18is602_setup(struct spi_device *spi)
return 0;
}
-static int sc18is602_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sc18is602_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
@@ -337,7 +337,7 @@ static struct i2c_driver sc18is602_driver = {
.name = "sc18is602",
.of_match_table = of_match_ptr(sc18is602_of_match),
},
- .probe = sc18is602_probe,
+ .probe_new = sc18is602_probe,
.id_table = sc18is602_id,
};
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
new file mode 100644
index 000000000000..348c6e1edd38
--- /dev/null
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Socionext SPI flash controller F_OSPI driver
+ * Copyright (C) 2021 Socionext Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/* Registers */
+#define OSPI_PROT_CTL_INDIR 0x00
+#define OSPI_PROT_MODE_DATA_MASK GENMASK(31, 30)
+#define OSPI_PROT_MODE_ALT_MASK GENMASK(29, 28)
+#define OSPI_PROT_MODE_ADDR_MASK GENMASK(27, 26)
+#define OSPI_PROT_MODE_CODE_MASK GENMASK(25, 24)
+#define OSPI_PROT_MODE_SINGLE 0
+#define OSPI_PROT_MODE_DUAL 1
+#define OSPI_PROT_MODE_QUAD 2
+#define OSPI_PROT_MODE_OCTAL 3
+#define OSPI_PROT_DATA_RATE_DATA BIT(23)
+#define OSPI_PROT_DATA_RATE_ALT BIT(22)
+#define OSPI_PROT_DATA_RATE_ADDR BIT(21)
+#define OSPI_PROT_DATA_RATE_CODE BIT(20)
+#define OSPI_PROT_SDR 0
+#define OSPI_PROT_DDR 1
+#define OSPI_PROT_BIT_POS_DATA BIT(19)
+#define OSPI_PROT_BIT_POS_ALT BIT(18)
+#define OSPI_PROT_BIT_POS_ADDR BIT(17)
+#define OSPI_PROT_BIT_POS_CODE BIT(16)
+#define OSPI_PROT_SAMP_EDGE BIT(12)
+#define OSPI_PROT_DATA_UNIT_MASK GENMASK(11, 10)
+#define OSPI_PROT_DATA_UNIT_1B 0
+#define OSPI_PROT_DATA_UNIT_2B 1
+#define OSPI_PROT_DATA_UNIT_4B 3
+#define OSPI_PROT_TRANS_DIR_WRITE BIT(9)
+#define OSPI_PROT_DATA_EN BIT(8)
+#define OSPI_PROT_ALT_SIZE_MASK GENMASK(7, 5)
+#define OSPI_PROT_ADDR_SIZE_MASK GENMASK(4, 2)
+#define OSPI_PROT_CODE_SIZE_MASK GENMASK(1, 0)
+
+#define OSPI_CLK_CTL 0x10
+#define OSPI_CLK_CTL_BOOT_INT_CLK_EN BIT(16)
+#define OSPI_CLK_CTL_PHA BIT(12)
+#define OSPI_CLK_CTL_PHA_180 0
+#define OSPI_CLK_CTL_PHA_90 1
+#define OSPI_CLK_CTL_DIV GENMASK(9, 8)
+#define OSPI_CLK_CTL_DIV_1 0
+#define OSPI_CLK_CTL_DIV_2 1
+#define OSPI_CLK_CTL_DIV_4 2
+#define OSPI_CLK_CTL_DIV_8 3
+#define OSPI_CLK_CTL_INT_CLK_EN BIT(0)
+
+#define OSPI_CS_CTL1 0x14
+#define OSPI_CS_CTL2 0x18
+#define OSPI_SSEL 0x20
+#define OSPI_CMD_IDX_INDIR 0x40
+#define OSPI_ADDR 0x50
+#define OSPI_ALT_INDIR 0x60
+#define OSPI_DMY_INDIR 0x70
+#define OSPI_DAT 0x80
+#define OSPI_DAT_SWP_INDIR 0x90
+
+#define OSPI_DAT_SIZE_INDIR 0xA0
+#define OSPI_DAT_SIZE_EN BIT(15)
+#define OSPI_DAT_SIZE_MASK GENMASK(10, 0)
+#define OSPI_DAT_SIZE_MAX (OSPI_DAT_SIZE_MASK + 1)
+
+#define OSPI_TRANS_CTL 0xC0
+#define OSPI_TRANS_CTL_STOP_REQ BIT(1) /* RW1AC */
+#define OSPI_TRANS_CTL_START_REQ BIT(0) /* RW1AC */
+
+#define OSPI_ACC_MODE 0xC4
+#define OSPI_ACC_MODE_BOOT_DISABLE BIT(0)
+
+#define OSPI_SWRST 0xD0
+#define OSPI_SWRST_INDIR_WRITE_FIFO BIT(9) /* RW1AC */
+#define OSPI_SWRST_INDIR_READ_FIFO BIT(8) /* RW1AC */
+
+#define OSPI_STAT 0xE0
+#define OSPI_STAT_IS_AXI_WRITING BIT(10)
+#define OSPI_STAT_IS_AXI_READING BIT(9)
+#define OSPI_STAT_IS_SPI_INT_CLK_STOP BIT(4)
+#define OSPI_STAT_IS_SPI_IDLE BIT(3)
+
+#define OSPI_IRQ 0xF0
+#define OSPI_IRQ_CS_DEASSERT BIT(8)
+#define OSPI_IRQ_WRITE_BUF_READY BIT(2)
+#define OSPI_IRQ_READ_BUF_READY BIT(1)
+#define OSPI_IRQ_CS_TRANS_COMP BIT(0)
+#define OSPI_IRQ_ALL \
+ (OSPI_IRQ_CS_DEASSERT | OSPI_IRQ_WRITE_BUF_READY \
+ | OSPI_IRQ_READ_BUF_READY | OSPI_IRQ_CS_TRANS_COMP)
+
+#define OSPI_IRQ_STAT_EN 0xF4
+#define OSPI_IRQ_SIG_EN 0xF8
+
+/* Parameters */
+#define OSPI_NUM_CS 4
+#define OSPI_DUMMY_CYCLE_MAX 255
+#define OSPI_WAIT_MAX_MSEC 100
+
+struct f_ospi {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct mutex mlock;
+};
+
+static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
+{
+ return (op->dummy.nbytes * 8) / op->dummy.buswidth;
+}
+
+static void f_ospi_clear_irq(struct f_ospi *ospi)
+{
+ writel(OSPI_IRQ_CS_DEASSERT | OSPI_IRQ_CS_TRANS_COMP,
+ ospi->base + OSPI_IRQ);
+}
+
+static void f_ospi_enable_irq_status(struct f_ospi *ospi, u32 irq_bits)
+{
+ u32 val;
+
+ val = readl(ospi->base + OSPI_IRQ_STAT_EN);
+ val |= irq_bits;
+ writel(val, ospi->base + OSPI_IRQ_STAT_EN);
+}
+
+static void f_ospi_disable_irq_status(struct f_ospi *ospi, u32 irq_bits)
+{
+ u32 val;
+
+ val = readl(ospi->base + OSPI_IRQ_STAT_EN);
+ val &= ~irq_bits;
+ writel(val, ospi->base + OSPI_IRQ_STAT_EN);
+}
+
+static void f_ospi_disable_irq_output(struct f_ospi *ospi, u32 irq_bits)
+{
+ u32 val;
+
+ val = readl(ospi->base + OSPI_IRQ_SIG_EN);
+ val &= ~irq_bits;
+ writel(val, ospi->base + OSPI_IRQ_SIG_EN);
+}
+
+static int f_ospi_prepare_config(struct f_ospi *ospi)
+{
+ u32 val, stat0, stat1;
+
+ /* G4: Disable internal clock */
+ val = readl(ospi->base + OSPI_CLK_CTL);
+ val &= ~(OSPI_CLK_CTL_BOOT_INT_CLK_EN | OSPI_CLK_CTL_INT_CLK_EN);
+ writel(val, ospi->base + OSPI_CLK_CTL);
+
+ /* G5: Wait for stop */
+ stat0 = OSPI_STAT_IS_AXI_WRITING | OSPI_STAT_IS_AXI_READING;
+ stat1 = OSPI_STAT_IS_SPI_IDLE | OSPI_STAT_IS_SPI_INT_CLK_STOP;
+
+ return readl_poll_timeout(ospi->base + OSPI_STAT,
+ val, (val & (stat0 | stat1)) == stat1,
+ 0, OSPI_WAIT_MAX_MSEC);
+}
+
+static int f_ospi_unprepare_config(struct f_ospi *ospi)
+{
+ u32 val;
+
+ /* G11: Enable internal clock */
+ val = readl(ospi->base + OSPI_CLK_CTL);
+ val |= OSPI_CLK_CTL_BOOT_INT_CLK_EN | OSPI_CLK_CTL_INT_CLK_EN;
+ writel(val, ospi->base + OSPI_CLK_CTL);
+
+ /* G12: Wait for clock to start */
+ return readl_poll_timeout(ospi->base + OSPI_STAT,
+ val, !(val & OSPI_STAT_IS_SPI_INT_CLK_STOP),
+ 0, OSPI_WAIT_MAX_MSEC);
+}
+
+static void f_ospi_config_clk(struct f_ospi *ospi, u32 device_hz)
+{
+ long rate_hz = clk_get_rate(ospi->clk);
+ u32 div = DIV_ROUND_UP(rate_hz, device_hz);
+ u32 div_reg;
+ u32 val;
+
+ if (rate_hz < device_hz) {
+ dev_warn(ospi->dev, "Device frequency too large: %d\n",
+ device_hz);
+ div_reg = OSPI_CLK_CTL_DIV_1;
+ } else {
+ if (div == 1) {
+ div_reg = OSPI_CLK_CTL_DIV_1;
+ } else if (div == 2) {
+ div_reg = OSPI_CLK_CTL_DIV_2;
+ } else if (div <= 4) {
+ div_reg = OSPI_CLK_CTL_DIV_4;
+ } else if (div <= 8) {
+ div_reg = OSPI_CLK_CTL_DIV_8;
+ } else {
+ dev_warn(ospi->dev, "Device frequency too small: %d\n",
+ device_hz);
+ div_reg = OSPI_CLK_CTL_DIV_8;
+ }
+ }
+
+	/*
+	 * G7: Set clock mode
+	 * The clock phase is fixed at 180 degrees; the sampling edge
+	 * direction is configured instead.
+	 */
+ val = readl(ospi->base + OSPI_CLK_CTL);
+
+ val &= ~(OSPI_CLK_CTL_PHA | OSPI_CLK_CTL_DIV);
+ val |= FIELD_PREP(OSPI_CLK_CTL_PHA, OSPI_CLK_CTL_PHA_180)
+ | FIELD_PREP(OSPI_CLK_CTL_DIV, div_reg);
+
+ writel(val, ospi->base + OSPI_CLK_CTL);
+}
+
+static void f_ospi_config_dll(struct f_ospi *ospi)
+{
+ /* G8: Configure DLL, nothing */
+	/* G8: Configure DLL; nothing to do on this controller */
+
+static u8 f_ospi_get_mode(struct f_ospi *ospi, int width, int data_size)
+{
+ u8 mode = OSPI_PROT_MODE_SINGLE;
+
+ switch (width) {
+ case 1:
+ mode = OSPI_PROT_MODE_SINGLE;
+ break;
+ case 2:
+ mode = OSPI_PROT_MODE_DUAL;
+ break;
+ case 4:
+ mode = OSPI_PROT_MODE_QUAD;
+ break;
+ case 8:
+ mode = OSPI_PROT_MODE_OCTAL;
+ break;
+ default:
+ if (data_size)
+ dev_err(ospi->dev, "Invalid buswidth: %d\n", width);
+ break;
+ }
+
+ return mode;
+}
+
+static void f_ospi_config_indir_protocol(struct f_ospi *ospi,
+ struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ u8 mode;
+ u32 prot = 0, val;
+ int unit;
+
+ /* Set one chip select */
+ writel(BIT(spi->chip_select), ospi->base + OSPI_SSEL);
+
+ mode = f_ospi_get_mode(ospi, op->cmd.buswidth, 1);
+ prot |= FIELD_PREP(OSPI_PROT_MODE_CODE_MASK, mode);
+
+ mode = f_ospi_get_mode(ospi, op->addr.buswidth, op->addr.nbytes);
+ prot |= FIELD_PREP(OSPI_PROT_MODE_ADDR_MASK, mode);
+
+ mode = f_ospi_get_mode(ospi, op->data.buswidth, op->data.nbytes);
+ prot |= FIELD_PREP(OSPI_PROT_MODE_DATA_MASK, mode);
+
+ prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_DATA, OSPI_PROT_SDR);
+ prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_ALT, OSPI_PROT_SDR);
+ prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_ADDR, OSPI_PROT_SDR);
+ prot |= FIELD_PREP(OSPI_PROT_DATA_RATE_CODE, OSPI_PROT_SDR);
+
+ if (spi->mode & SPI_LSB_FIRST)
+ prot |= OSPI_PROT_BIT_POS_DATA | OSPI_PROT_BIT_POS_ALT
+ | OSPI_PROT_BIT_POS_ADDR | OSPI_PROT_BIT_POS_CODE;
+
+ if (spi->mode & SPI_CPHA)
+ prot |= OSPI_PROT_SAMP_EDGE;
+
+ /* Examine nbytes % 4 */
+ switch (op->data.nbytes & 0x3) {
+ case 0:
+ unit = OSPI_PROT_DATA_UNIT_4B;
+ val = 0;
+ break;
+ case 2:
+ unit = OSPI_PROT_DATA_UNIT_2B;
+ val = OSPI_DAT_SIZE_EN | (op->data.nbytes - 1);
+ break;
+ default:
+ unit = OSPI_PROT_DATA_UNIT_1B;
+ val = OSPI_DAT_SIZE_EN | (op->data.nbytes - 1);
+ break;
+ }
+ prot |= FIELD_PREP(OSPI_PROT_DATA_UNIT_MASK, unit);
+
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ prot |= OSPI_PROT_DATA_EN;
+ break;
+
+ case SPI_MEM_DATA_OUT:
+ prot |= OSPI_PROT_TRANS_DIR_WRITE | OSPI_PROT_DATA_EN;
+ break;
+
+ case SPI_MEM_NO_DATA:
+ prot |= OSPI_PROT_TRANS_DIR_WRITE;
+ break;
+
+ default:
+ dev_warn(ospi->dev, "Unsupported direction");
+ break;
+ }
+
+ prot |= FIELD_PREP(OSPI_PROT_ADDR_SIZE_MASK, op->addr.nbytes);
+ prot |= FIELD_PREP(OSPI_PROT_CODE_SIZE_MASK, 1); /* 1byte */
+
+ writel(prot, ospi->base + OSPI_PROT_CTL_INDIR);
+ writel(val, ospi->base + OSPI_DAT_SIZE_INDIR);
+}
+
+static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ u32 irq_stat_en;
+ int ret;
+
+ ret = f_ospi_prepare_config(ospi);
+ if (ret)
+ return ret;
+
+ f_ospi_config_clk(ospi, spi->max_speed_hz);
+
+ f_ospi_config_indir_protocol(ospi, mem, op);
+
+ writel(f_ospi_get_dummy_cycle(op), ospi->base + OSPI_DMY_INDIR);
+ writel(op->addr.val, ospi->base + OSPI_ADDR);
+ writel(op->cmd.opcode, ospi->base + OSPI_CMD_IDX_INDIR);
+
+ f_ospi_clear_irq(ospi);
+
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ irq_stat_en = OSPI_IRQ_READ_BUF_READY | OSPI_IRQ_CS_TRANS_COMP;
+ break;
+
+ case SPI_MEM_DATA_OUT:
+ irq_stat_en = OSPI_IRQ_WRITE_BUF_READY | OSPI_IRQ_CS_TRANS_COMP;
+ break;
+
+ case SPI_MEM_NO_DATA:
+ irq_stat_en = OSPI_IRQ_CS_TRANS_COMP;
+ break;
+
+ default:
+ dev_warn(ospi->dev, "Unsupported direction");
+ irq_stat_en = 0;
+ }
+
+ f_ospi_disable_irq_status(ospi, ~irq_stat_en);
+ f_ospi_enable_irq_status(ospi, irq_stat_en);
+
+ return f_ospi_unprepare_config(ospi);
+}
+
+static void f_ospi_indir_start_xfer(struct f_ospi *ospi)
+{
+ /* Write only 1, auto cleared */
+ writel(OSPI_TRANS_CTL_START_REQ, ospi->base + OSPI_TRANS_CTL);
+}
+
+static void f_ospi_indir_stop_xfer(struct f_ospi *ospi)
+{
+ /* Write only 1, auto cleared */
+ writel(OSPI_TRANS_CTL_STOP_REQ, ospi->base + OSPI_TRANS_CTL);
+}
+
+static int f_ospi_indir_wait_xfer_complete(struct f_ospi *ospi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ospi->base + OSPI_IRQ, val,
+ val & OSPI_IRQ_CS_TRANS_COMP,
+ 0, OSPI_WAIT_MAX_MSEC);
+}
+
+static int f_ospi_indir_read(struct f_ospi *ospi, struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ u8 *buf = op->data.buf.in;
+ u32 val;
+ int i, ret;
+
+ mutex_lock(&ospi->mlock);
+
+ /* E1-2: Prepare transfer operation */
+ ret = f_ospi_indir_prepare_op(ospi, mem, op);
+ if (ret)
+ goto out;
+
+ f_ospi_indir_start_xfer(ospi);
+
+ /* E3-4: Wait for ready and read data */
+ for (i = 0; i < op->data.nbytes; i++) {
+ ret = readl_poll_timeout(ospi->base + OSPI_IRQ, val,
+ val & OSPI_IRQ_READ_BUF_READY,
+ 0, OSPI_WAIT_MAX_MSEC);
+ if (ret)
+ goto out;
+
+ buf[i] = readl(ospi->base + OSPI_DAT) & 0xFF;
+ }
+
+	/* E5-6: Stop the transfer manually if no explicit data size was set */
+ if (!(readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN))
+ f_ospi_indir_stop_xfer(ospi);
+
+ /* E7-8: Wait for completion and clear */
+ ret = f_ospi_indir_wait_xfer_complete(ospi);
+ if (ret)
+ goto out;
+
+ writel(OSPI_IRQ_CS_TRANS_COMP, ospi->base + OSPI_IRQ);
+
+	/* E9: Skip the read FIFO reset if an explicit data size was set */
+ if (readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN)
+ goto out;
+
+ /* E10-11: Reset and check read fifo */
+ writel(OSPI_SWRST_INDIR_READ_FIFO, ospi->base + OSPI_SWRST);
+
+ ret = readl_poll_timeout(ospi->base + OSPI_SWRST, val,
+ !(val & OSPI_SWRST_INDIR_READ_FIFO),
+ 0, OSPI_WAIT_MAX_MSEC);
+out:
+ mutex_unlock(&ospi->mlock);
+
+ return ret;
+}
+
+static int f_ospi_indir_write(struct f_ospi *ospi, struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ u8 *buf = (u8 *)op->data.buf.out;
+ u32 val;
+ int i, ret;
+
+ mutex_lock(&ospi->mlock);
+
+ /* F1-3: Prepare transfer operation */
+ ret = f_ospi_indir_prepare_op(ospi, mem, op);
+ if (ret)
+ goto out;
+
+ f_ospi_indir_start_xfer(ospi);
+
+ if (!(readl(ospi->base + OSPI_PROT_CTL_INDIR) & OSPI_PROT_DATA_EN))
+ goto nodata;
+
+ /* F4-5: Wait for buffer ready and write data */
+ for (i = 0; i < op->data.nbytes; i++) {
+ ret = readl_poll_timeout(ospi->base + OSPI_IRQ, val,
+ val & OSPI_IRQ_WRITE_BUF_READY,
+ 0, OSPI_WAIT_MAX_MSEC);
+ if (ret)
+ goto out;
+
+ writel(buf[i], ospi->base + OSPI_DAT);
+ }
+
+	/* F6-7: Stop the transfer manually if no explicit data size was set */
+ if (!(readl(ospi->base + OSPI_DAT_SIZE_INDIR) & OSPI_DAT_SIZE_EN))
+ f_ospi_indir_stop_xfer(ospi);
+
+nodata:
+ /* F8-9: Wait for completion and clear */
+ ret = f_ospi_indir_wait_xfer_complete(ospi);
+ if (ret)
+ goto out;
+
+ writel(OSPI_IRQ_CS_TRANS_COMP, ospi->base + OSPI_IRQ);
+out:
+ mutex_unlock(&ospi->mlock);
+
+ return ret;
+}
+
+static int f_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct f_ospi *ospi = spi_controller_get_devdata(mem->spi->master);
+ int err = 0;
+
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ err = f_ospi_indir_read(ospi, mem, op);
+ break;
+
+ case SPI_MEM_DATA_OUT:
+ fallthrough;
+ case SPI_MEM_NO_DATA:
+ err = f_ospi_indir_write(ospi, mem, op);
+ break;
+
+ default:
+ dev_warn(ospi->dev, "Unsupported direction");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static bool f_ospi_supports_op_width(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ u8 width_available[] = { 0, 1, 2, 4, 8 };
+ u8 width_op[] = { op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth };
+ bool is_match_found;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(width_op); i++) {
+ is_match_found = false;
+
+ for (j = 0; j < ARRAY_SIZE(width_available); j++) {
+ if (width_op[i] == width_available[j]) {
+ is_match_found = true;
+ break;
+ }
+ }
+
+ if (!is_match_found)
+ return false;
+ }
+
+ return true;
+}
+
+static bool f_ospi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (f_ospi_get_dummy_cycle(op) > OSPI_DUMMY_CYCLE_MAX)
+ return false;
+
+ if (op->addr.nbytes > 4)
+ return false;
+
+ if (!f_ospi_supports_op_width(mem, op))
+ return false;
+
+ return true;
+}
+
+static int f_ospi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ op->data.nbytes = min((int)op->data.nbytes, (int)(OSPI_DAT_SIZE_MAX));
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops f_ospi_mem_ops = {
+ .adjust_op_size = f_ospi_adjust_op_size,
+ .supports_op = f_ospi_supports_op,
+ .exec_op = f_ospi_exec_op,
+};
+
+static int f_ospi_init(struct f_ospi *ospi)
+{
+ int ret;
+
+ ret = f_ospi_prepare_config(ospi);
+ if (ret)
+ return ret;
+
+ /* Disable boot signal */
+ writel(OSPI_ACC_MODE_BOOT_DISABLE, ospi->base + OSPI_ACC_MODE);
+
+ f_ospi_config_dll(ospi);
+
+ /* Disable IRQ */
+ f_ospi_clear_irq(ospi);
+ f_ospi_disable_irq_status(ospi, OSPI_IRQ_ALL);
+ f_ospi_disable_irq_output(ospi, OSPI_IRQ_ALL);
+
+ return f_ospi_unprepare_config(ospi);
+}
+
+static int f_ospi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct f_ospi *ospi;
+ u32 num_cs = OSPI_NUM_CS;
+ int ret;
+
+ ctlr = spi_alloc_master(dev, sizeof(*ospi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL
+			| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
+ | SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
+ ctlr->mem_ops = &f_ospi_mem_ops;
+ ctlr->bus_num = -1;
+ of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (num_cs > OSPI_NUM_CS) {
+ dev_err(dev, "num-cs too large: %d\n", num_cs);
+		ret = -EINVAL;
+		goto err_put_ctlr;
+ }
+ ctlr->num_chipselect = num_cs;
+ ctlr->dev.of_node = dev->of_node;
+
+ ospi = spi_controller_get_devdata(ctlr);
+ ospi->dev = dev;
+
+ platform_set_drvdata(pdev, ospi);
+
+ ospi->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ospi->base)) {
+ ret = PTR_ERR(ospi->base);
+ goto err_put_ctlr;
+ }
+
+ ospi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ospi->clk)) {
+ ret = PTR_ERR(ospi->clk);
+ goto err_put_ctlr;
+ }
+
+ ret = clk_prepare_enable(ospi->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the clock\n");
+		goto err_put_ctlr;
+ }
+
+ mutex_init(&ospi->mlock);
+
+ ret = f_ospi_init(ospi);
+ if (ret)
+ goto err_destroy_mutex;
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&ospi->mlock);
+
+ clk_disable_unprepare(ospi->clk);
+
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int f_ospi_remove(struct platform_device *pdev)
+{
+ struct f_ospi *ospi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ospi->clk);
+
+ mutex_destroy(&ospi->mlock);
+
+ return 0;
+}
+
+static const struct of_device_id f_ospi_dt_ids[] = {
+ { .compatible = "socionext,f-ospi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, f_ospi_dt_ids);
+
+static struct platform_driver f_ospi_driver = {
+ .driver = {
+ .name = "socionext,f-ospi",
+ .of_match_table = f_ospi_dt_ids,
+ },
+ .probe = f_ospi_probe,
+ .remove = f_ospi_remove,
+};
+module_platform_driver(f_ospi_driver);
+
+MODULE_DESCRIPTION("Socionext F_OSPI controller driver");
+MODULE_AUTHOR("Socionext Inc.");
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_LICENSE("GPL");
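For reference, a spi-mem consumer fragment like the one sketched below would exercise the indirect path above (illustrative only, not part of the patch; the opcode, address and buffer length are made-up values and `mem` is assumed to be a valid struct spi_mem handle obtained from the spi-mem core). The one-byte, single-line dummy phase maps to (1 * 8) / 1 = 8 cycles in f_ospi_get_dummy_cycle(), and the read data phase is handled by f_ospi_indir_read() via f_ospi_exec_op():

	u8 buf[16];
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),		     /* opcode on 1 line */
			   SPI_MEM_OP_ADDR(3, 0x1000, 1),	     /* 3 address bytes on 1 line */
			   SPI_MEM_OP_DUMMY(1, 1),		     /* 1 dummy byte = 8 cycles */
			   SPI_MEM_OP_DATA_IN(sizeof(buf), buf, 4)); /* data on 4 lines */
	int ret = spi_mem_exec_op(mem, &op);			     /* dispatched to f_ospi_exec_op() */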
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
new file mode 100644
index 000000000000..ab33710d50ac
--- /dev/null
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Jonathan Neuschäfer
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi-mem.h>
+
+#define FIU_CFG 0x00
+#define FIU_BURST_BFG 0x01
+#define FIU_RESP_CFG 0x02
+#define FIU_CFBB_PROT 0x03
+#define FIU_FWIN1_LOW 0x04
+#define FIU_FWIN1_HIGH 0x06
+#define FIU_FWIN2_LOW 0x08
+#define FIU_FWIN2_HIGH 0x0a
+#define FIU_FWIN3_LOW 0x0c
+#define FIU_FWIN3_HIGH 0x0e
+#define FIU_PROT_LOCK 0x10
+#define FIU_PROT_CLEAR 0x11
+#define FIU_SPI_FL_CFG 0x14
+#define FIU_UMA_CODE 0x16
+#define FIU_UMA_AB0 0x17
+#define FIU_UMA_AB1 0x18
+#define FIU_UMA_AB2 0x19
+#define FIU_UMA_DB0 0x1a
+#define FIU_UMA_DB1 0x1b
+#define FIU_UMA_DB2 0x1c
+#define FIU_UMA_DB3 0x1d
+#define FIU_UMA_CTS 0x1e
+#define FIU_UMA_ECTS 0x1f
+
+#define FIU_BURST_CFG_R16 3
+
+#define FIU_UMA_CTS_D_SIZE(x) (x)
+#define FIU_UMA_CTS_A_SIZE BIT(3)
+#define FIU_UMA_CTS_WR BIT(4)
+#define FIU_UMA_CTS_CS(x) ((x) << 5)
+#define FIU_UMA_CTS_EXEC_DONE BIT(7)
+
+#define SHM_FLASH_SIZE 0x02
+#define SHM_FLASH_SIZE_STALL_HOST BIT(6)
+
+/*
+ * I observed a typical wait time of 16 iterations for a UMA transfer to
+ * finish, so this should be a safe limit.
+ */
+#define UMA_WAIT_ITERATIONS 100
+
+/* The memory-mapped view of flash is 16 MiB long */
+#define MAX_MEMORY_SIZE_PER_CS (16 << 20)
+#define MAX_MEMORY_SIZE_TOTAL (4 * MAX_MEMORY_SIZE_PER_CS)
+
+struct wpcm_fiu_spi {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ void __iomem *memory;
+ size_t memory_size;
+ struct regmap *shm_regmap;
+};
+
+static void wpcm_fiu_set_opcode(struct wpcm_fiu_spi *fiu, u8 opcode)
+{
+ writeb(opcode, fiu->regs + FIU_UMA_CODE);
+}
+
+static void wpcm_fiu_set_addr(struct wpcm_fiu_spi *fiu, u32 addr)
+{
+ writeb((addr >> 0) & 0xff, fiu->regs + FIU_UMA_AB0);
+ writeb((addr >> 8) & 0xff, fiu->regs + FIU_UMA_AB1);
+ writeb((addr >> 16) & 0xff, fiu->regs + FIU_UMA_AB2);
+}
+
+static void wpcm_fiu_set_data(struct wpcm_fiu_spi *fiu, const u8 *data, unsigned int nbytes)
+{
+ int i;
+
+ for (i = 0; i < nbytes; i++)
+ writeb(data[i], fiu->regs + FIU_UMA_DB0 + i);
+}
+
+static void wpcm_fiu_get_data(struct wpcm_fiu_spi *fiu, u8 *data, unsigned int nbytes)
+{
+ int i;
+
+ for (i = 0; i < nbytes; i++)
+ data[i] = readb(fiu->regs + FIU_UMA_DB0 + i);
+}
+
+/*
+ * Perform a UMA (User Mode Access) operation, i.e. a software-controlled SPI transfer.
+ */
+static int wpcm_fiu_do_uma(struct wpcm_fiu_spi *fiu, unsigned int cs,
+ bool use_addr, bool write, int data_bytes)
+{
+ int i = 0;
+ u8 cts = FIU_UMA_CTS_EXEC_DONE | FIU_UMA_CTS_CS(cs);
+
+ if (use_addr)
+ cts |= FIU_UMA_CTS_A_SIZE;
+ if (write)
+ cts |= FIU_UMA_CTS_WR;
+ cts |= FIU_UMA_CTS_D_SIZE(data_bytes);
+
+ writeb(cts, fiu->regs + FIU_UMA_CTS);
+
+ for (i = 0; i < UMA_WAIT_ITERATIONS; i++)
+ if (!(readb(fiu->regs + FIU_UMA_CTS) & FIU_UMA_CTS_EXEC_DONE))
+ return 0;
+
+ dev_info(fiu->dev, "UMA transfer has not finished in %d iterations\n", UMA_WAIT_ITERATIONS);
+ return -EIO;
+}
+
+static void wpcm_fiu_ects_assert(struct wpcm_fiu_spi *fiu, unsigned int cs)
+{
+ u8 ects = readb(fiu->regs + FIU_UMA_ECTS);
+
+ ects &= ~BIT(cs);
+ writeb(ects, fiu->regs + FIU_UMA_ECTS);
+}
+
+static void wpcm_fiu_ects_deassert(struct wpcm_fiu_spi *fiu, unsigned int cs)
+{
+ u8 ects = readb(fiu->regs + FIU_UMA_ECTS);
+
+ ects |= BIT(cs);
+ writeb(ects, fiu->regs + FIU_UMA_ECTS);
+}
+
+struct wpcm_fiu_op_shape {
+ bool (*match)(const struct spi_mem_op *op);
+ int (*exec)(struct spi_mem *mem, const struct spi_mem_op *op);
+};
+
+static bool wpcm_fiu_normal_match(const struct spi_mem_op *op)
+{
+ // Opcode 0x0b (FAST READ) is treated differently in hardware
+ if (op->cmd.opcode == 0x0b)
+ return false;
+
+ return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
+ op->dummy.nbytes == 0 && op->data.nbytes <= 4;
+}
+
+static int wpcm_fiu_normal_exec(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
+ int ret;
+
+ wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
+ wpcm_fiu_set_addr(fiu, op->addr.val);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);
+
+ ret = wpcm_fiu_do_uma(fiu, mem->spi->chip_select, op->addr.nbytes == 3,
+ op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
+
+ return ret;
+}
+
+static bool wpcm_fiu_fast_read_match(const struct spi_mem_op *op)
+{
+ return op->cmd.opcode == 0x0b && op->addr.nbytes == 3 &&
+ op->dummy.nbytes == 1 &&
+ op->data.nbytes >= 1 && op->data.nbytes <= 4 &&
+ op->data.dir == SPI_MEM_DATA_IN;
+}
+
+static int wpcm_fiu_fast_read_exec(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ return -EINVAL;
+}
+
+/*
+ * 4-byte addressing.
+ *
+ * Flash view: [ C A A A A D D D D]
+ * bytes: 13 aa bb cc dd -> 5a a5 f0 0f
+ * FIU's view: [ C A A A][ C D D D D]
+ * FIU mode: [ read/write][ read ]
+ */
+static bool wpcm_fiu_4ba_match(const struct spi_mem_op *op)
+{
+ return op->addr.nbytes == 4 && op->dummy.nbytes == 0 && op->data.nbytes <= 4;
+}
+
+static int wpcm_fiu_4ba_exec(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
+ int cs = mem->spi->chip_select;
+
+ wpcm_fiu_ects_assert(fiu, cs);
+
+ wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
+ wpcm_fiu_set_addr(fiu, op->addr.val >> 8);
+ wpcm_fiu_do_uma(fiu, cs, true, false, 0);
+
+ wpcm_fiu_set_opcode(fiu, op->addr.val & 0xff);
+ wpcm_fiu_set_addr(fiu, 0);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ wpcm_fiu_set_data(fiu, op->data.buf.out, op->data.nbytes);
+ wpcm_fiu_do_uma(fiu, cs, false, op->data.dir == SPI_MEM_DATA_OUT, op->data.nbytes);
+
+ wpcm_fiu_ects_deassert(fiu, cs);
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
+
+ return 0;
+}
+
+/*
+ * RDID (Read Identification) needs special handling because Linux expects to
+ * be able to read 6 ID bytes and FIU can only read up to 4 at once.
+ *
+ * We're lucky in this case, because executing the RDID instruction twice
+ * returns the same data.
+ *
+ * What we do is as follows (C: write command/opcode byte, D: read data byte,
+ * A: write address byte):
+ *
+ * 1. C D D D
+ * 2. C A A A D D D
+ */
+static bool wpcm_fiu_rdid_match(const struct spi_mem_op *op)
+{
+ return op->cmd.opcode == 0x9f && op->addr.nbytes == 0 &&
+ op->dummy.nbytes == 0 && op->data.nbytes == 6 &&
+ op->data.dir == SPI_MEM_DATA_IN;
+}
+
+static int wpcm_fiu_rdid_exec(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
+ int cs = mem->spi->chip_select;
+
+ /* First transfer */
+ wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
+ wpcm_fiu_set_addr(fiu, 0);
+ wpcm_fiu_do_uma(fiu, cs, false, false, 3);
+ wpcm_fiu_get_data(fiu, op->data.buf.in, 3);
+
+ /* Second transfer */
+ wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
+ wpcm_fiu_set_addr(fiu, 0);
+ wpcm_fiu_do_uma(fiu, cs, true, false, 3);
+ wpcm_fiu_get_data(fiu, op->data.buf.in + 3, 3);
+
+ return 0;
+}
+
+/*
+ * With some dummy bytes.
+ *
+ * C A A A X* X D D D D
+ * [C A A A D*][C D D D D]
+ */
+static bool wpcm_fiu_dummy_match(const struct spi_mem_op *op)
+{
+ // Opcode 0x0b (FAST READ) is treated differently in hardware
+ if (op->cmd.opcode == 0x0b)
+ return false;
+
+ return (op->addr.nbytes == 0 || op->addr.nbytes == 3) &&
+ op->dummy.nbytes >= 1 && op->dummy.nbytes <= 5 &&
+ op->data.nbytes <= 4;
+}
+
+static int wpcm_fiu_dummy_exec(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
+ int cs = mem->spi->chip_select;
+
+ wpcm_fiu_ects_assert(fiu, cs);
+
+ /* First transfer */
+ wpcm_fiu_set_opcode(fiu, op->cmd.opcode);
+ wpcm_fiu_set_addr(fiu, op->addr.val);
+ wpcm_fiu_do_uma(fiu, cs, op->addr.nbytes != 0, true, op->dummy.nbytes - 1);
+
+ /* Second transfer */
+ wpcm_fiu_set_opcode(fiu, 0);
+ wpcm_fiu_set_addr(fiu, 0);
+ wpcm_fiu_do_uma(fiu, cs, false, false, op->data.nbytes);
+ wpcm_fiu_get_data(fiu, op->data.buf.in, op->data.nbytes);
+
+ wpcm_fiu_ects_deassert(fiu, cs);
+
+ return 0;
+}
+
+static const struct wpcm_fiu_op_shape wpcm_fiu_op_shapes[] = {
+ { .match = wpcm_fiu_normal_match, .exec = wpcm_fiu_normal_exec },
+ { .match = wpcm_fiu_fast_read_match, .exec = wpcm_fiu_fast_read_exec },
+ { .match = wpcm_fiu_4ba_match, .exec = wpcm_fiu_4ba_exec },
+ { .match = wpcm_fiu_rdid_match, .exec = wpcm_fiu_rdid_exec },
+ { .match = wpcm_fiu_dummy_match, .exec = wpcm_fiu_dummy_exec },
+};
+
+static const struct wpcm_fiu_op_shape *wpcm_fiu_find_op_shape(const struct spi_mem_op *op)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(wpcm_fiu_op_shapes); i++) {
+ const struct wpcm_fiu_op_shape *shape = &wpcm_fiu_op_shapes[i];
+
+ if (shape->match(op))
+ return shape;
+ }
+
+ return NULL;
+}
+
+static bool wpcm_fiu_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ return false;
+
+ if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 ||
+ op->dummy.buswidth > 1 || op->data.buswidth > 1)
+ return false;
+
+ return wpcm_fiu_find_op_shape(op) != NULL;
+}
+
+/*
+ * In order to ensure the integrity of SPI transfers performed via UMA,
+ * temporarily disable (stall) memory accesses coming from the host CPU.
+ */
+static void wpcm_fiu_stall_host(struct wpcm_fiu_spi *fiu, bool stall)
+{
+ if (fiu->shm_regmap) {
+ int res = regmap_update_bits(fiu->shm_regmap, SHM_FLASH_SIZE,
+ SHM_FLASH_SIZE_STALL_HOST,
+ stall ? SHM_FLASH_SIZE_STALL_HOST : 0);
+ if (res)
+ dev_warn(fiu->dev, "Failed to (un)stall host memory accesses: %d\n", res);
+ }
+}
+
+static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(mem->spi->controller);
+ const struct wpcm_fiu_op_shape *shape = wpcm_fiu_find_op_shape(op);
+
+ wpcm_fiu_stall_host(fiu, true);
+
+	if (shape) {
+		int ret = shape->exec(mem, op);
+
+		wpcm_fiu_stall_host(fiu, false);
+		return ret;
+	}
+
+ wpcm_fiu_stall_host(fiu, false);
+
+ return -ENOTSUPP;
+}
+
+static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.nbytes > 4)
+ op->data.nbytes = 4;
+
+ return 0;
+}
+
+static int wpcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
+ int cs = desc->mem->spi->chip_select;
+
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -ENOTSUPP;
+
+ /*
+ * Unfortunately, FIU only supports a 16 MiB direct mapping window (per
+ * attached flash chip), but the SPI MEM core doesn't support partial
+ * direct mappings. This means that we can't support direct mapping on
+ * flashes that are bigger than 16 MiB.
+ */
+ if (desc->info.offset + desc->info.length > MAX_MEMORY_SIZE_PER_CS)
+ return -ENOTSUPP;
+
+ /* Don't read past the memory window */
+ if (cs * MAX_MEMORY_SIZE_PER_CS + desc->info.offset + desc->info.length > fiu->memory_size)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static ssize_t wpcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf)
+{
+ struct wpcm_fiu_spi *fiu = spi_controller_get_devdata(desc->mem->spi->controller);
+ int cs = desc->mem->spi->chip_select;
+
+ if (offs >= MAX_MEMORY_SIZE_PER_CS)
+ return -ENOTSUPP;
+
+ offs += cs * MAX_MEMORY_SIZE_PER_CS;
+
+ if (!fiu->memory || offs >= fiu->memory_size)
+ return -ENOTSUPP;
+
+ len = min_t(size_t, len, fiu->memory_size - offs);
+ memcpy_fromio(buf, fiu->memory + offs, len);
+
+ return len;
+}
+
+static const struct spi_controller_mem_ops wpcm_fiu_mem_ops = {
+ .adjust_op_size = wpcm_fiu_adjust_op_size,
+ .supports_op = wpcm_fiu_supports_op,
+ .exec_op = wpcm_fiu_exec_op,
+ .dirmap_create = wpcm_fiu_dirmap_create,
+ .dirmap_read = wpcm_fiu_direct_read,
+};
+
+static void wpcm_fiu_hw_init(struct wpcm_fiu_spi *fiu)
+{
+ /* Configure memory-mapped flash access */
+ writeb(FIU_BURST_CFG_R16, fiu->regs + FIU_BURST_BFG);
+ writeb(MAX_MEMORY_SIZE_TOTAL / (512 << 10), fiu->regs + FIU_CFG);
+ writeb(MAX_MEMORY_SIZE_PER_CS / (512 << 10) | BIT(6), fiu->regs + FIU_SPI_FL_CFG);
+
+ /* Deassert all manually asserted chip selects */
+ writeb(0x0f, fiu->regs + FIU_UMA_ECTS);
+}
+
+static int wpcm_fiu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct wpcm_fiu_spi *fiu;
+ struct resource *res;
+
+ ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ if (!ctrl)
+ return -ENOMEM;
+
+ fiu = spi_controller_get_devdata(ctrl);
+ fiu->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ fiu->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fiu->regs)) {
+ dev_err(dev, "Failed to map registers\n");
+ return PTR_ERR(fiu->regs);
+ }
+
+ fiu->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(fiu->clk))
+ return PTR_ERR(fiu->clk);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
+	fiu->memory = devm_ioremap_resource(dev, res);
+	if (IS_ERR(fiu->memory)) {
+		dev_err(dev, "Failed to map flash memory window\n");
+		return PTR_ERR(fiu->memory);
+	}
+	fiu->memory_size = min_t(size_t, resource_size(res), MAX_MEMORY_SIZE_TOTAL);
+
+ fiu->shm_regmap = syscon_regmap_lookup_by_phandle_optional(dev->of_node, "nuvoton,shm");
+
+ wpcm_fiu_hw_init(fiu);
+
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &wpcm_fiu_mem_ops;
+ ctrl->num_chipselect = 4;
+ ctrl->dev.of_node = dev->of_node;
+
+ /*
+	 * The FIU doesn't include a clock divider; the SPI clock is
+	 * determined entirely by the AHB3 bus clock.
+ */
+ ctrl->min_speed_hz = clk_get_rate(fiu->clk);
+ ctrl->max_speed_hz = clk_get_rate(fiu->clk);
+
+ return devm_spi_register_controller(dev, ctrl);
+}
+
+static const struct of_device_id wpcm_fiu_dt_ids[] = {
+ { .compatible = "nuvoton,wpcm450-fiu", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wpcm_fiu_dt_ids);
+
+static struct platform_driver wpcm_fiu_driver = {
+ .driver = {
+ .name = "wpcm450-fiu",
+ .bus = &platform_bus_type,
+ .of_match_table = wpcm_fiu_dt_ids,
+ },
+ .probe = wpcm_fiu_probe,
+};
+module_platform_driver(wpcm_fiu_driver);
+
+MODULE_DESCRIPTION("Nuvoton WPCM450 FIU SPI controller driver");
+MODULE_AUTHOR("Jonathan Neuschäfer <j.neuschaefer@gmx.net>");
+MODULE_LICENSE("GPL");
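For reference, the RDID special case described above is hit by an op of the following shape (illustrative only, not part of the patch; `mem` is assumed to be a valid struct spi_mem handle). wpcm_fiu_supports_op() accepts it because wpcm_fiu_rdid_match() matches, and wpcm_fiu_rdid_exec() then assembles the six ID bytes from two 3-byte UMA reads:

	u8 id[6];
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),	/* RDID */
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(6, id, 1));
	int ret = spi_mem_exec_op(mem, &op);		/* dispatched to wpcm_fiu_rdid_exec() */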
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 1d9b3f03d986..8628241ec99e 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -202,8 +202,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
return status;
}
-static int spi_xcomm_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int spi_xcomm_probe(struct i2c_client *i2c)
{
struct spi_xcomm *spi_xcomm;
struct spi_master *master;
@@ -242,7 +241,7 @@ static struct i2c_driver spi_xcomm_driver = {
.name = "spi-xcomm",
},
.id_table = spi_xcomm_ids,
- .probe = spi_xcomm_probe,
+ .probe_new = spi_xcomm_probe,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index c760aac070e5..95ff15665d44 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -34,6 +35,7 @@
#define GQSPI_RXD_OFST 0x00000120
#define GQSPI_TX_THRESHOLD_OFST 0x00000128
#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define IOU_TAPDLY_BYPASS_OFST 0x0000003C
#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
#define GQSPI_GEN_FIFO_OFST 0x00000140
#define GQSPI_SEL_OFST 0x00000144
@@ -48,6 +50,7 @@
#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
/* GQSPI register bit masks */
#define GQSPI_SEL_MASK 0x00000001
@@ -136,11 +139,37 @@
#define GQSPI_MAX_NUM_CS 2 /* Maximum number of chip selects */
+#define GQSPI_USE_DATA_DLY 0x1
+#define GQSPI_USE_DATA_DLY_SHIFT 31
+#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
+#define GQSPI_DATA_DLY_ADJ_SHIFT 28
+#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x1
+#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 0x3
+#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
+#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 0x2
+
+/* set to differentiate versal from zynqmp, 1=versal, 0=zynqmp */
+#define QSPI_QUIRK_HAS_TAPDELAY BIT(0)
+
+#define GQSPI_FREQ_37_5MHZ 37500000
+#define GQSPI_FREQ_40MHZ 40000000
+#define GQSPI_FREQ_100MHZ 100000000
+#define GQSPI_FREQ_150MHZ 150000000
+
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
/**
+ * struct qspi_platform_data - zynqmp qspi platform data structure
+ * @quirks: Flags is used to identify the platform
+ */
+struct qspi_platform_data {
+ u32 quirks;
+};
+
+/**
* struct zynqmp_qspi - Defines qspi driver instance
+ * @ctlr: Pointer to the spi controller information
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
@@ -157,6 +186,9 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
* @genfifoentry: Used for storing the genfifoentry instruction.
* @mode: Defines the mode in which QSPI is operating
* @data_completion: completion structure
+ * @op_lock: Operational lock
+ * @speed_hz: Current SPI bus clock speed in hz
+ * @has_tapdelay:	Set when the controller has directly accessible tap delay registers
*/
struct zynqmp_qspi {
struct spi_controller *ctlr;
@@ -177,6 +209,8 @@ struct zynqmp_qspi {
enum mode_type mode;
struct completion data_completion;
struct mutex op_lock;
+ u32 speed_hz;
+ bool has_tapdelay;
};
/**
@@ -250,6 +284,56 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
}
/**
+ * zynqmp_qspi_set_tapdelay - Configure the QSPI tap delays
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @baudrateval:	Baud rate divisor value to configure
+ */
+static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
+{
+ u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
+ u32 reqhz = 0;
+
+ clk_rate = clk_get_rate(xqspi->refclk);
+ reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
+
+ if (!xqspi->has_tapdelay) {
+ if (reqhz <= GQSPI_FREQ_40MHZ) {
+ zynqmp_pm_set_tapdelay_bypass(PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ zynqmp_pm_set_tapdelay_bypass(PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
+ }
+ } else {
+ if (reqhz <= GQSPI_FREQ_37_5MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= (GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK
+ | (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
+ GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT));
+ }
+ zynqmp_gqspi_write(xqspi,
+ IOU_TAPDLY_BYPASS_OFST, tapdlybypass);
+ }
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, lpbkdlyadj);
+ zynqmp_gqspi_write(xqspi, GQSPI_DATA_DLY_ADJ_OFST, datadlyadj);
+}
+
+/**
* zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
@@ -264,12 +348,15 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
* - Enable manual slave select
* - Enable manual start
* - Deselect all the chip select lines
- * - Set the little endian mode of TX FIFO and
+ * - Set the little endian mode of TX FIFO
+ * - Set clock phase
+ * - Set clock polarity and
* - Enable the QSPI controller
*/
static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
{
- u32 config_reg;
+ u32 config_reg, baud_rate_val = 0;
+ ulong clk_rate;
/* Select the GQSPI mode */
zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
@@ -303,21 +390,37 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
config_reg |= GQSPI_CFG_WP_HOLD_MASK;
/* Clear pre-scalar by default */
config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
- /* CPHA 0 */
- config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
- /* CPOL 0 */
- config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
+ /* Set CPHA */
+ if (xqspi->ctlr->mode_bits & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ else
+ config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
+ /* Set CPOL */
+ if (xqspi->ctlr->mode_bits & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ else
+ config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
+
+ /* Set the clock frequency */
+ clk_rate = clk_get_rate(xqspi->refclk);
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > xqspi->speed_hz)
+ baud_rate_val++;
+
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ /* Set the tapdelay for clock frequency */
+ zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
+
/* Clear the TX and RX FIFO */
zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST,
GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK |
GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK |
GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK);
- /* Set by default to allow for high frequencies */
- zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_LPBK_DLY_ADJ_OFST) |
- GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
/* Reset thresholds */
zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST,
GQSPI_TX_FIFO_THRESHOLD_RESET_VAL);
@@ -455,30 +558,30 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
struct spi_device *qspi)
{
ulong clk_rate;
- u32 config_reg, baud_rate_val = 0;
+ u32 config_reg, req_speed_hz, baud_rate_val = 0;
- /* Set the clock frequency */
- /* If req_hz == 0, default to lowest speed */
- clk_rate = clk_get_rate(xqspi->refclk);
+ req_speed_hz = qspi->max_speed_hz;
- while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
- (clk_rate /
- (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > qspi->max_speed_hz)
- baud_rate_val++;
+ if (xqspi->speed_hz != req_speed_hz) {
+ xqspi->speed_hz = req_speed_hz;
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ /* Set the clock frequency */
+ /* If req_speed_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
- /* Set the QSPI clock phase and clock polarity */
- config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) >
+ req_speed_hz)
+ baud_rate_val++;
- if (qspi->mode & SPI_CPHA)
- config_reg |= GQSPI_CFG_CLK_PHA_MASK;
- if (qspi->mode & SPI_CPOL)
- config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
- config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
+ }
return 0;
}
@@ -739,6 +842,8 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
/**
* zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
+ *
+ * Return: 0 on success; error value otherwise.
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
@@ -823,6 +928,8 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
* @rx_nbits: Receive buswidth.
* @genfifoentry: genfifoentry is pointer to the variable in which
* GENFIFO mask is returned to calling function
+ *
+ * Return: 0 on success; error value otherwise.
*/
static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
u32 genfifoentry)
@@ -1087,6 +1194,16 @@ static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
+static const struct qspi_platform_data versal_qspi_def = {
+ .quirks = QSPI_QUIRK_HAS_TAPDELAY,
+};
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0"},
+ { .compatible = "xlnx,versal-qspi-1.0", .data = &versal_qspi_def },
+ { /* End of table */ }
+};
+
static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.exec_op = zynqmp_qspi_exec_op,
};
@@ -1107,6 +1224,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
u32 num_cs;
+ const struct qspi_platform_data *p_data;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
@@ -1117,6 +1235,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->ctlr = ctlr;
platform_set_drvdata(pdev, xqspi);
+ p_data = of_device_get_match_data(&pdev->dev);
+ if (p_data && (p_data->quirks & QSPI_QUIRK_HAS_TAPDELAY))
+ xqspi->has_tapdelay = true;
+
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
@@ -1164,6 +1286,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ xqspi->speed_hz = ctlr->max_speed_hz;
+
/* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi);
@@ -1199,10 +1326,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
ctlr->setup = zynqmp_qspi_setup_op;
- ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
- SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->dev.of_node = np;
ctlr->auto_runtime_pm = true;
@@ -1253,11 +1377,6 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id zynqmp_qspi_of_match[] = {
- { .compatible = "xlnx,zynqmp-qspi-1.0", },
- { /* End of table */ }
-};
-
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
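The baud-rate divisor search added to zynqmp_qspi_init_hw() and reused in zynqmp_qspi_config_op() can be read as the standalone sketch below (illustrative only; gqspi_pick_baud_div() is a hypothetical helper, and GQSPI_BAUD_DIV_SHIFT = 2 and GQSPI_BAUD_DIV_MAX = 7 are assumed from the existing driver, since both are defined outside this hunk). For example, with a 300 MHz reference clock and a 40 MHz request it settles on baud_rate_val = 2, i.e. a bus clock of 300 MHz / (2 << 2) = 37.5 MHz, which then takes the <= 40 MHz (ZynqMP) or <= 37.5 MHz (Versal) branch in zynqmp_qspi_set_tapdelay():

	/* Hypothetical helper mirroring the divisor search in this patch */
	static unsigned int gqspi_pick_baud_div(unsigned long clk_rate,
						unsigned int req_speed_hz)
	{
		unsigned int baud_rate_val = 0;

		/* Smallest divisor (2 << n) whose resulting clock does not exceed the request */
		while (baud_rate_val < 7 /* GQSPI_BAUD_DIV_MAX */ &&
		       clk_rate / (2u << baud_rate_val) > req_speed_hz)
			baud_rate_val++;

		return baud_rate_val;
	}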
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5f9aedd1f0b6..3cc7bb4d03de 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -127,10 +127,10 @@ do { \
unsigned int start; \
pcpu_stats = per_cpu_ptr(in, i); \
do { \
- start = u64_stats_fetch_begin_irq( \
+ start = u64_stats_fetch_begin( \
&pcpu_stats->syncp); \
inc = u64_stats_read(&pcpu_stats->field); \
- } while (u64_stats_fetch_retry_irq( \
+ } while (u64_stats_fetch_retry( \
&pcpu_stats->syncp, start)); \
ret += inc; \
} \
@@ -360,6 +360,18 @@ const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
+const void *spi_get_device_match_data(const struct spi_device *sdev)
+{
+ const void *match;
+
+ match = device_get_match_data(&sdev->dev);
+ if (match)
+ return match;
+
+ return (const void *)spi_get_device_id(sdev)->driver_data;
+}
+EXPORT_SYMBOL_GPL(spi_get_device_match_data);
+
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
const struct spi_device *spi = to_spi_device(dev);
@@ -2212,6 +2224,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
+ u16 cs_setup;
int rc;
/* Mode (clock phase/polarity/etc.) */
@@ -2297,6 +2310,11 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
+ if (!of_property_read_u16(nc, "spi-cs-setup-ns", &cs_setup)) {
+ spi->cs_setup.value = cs_setup;
+ spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
return 0;
}
@@ -2759,6 +2777,17 @@ int spi_slave_abort(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_slave_abort);
+int spi_target_abort(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ if (spi_controller_is_target(ctlr) && ctlr->target_abort)
+ return ctlr->target_abort(ctlr);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_target_abort);
+
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -3593,6 +3622,37 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
}
/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int spi_set_cs_timing(struct spi_device *spi)
+{
+ struct device *parent = spi->controller->dev.parent;
+ int status = 0;
+
+ if (spi->controller->set_cs_timing && !spi->cs_gpiod) {
+ if (spi->controller->auto_runtime_pm) {
+ status = pm_runtime_get_sync(parent);
+ if (status < 0) {
+ pm_runtime_put_noidle(parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ status = spi->controller->set_cs_timing(spi);
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+ } else {
+ status = spi->controller->set_cs_timing(spi);
+ }
+ }
+ return status;
+}
+
+/**
* spi_setup - setup SPI mode and clock rate
* @spi: the device whose settings are being modified
* Context: can sleep, and no requests are queued to the device
@@ -3688,6 +3748,12 @@ int spi_setup(struct spi_device *spi)
}
}
+ status = spi_set_cs_timing(spi);
+ if (status) {
+ mutex_unlock(&spi->controller->io_mutex);
+ return status;
+ }
+
if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
status = pm_runtime_resume_and_get(spi->controller->dev.parent);
if (status < 0) {
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index b2775d82d2d7..6313e7d0cdf8 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -377,12 +377,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* read requests */
case SPI_IOC_RD_MODE:
- retval = put_user(spi->mode & SPI_MODE_MASK,
- (__u8 __user *)arg);
- break;
case SPI_IOC_RD_MODE32:
- retval = put_user(spi->mode & SPI_MODE_MASK,
- (__u32 __user *)arg);
+ tmp = spi->mode;
+
+ {
+ struct spi_controller *ctlr = spi->controller;
+
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp &= ~SPI_CS_HIGH;
+ }
+
+ if (cmd == SPI_IOC_RD_MODE)
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ else
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u32 __user *)arg);
break;
case SPI_IOC_RD_LSB_FIRST:
retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,