 drivers/spi/Kconfig       |   1 +
 drivers/spi/spi-bcm2835.c | 392 ++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 354 insertions(+), 39 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 72b059081559..4392db2d28ca 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -77,6 +77,7 @@ config SPI_ATMEL
 config SPI_BCM2835
 	tristate "BCM2835 SPI controller"
+	depends on GPIOLIB
 	depends on ARCH_BCM2835 || COMPILE_TEST
 	depends on GPIOLIB
 	help
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 37875cf942f7..59705ab23577 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -20,18 +20,22 @@
  * GNU General Public License for more details.
  */
 
+#include <asm/page.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
 #include <linux/spi/spi.h>
 
 /* SPI register offsets */
@@ -69,7 +73,8 @@
 #define BCM2835_SPI_CS_CS_01		0x00000001
 
 #define BCM2835_SPI_POLLING_LIMIT_US	30
-#define BCM2835_SPI_TIMEOUT_MS		30000
+#define BCM2835_SPI_POLLING_JIFFIES	2
+#define BCM2835_SPI_DMA_MIN_LENGTH	96
 #define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
 				| SPI_NO_CS | SPI_3WIRE)
 
@@ -83,6 +88,7 @@ struct bcm2835_spi {
 	u8 *rx_buf;
 	int tx_len;
 	int rx_len;
+	bool dma_pending;
 };
 
 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -128,12 +134,15 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
 	/* Disable SPI interrupts and transfer */
 	cs &= ~(BCM2835_SPI_CS_INTR |
 		BCM2835_SPI_CS_INTD |
+		BCM2835_SPI_CS_DMAEN |
 		BCM2835_SPI_CS_TA);
 	/* and reset RX/TX FIFOS */
 	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
 
 	/* and reset the SPI_HW */
 	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+	/* as well as DLEN */
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
 }
 
 static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
@@ -157,42 +166,6 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
-					 struct spi_device *spi,
-					 struct spi_transfer *tfr,
-					 u32 cs,
-					 unsigned long xfer_time_us)
-{
-	struct bcm2835_spi *bs = spi_master_get_devdata(master);
-	/* set timeout to 1 second of maximum polling */
-	unsigned long timeout = jiffies + HZ;
-
-	/* enable HW block without interrupts */
-	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
-
-	/* loop until finished the transfer */
-	while (bs->rx_len) {
-		/* read from fifo as much as possible */
-		bcm2835_rd_fifo(bs);
-		/* fill in tx fifo as much as possible */
-		bcm2835_wr_fifo(bs);
-		/* if we still expect some data after the read,
-		 * check for a possible timeout
-		 */
-		if (bs->rx_len && time_after(jiffies, timeout)) {
-			/* Transfer complete - reset SPI HW */
-			bcm2835_spi_reset_hw(master);
-			/* and return timeout */
-			return -ETIMEDOUT;
-		}
-	}
-
-	/* Transfer complete - reset SPI HW */
-	bcm2835_spi_reset_hw(master);
-	/* and return without waiting for completion */
-	return 0;
-}
-
 static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
 					struct spi_device *spi,
 					struct spi_transfer *tfr,
@@ -229,6 +202,329 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
 	return 1;
 }
 
+/*
+ * DMA support
+ *
+ * this implementation has currently a few issues in so far as it does
+ * not work around limitations of the HW.
+ *
+ * the main one being that DMA transfers are limited to 16 bit
+ * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
+ *
+ * also we currently assume that the scatter-gather fragments are
+ * all multiple of 4 (except the last) - otherwise we would need
+ * to reset the FIFO before subsequent transfers...
+ * this also means that tx/rx transfers sg's need to be of equal size!
+ *
+ * there may be a few more border-cases we may need to address as well
+ * but unfortunately this would mean splitting up the scatter-gather
+ * list making it slightly unpractical...
+ */
+static void bcm2835_spi_dma_done(void *data)
+{
+	struct spi_master *master = data;
+	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+	/* reset fifo and HW */
+	bcm2835_spi_reset_hw(master);
+
+	/* and terminate tx-dma as we do not have an irq for it
+	 * because when the rx dma will terminate and this callback
+	 * is called the tx-dma must have finished - can't get to this
+	 * situation otherwise...
+	 */
+	dmaengine_terminate_all(master->dma_tx);
+
+	/* mark as no longer pending */
+	bs->dma_pending = 0;
+
+	/* and mark as completed */
+	complete(&master->xfer_completion);
+}
+
+static int bcm2835_spi_prepare_sg(struct spi_master *master,
+				  struct spi_transfer *tfr,
+				  bool is_tx)
+{
+	struct dma_chan *chan;
+	struct scatterlist *sgl;
+	unsigned int nents;
+	enum dma_transfer_direction dir;
+	unsigned long flags;
+
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+
+	if (is_tx) {
+		dir   = DMA_MEM_TO_DEV;
+		chan  = master->dma_tx;
+		nents = tfr->tx_sg.nents;
+		sgl   = tfr->tx_sg.sgl;
+		flags = 0 /* no tx interrupt */;
+
+	} else {
+		dir   = DMA_DEV_TO_MEM;
+		chan  = master->dma_rx;
+		nents = tfr->rx_sg.nents;
+		sgl   = tfr->rx_sg.sgl;
+		flags = DMA_PREP_INTERRUPT;
+	}
+	/* prepare the channel */
+	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+	if (!desc)
+		return -EINVAL;
+
+	/* set callback for rx */
+	if (!is_tx) {
+		desc->callback = bcm2835_spi_dma_done;
+		desc->callback_param = master;
+	}
+
+	/* submit it to DMA-engine */
+	cookie = dmaengine_submit(desc);
+
+	return dma_submit_error(cookie);
+}
+
+static inline int bcm2835_check_sg_length(struct sg_table *sgt)
+{
+	int i;
+	struct scatterlist *sgl;
+
+	/* check that the sg entries are word-sized (except for last) */
+	for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
+		if (sg_dma_len(sgl) % 4)
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+					struct spi_device *spi,
+					struct spi_transfer *tfr,
+					u32 cs)
+{
+	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	int ret;
+
+	/* check that the scatter gather segments are all a multiple of 4 */
+	if (bcm2835_check_sg_length(&tfr->tx_sg) ||
+	    bcm2835_check_sg_length(&tfr->rx_sg)) {
+		dev_warn_once(&spi->dev,
+			      "scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
+		return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
+	}
+
+	/* setup tx-DMA */
+	ret = bcm2835_spi_prepare_sg(master, tfr, true);
+	if (ret)
+		return ret;
+
+	/* start TX early */
+	dma_async_issue_pending(master->dma_tx);
+
+	/* mark as dma pending */
+	bs->dma_pending = 1;
+
+	/* set the DMA length */
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
+
+	/* start the HW */
+	bcm2835_wr(bs, BCM2835_SPI_CS,
+		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+
+	/* setup rx-DMA late - to run transfers while
+	 * mapping of the rx buffers still takes place
+	 * this saves 10us or more.
+	 */
+	ret = bcm2835_spi_prepare_sg(master, tfr, false);
+	if (ret) {
+		/* need to reset on errors */
+		dmaengine_terminate_all(master->dma_tx);
+		bcm2835_spi_reset_hw(master);
+		return ret;
+	}
+
+	/* start rx dma late */
+	dma_async_issue_pending(master->dma_rx);
+
+	/* wait for wakeup in framework */
+	return 1;
+}
+
+static bool bcm2835_spi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *tfr)
+{
+	/* only run for gpio_cs */
+	if (!gpio_is_valid(spi->cs_gpio))
+		return false;
+
+	/* we start DMA efforts only on bigger transfers */
+	if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
+		return false;
+
+	/* BCM2835_SPI_DLEN has defined a max transfer size as
+	 * 16 bit, so max is 65535
+	 * we can revisit this by using an alternative transfer
+	 * method - ideally this would get done without any more
+	 * interaction...
+	 */
+	if (tfr->len > 65535) {
+		dev_warn_once(&spi->dev,
+			      "transfer size of %d too big for dma-transfer\n",
+			      tfr->len);
+		return false;
+	}
+
+	/* if we run rx/tx_buf with word aligned addresses then we are OK */
+	if ((((size_t)tfr->rx_buf & 3) == 0) &&
+	    (((size_t)tfr->tx_buf & 3) == 0))
+		return true;
+
+	/* otherwise we only allow transfers within the same page
+	 * to avoid wasting time on dma_mapping when it is not practical
+	 */
+	if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+		dev_warn_once(&spi->dev,
+			      "Unaligned spi tx-transfer bridging page\n");
+		return false;
+	}
+	if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+		dev_warn_once(&spi->dev,
+			      "Unaligned spi rx-transfer bridging page\n");
+		return false;
+	}
+
+	/* return OK */
+	return true;
+}
+
+static void bcm2835_dma_release(struct spi_master *master)
+{
+	if (master->dma_tx) {
+		dmaengine_terminate_all(master->dma_tx);
+		dma_release_channel(master->dma_tx);
+		master->dma_tx = NULL;
+	}
+	if (master->dma_rx) {
+		dmaengine_terminate_all(master->dma_rx);
+		dma_release_channel(master->dma_rx);
+		master->dma_rx = NULL;
+	}
+}
+
+static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
+{
+	struct dma_slave_config slave_config;
+	const __be32 *addr;
+	dma_addr_t dma_reg_base;
+	int ret;
+
+	/* base address in dma-space */
+	addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
+	if (!addr) {
+		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
+		goto err;
+	}
+	dma_reg_base = be32_to_cpup(addr);
+
+	/* get tx/rx dma */
+	master->dma_tx = dma_request_slave_channel(dev, "tx");
+	if (!master->dma_tx) {
+		dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+		goto err;
+	}
+	master->dma_rx = dma_request_slave_channel(dev, "rx");
+	if (!master->dma_rx) {
+		dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+		goto err_release;
+	}
+
+	/* configure DMAs */
+	slave_config.direction = DMA_MEM_TO_DEV;
+	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+	if (ret)
+		goto err_config;
+
+	slave_config.direction = DMA_DEV_TO_MEM;
+	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+	if (ret)
+		goto err_config;
+
+	/* all went well, so set can_dma */
+	master->can_dma = bcm2835_spi_can_dma;
+	master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
+	/* need to do TX AND RX DMA, so we need dummy buffers */
+	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+
+	return;
+
+err_config:
+	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
+		ret);
+err_release:
+	bcm2835_dma_release(master);
+err:
+	return;
+}
+
+static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+					 struct spi_device *spi,
+					 struct spi_transfer *tfr,
+					 u32 cs,
+					 unsigned long xfer_time_us)
+{
+	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	unsigned long timeout;
+
+	/* enable HW block without interrupts */
+	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+	/* fill in the fifo before timeout calculations
+	 * if we are interrupted here, then the data is
+	 * getting transferred by the HW while we are interrupted
+	 */
+	bcm2835_wr_fifo(bs);
+
+	/* set the timeout */
+	timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
+
+	/* loop until finished the transfer */
+	while (bs->rx_len) {
+		/* fill in tx fifo with remaining data */
+		bcm2835_wr_fifo(bs);
+
+		/* read from fifo as much as possible */
+		bcm2835_rd_fifo(bs);
+
+		/* if there is still data pending to read
+		 * then check the timeout
+		 */
+		if (bs->rx_len && time_after(jiffies, timeout)) {
+			dev_dbg_ratelimited(&spi->dev,
+					    "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
+					    jiffies - timeout,
+					    bs->tx_len, bs->rx_len);
+			/* fall back to interrupt mode */
+			return bcm2835_spi_transfer_one_irq(master, spi,
+							    tfr, cs);
+		}
+	}
+
+	/* Transfer complete - reset SPI HW */
+	bcm2835_spi_reset_hw(master);
+	/* and return without waiting for completion */
+	return 0;
+}
+
 static int bcm2835_spi_transfer_one(struct spi_master *master,
 				    struct spi_device *spi,
 				    struct spi_transfer *tfr)
@@ -288,12 +584,26 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
 		return bcm2835_spi_transfer_one_poll(master, spi, tfr,
 						     cs, xfer_time_us);
 
+	/* run in dma mode if conditions are right */
+	if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
+		return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
+
+	/* run in interrupt-mode */
 	return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
 }
 
 static void bcm2835_spi_handle_err(struct spi_master *master,
 				   struct spi_message *msg)
 {
+	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+	/* if an error occurred and we have an active dma, then terminate */
+	if (bs->dma_pending) {
+		dmaengine_terminate_all(master->dma_tx);
+		dmaengine_terminate_all(master->dma_rx);
+		bs->dma_pending = 0;
+	}
+	/* and reset */
 	bcm2835_spi_reset_hw(master);
 }
 
@@ -463,6 +773,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 		goto out_clk_disable;
 	}
 
+	bcm2835_dma_init(master, &pdev->dev);
+
 	/* initialise the hardware with the default polarities */
 	bcm2835_wr(bs, BCM2835_SPI_CS,
 		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
@@ -493,6 +805,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
 
 	clk_disable_unprepare(bs->clk);
 
+	bcm2835_dma_release(master);
+
 	return 0;
 }
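A transfer only takes the DMA path added above when bcm2835_spi_can_dma() accepts it: at least BCM2835_SPI_DMA_MIN_LENGTH (96) bytes, no more than 65535 bytes, and rx/tx buffers that are word aligned (or that at least stay within one page). The following client-side sketch is hypothetical (example_dma_friendly_xfer and its parameters are illustrative, not part of the patch); it shows a transfer that meets those conditions when both buffers come from kmalloc():

#include <linux/spi/spi.h>

/* hypothetical helper: one full-duplex transfer that is eligible for the
 * DMA path (96..65535 bytes, word-aligned kmalloc()ed buffers)
 */
static int example_dma_friendly_xfer(struct spi_device *spi,
				     void *tx, void *rx, size_t len)
{
	struct spi_transfer tfr = {
		.tx_buf = tx,	/* kmalloc()ed, hence word aligned */
		.rx_buf = rx,
		.len	= len,	/* 96..65535 to stay within DLEN */
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&tfr, &msg);

	return spi_sync(spi, &msg);
}

For such a transfer the driver programs BCM2835_SPI_DLEN, sets BCM2835_SPI_CS_DMAEN and completes the message from the rx-DMA callback instead of the FIFO interrupt.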
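Transfers longer than 65535 bytes are rejected by bcm2835_spi_can_dma() because BCM2835_SPI_DLEN is only 16 bit wide, so they fall back to interrupt mode. A caller that wants to stay on the DMA path could split its buffer itself; the sketch below is hypothetical (example_chunked_write is not part of the patch) and assumes the device tolerates chip select being released between chunks, since each spi_write() is a separate message:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* hypothetical helper: write a large buffer in chunks that each fit the
 * 16-bit DLEN limit, so every chunk remains eligible for DMA
 */
static int example_chunked_write(struct spi_device *spi,
				 const u8 *buf, size_t len)
{
	size_t done = 0;
	int ret = 0;

	while (done < len && !ret) {
		size_t chunk = min_t(size_t, len - done, 65535);

		ret = spi_write(spi, buf + done, chunk);
		done += chunk;
	}

	return ret;
}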