Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r-- | drivers/spi/spi.c | 154
1 file changed, 86 insertions(+), 68 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index fc13fa192189..d4da5464dbd0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1222,11 +1222,6 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
-/* Dummy SG for unidirect transfers */
-static struct scatterlist dummy_sg = {
-	.page_link = SG_END,
-};
-
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
@@ -1265,8 +1260,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 						attrs);
 			if (ret != 0)
 				return ret;
-		} else {
-			xfer->tx_sg.sgl = &dummy_sg;
+
+			xfer->tx_sg_mapped = true;
 		}
 
 		if (xfer->rx_buf != NULL) {
@@ -1280,8 +1275,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 				return ret;
 			}
-		} else {
-			xfer->rx_sg.sgl = &dummy_sg;
+
+			xfer->rx_sg_mapped = true;
 		}
 	}
 
 	/* No transfer has been mapped, bail out with success */
@@ -1290,7 +1285,6 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
 	ctlr->cur_rx_dma_dev = rx_dev;
 	ctlr->cur_tx_dma_dev = tx_dev;
-	ctlr->cur_msg_mapped = true;
 
 	return 0;
 }
@@ -1301,57 +1295,46 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 	struct spi_transfer *xfer;
 
-	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
-		return 0;
-
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		/* The sync has already been done after each transfer. */
 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
 
-		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-			continue;
+		if (xfer->rx_sg_mapped)
+			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+					    DMA_FROM_DEVICE, attrs);
+		xfer->rx_sg_mapped = false;
 
-		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
-				    DMA_FROM_DEVICE, attrs);
-		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
-				    DMA_TO_DEVICE, attrs);
+		if (xfer->tx_sg_mapped)
+			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+					    DMA_TO_DEVICE, attrs);
+		xfer->tx_sg_mapped = false;
 	}
 
-	ctlr->cur_msg_mapped = false;
-
 	return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
 				    struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-	if (!ctlr->cur_msg_mapped)
-		return;
-
-	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-		return;
-
-	dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
-	dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg_mapped)
+		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg_mapped)
+		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
 				 struct spi_transfer *xfer)
 {
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-	if (!ctlr->cur_msg_mapped)
-		return;
-
-	if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-		return;
-
-	dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-	dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg_mapped)
+		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg_mapped)
+		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
 
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
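Taken together, the hunks above replace the controller-wide ctlr->cur_msg_mapped flag with per-transfer state: __spi_map_msg() sets xfer->tx_sg_mapped / xfer->rx_sg_mapped as it maps each buffer, the sync helpers test those flags instead of re-invoking ctlr->can_dma(), and the unmap path clears them. This also removes the need for the dummy_sg placeholder that previously marked unidirectional transfers. Assembled from the added lines, __spi_unmap_msg() reads as follows after the patch:

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		/* Unmap only what was actually mapped, then clear the flag. */
		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}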
@@ -1367,13 +1350,11 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
-				    struct spi_message *msg,
 				    struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
-				 struct spi_message *msg,
 				 struct spi_transfer *xfer)
 {
 }
@@ -1645,13 +1626,13 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-			spi_dma_sync_for_device(ctlr, msg, xfer);
+			spi_dma_sync_for_device(ctlr, xfer);
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
-				spi_dma_sync_for_cpu(ctlr, msg, xfer);
+				spi_dma_sync_for_cpu(ctlr, xfer);
 
-				if (ctlr->cur_msg_mapped &&
-				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 					__spi_unmap_msg(ctlr, msg);
 					ctlr->fallback = true;
 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -1673,7 +1654,7 @@ fallback_pio:
 					msg->status = ret;
 			}
 
-			spi_dma_sync_for_cpu(ctlr, msg, xfer);
+			spi_dma_sync_for_cpu(ctlr, xfer);
 		} else {
 			if (xfer->len)
 				dev_err(&msg->spi->dev,
@@ -2151,7 +2132,8 @@ static void __spi_unoptimize_message(struct spi_message *msg)
  */
 static void spi_maybe_unoptimize_message(struct spi_message *msg)
 {
-	if (!msg->pre_optimized && msg->optimized)
+	if (!msg->pre_optimized && msg->optimized &&
+	    !msg->spi->controller->defer_optimize_message)
 		__spi_unoptimize_message(msg);
 }
 
@@ -2230,11 +2212,8 @@ static int spi_start_queue(struct spi_controller *ctlr)
 
 static int spi_stop_queue(struct spi_controller *ctlr)
 {
+	unsigned int limit = 500;
 	unsigned long flags;
-	unsigned limit = 500;
-	int ret = 0;
-
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/*
 	 * This is a bit lame, but is optimized for the common execution path.
@@ -2242,20 +2221,18 @@ static int spi_stop_queue(struct spi_controller *ctlr)
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead.
 	 */
-	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+	do {
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		if (list_empty(&ctlr->queue) && !ctlr->busy) {
+			ctlr->running = false;
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+			return 0;
+		}
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		usleep_range(10000, 11000);
-		spin_lock_irqsave(&ctlr->queue_lock, flags);
-	}
-
-	if (!list_empty(&ctlr->queue) || ctlr->busy)
-		ret = -EBUSY;
-	else
-		ctlr->running = false;
+	} while (--limit);
 
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
-	return ret;
+	return -EBUSY;
 }
 
 static int spi_destroy_queue(struct spi_controller *ctlr)
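The spi_stop_queue() rework takes the queue lock inside the polling loop and returns as soon as the queue has drained, instead of carrying a ret variable across lock/unlock cycles. Assembled from the added lines, the function now reads:

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned int limit = 500;
	unsigned long flags;

	do {
		spin_lock_irqsave(&ctlr->queue_lock, flags);
		/* Done: nothing queued and the message pump is idle. */
		if (list_empty(&ctlr->queue) && !ctlr->busy) {
			ctlr->running = false;
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
	} while (--limit);

	return -EBUSY;
}

With 500 iterations of a roughly 10 ms sleep, the queue is given about five seconds to drain before -EBUSY is returned.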
@@ -2594,7 +2571,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
 {
 	struct spi_controller *ctlr = spi->controller;
 	struct spi_device *ancillary;
-	int rc = 0;
+	int rc;
 
 	/* Alloc an spi_device */
 	ancillary = spi_alloc_device(ctlr);
@@ -2740,7 +2717,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 			return -ENODEV;
 
 		if (ctlr) {
-			if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+			if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
 				return -ENODEV;
 		} else {
 			struct acpi_device *adev;
@@ -2839,7 +2816,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
 
 	if (!lookup.max_speed_hz &&
 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
-	    ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
+	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
 		/* Apple does not use _CRS but nested devices for SPI slaves */
 		acpi_spi_parse_apple_properties(adev, &lookup);
 	}
@@ -3925,7 +3902,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
 int spi_setup(struct spi_device *spi)
 {
 	unsigned	bad_bits, ugly_bits;
-	int		status = 0;
+	int		status;
 
 	/*
 	 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
@@ -4294,6 +4271,11 @@ static int __spi_optimize_message(struct spi_device *spi,
 static int spi_maybe_optimize_message(struct spi_device *spi,
 				      struct spi_message *msg)
 {
+	if (spi->controller->defer_optimize_message) {
+		msg->spi = spi;
+		return 0;
+	}
+
 	if (msg->pre_optimized)
 		return 0;
 
@@ -4324,6 +4306,13 @@ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
 {
 	int ret;
 
+	/*
+	 * Pre-optimization is not supported and optimization is deferred e.g.
+	 * when using spi-mux.
+	 */
+	if (spi->controller->defer_optimize_message)
+		return 0;
+
 	ret = __spi_optimize_message(spi, msg);
 	if (ret)
 		return ret;
@@ -4350,6 +4339,9 @@ EXPORT_SYMBOL_GPL(spi_optimize_message);
  */
 void spi_unoptimize_message(struct spi_message *msg)
 {
+	if (msg->spi->controller->defer_optimize_message)
+		return;
+
 	__spi_unoptimize_message(msg);
 	msg->pre_optimized = false;
 }
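The defer_optimize_message checks above turn spi_optimize_message() and spi_unoptimize_message() into no-ops on controllers (such as spi-mux) that can only resolve a message's route at transfer time. On an ordinary controller, a peripheral driver that reuses one message can still pre-optimize it once and amortize the validation work. A minimal sketch of that usage (the helper and its parameters are illustrative, not part of this patch):

/* Hypothetical example: send the same message n times, optimizing once. */
static int send_repeated(struct spi_device *spi, const void *buf,
			 size_t len, int n)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
	};
	struct spi_message msg;
	int i, ret;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	/* Validate and prepare the message once up front. */
	ret = spi_optimize_message(spi, &msg);
	if (ret)
		return ret;

	/* Subsequent transfers skip the per-call optimization work. */
	for (i = 0; i < n; i++) {
		ret = spi_sync(spi, &msg);
		if (ret)
			break;
	}

	spi_unoptimize_message(&msg);
	return ret;
}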
@@ -4382,6 +4374,34 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 
 	return ctlr->transfer(spi, message);
 }
 
+static void devm_spi_unoptimize_message(void *msg)
+{
+	spi_unoptimize_message(msg);
+}
+
+/**
+ * devm_spi_optimize_message - managed version of spi_optimize_message()
+ * @dev: the device that manages @msg (usually @spi->dev)
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ *
+ * spi_unoptimize_message() will automatically be called when the device is
+ * removed.
+ */
+int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+			      struct spi_message *msg)
+{
+	int ret;
+
+	ret = spi_optimize_message(spi, msg);
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
+}
+EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
+
 /**
  * spi_async - asynchronous SPI transfer
  * @spi: device with which data will be exchanged
@@ -4432,8 +4452,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
 
 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 
-	spi_maybe_unoptimize_message(message);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_async);
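The new devm_spi_optimize_message() ties the unoptimize step to a device's lifetime via devm_add_action_or_reset(), so callers need no explicit cleanup and cannot leak a pre-optimized message in an error path. A hypothetical probe-time usage (example_priv and the surrounding driver structure are illustrative):

struct example_priv {
	struct spi_transfer xfer;
	struct spi_message msg;
	u8 buf[4];
};

static int example_probe(struct spi_device *spi)
{
	struct example_priv *priv;
	int ret;

	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->xfer.tx_buf = priv->buf;
	priv->xfer.len = sizeof(priv->buf);
	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);

	/* spi_unoptimize_message() runs automatically on driver detach. */
	ret = devm_spi_optimize_message(&spi->dev, spi, &priv->msg);
	if (ret)
		return ret;

	spi_set_drvdata(spi, priv);
	return 0;
}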