author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-02-03 06:58:55 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-02-03 06:58:55 +0100
commit     cbce3de28c37fdd7531526d8dbb0ae2187667dfe (patch)
tree       d855f035af806dd91184812e6361a16d6455c2d5
parent     d45fed4ff61c48890e55a590bdd7a9fb22457be8 (diff)
parent     3c54a3ff0a2cdcd902482a62fef813f1d46e5eaf (diff)
Merge tag 'mhi-for-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next
Manivannan writes:
MHI Host
========
- Fixed the module description
MHI Endpoint
============
- Powered down the MHI EP stack completely during MHI RESET instead of just
  aborting transfers, since MMIO register access will be prohibited
  afterwards. The EP stack is also powered on again in case the RESET
  happened due to SYS_ERR.
- Added a sanity check before processing the command ring to make sure that
the channel is supported by the controller.
- Added a check to make sure the xfer_cb is available for the channel
  before trying to send the error status to the client drivers. This
  avoids a potential NULL pointer dereference.
- Fixed the debug log of the RESET command
- Modified the channel ring handler lock to protect the whole handler
  instead of locking it partially. This avoids a race that may happen if a
  channel STOP/RESET command is issued by the host in parallel.
- Saved the MHI channel state locally during suspend and resume. Otherwise,
  the MHI EP stack will not be aware of a channel that got disabled and may
  try to access it later.
- Changed the MHI state_lock from a spinlock to a mutex. This avoids the
  sleeping-in-atomic bug reported by Dan Carpenter and also allows the lock
  to be held throughout the state change (see the sketch after this list).
- Fixed the off-by-one error in the MHI channel check during command ring
  processing.
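
As an illustration of the state_lock change above, a condensed sketch follows.
It is not the in-tree code: the demo_* type and helpers are hypothetical
stand-ins for the driver's MMIO write and event-ring routines, trimmed down to
show why a mutex is needed once the critical section can sleep.

#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct mhi_ep_cntrl; only what the sketch needs. */
struct demo_ep_cntrl {
	struct mutex state_lock;	/* was spinlock_t before this series */
	u32 mhi_state;
};

/* Hypothetical helper standing in for the MMIO state write. */
static int demo_write_state(struct demo_ep_cntrl *ep, u32 state)
{
	ep->mhi_state = state;
	return 0;
}

/* Hypothetical helper for the state change event; in the real driver this may sleep. */
static int demo_send_event(struct demo_ep_cntrl *ep, u32 state)
{
	return 0;
}

static int demo_set_state(struct demo_ep_cntrl *ep, u32 state)
{
	int ret;

	/*
	 * A mutex can stay held across the whole transition, including the
	 * possibly-sleeping event send. Doing the same under the old
	 * spin_lock_bh() would be a sleeping-in-atomic bug.
	 */
	mutex_lock(&ep->state_lock);

	ret = demo_write_state(ep, state);
	if (ret)
		goto out_unlock;

	ret = demo_send_event(ep, state);

out_unlock:
	mutex_unlock(&ep->state_lock);
	return ret;
}

Holding the lock until the event has been sent also closes the window in which
a concurrent transition could observe a half-updated state, which is the second
motivation given above.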
MHI Generic
===========
- Updated the MHI top-level Makefile to use Kconfig flags so that the host
  and endpoint sub-directories are built conditionally.
* tag 'mhi-for-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
bus: mhi: ep: Fix off by one in mhi_ep_process_cmd_ring()
bus: mhi: ep: Change state_lock to mutex
bus: mhi: ep: Save channel state locally during suspend and resume
bus: mhi: ep: Move chan->lock to the start of processing queued ch ring
bus: mhi: ep: Fix the debug message for MHI_PKT_TYPE_RESET_CHAN_CMD cmd
bus: mhi: ep: Only send -ENOTCONN status if client driver is available
bus: mhi: ep: Check if the channel is supported by the controller
bus: mhi: ep: Power up/down MHI stack during MHI RESET
bus: mhi: host: Update mhi driver description
bus: mhi: Update Makefile to used Kconfig flags
-rw-r--r--   drivers/bus/mhi/Makefile     |  4
-rw-r--r--   drivers/bus/mhi/ep/main.c    | 85
-rw-r--r--   drivers/bus/mhi/ep/sm.c      | 42
-rw-r--r--   drivers/bus/mhi/host/init.c  |  2
-rw-r--r--   include/linux/mhi_ep.h       |  4
5 files changed, 75 insertions(+), 62 deletions(-)
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 46981331b38f..354204b0ef3a 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,5 +1,5 @@
 # Host MHI stack
-obj-y += host/
+obj-$(CONFIG_MHI_BUS) += host/
 
 # Endpoint MHI stack
-obj-y += ep/
+obj-$(CONFIG_MHI_BUS_EP) += ep/
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 1dc8a3557a46..dffe03658ff9 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -123,6 +123,13 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 	int ret;
 
 	ch_id = MHI_TRE_GET_CMD_CHID(el);
+
+	/* Check if the channel is supported by the controller */
+	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
+		dev_err(dev, "Channel (%u) not supported!\n", ch_id);
+		return -ENODEV;
+	}
+
 	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
 	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
 
@@ -196,9 +203,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
 
 		/* Send channel disconnect status to client drivers */
-		result.transaction_status = -ENOTCONN;
-		result.bytes_xferd = 0;
-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		if (mhi_chan->xfer_cb) {
+			result.transaction_status = -ENOTCONN;
+			result.bytes_xferd = 0;
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		}
 
 		/* Set channel state to STOP */
 		mhi_chan->state = MHI_CH_STATE_STOP;
@@ -217,7 +226,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 		mutex_unlock(&mhi_chan->lock);
 		break;
 	case MHI_PKT_TYPE_RESET_CHAN_CMD:
-		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
 		if (!ch_ring->started) {
 			dev_err(dev, "Channel (%u) not opened\n", ch_id);
 			return -ENODEV;
 		}
@@ -228,9 +237,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
 
 		/* Send channel disconnect status to client driver */
-		result.transaction_status = -ENOTCONN;
-		result.bytes_xferd = 0;
-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		if (mhi_chan->xfer_cb) {
+			result.transaction_status = -ENOTCONN;
+			result.bytes_xferd = 0;
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		}
 
 		/* Set channel state to DISABLED */
 		mhi_chan->state = MHI_CH_STATE_DISABLED;
@@ -719,24 +730,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 		list_del(&itr->node);
 		ring = itr->ring;
 
+		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+		mutex_lock(&chan->lock);
+
+		/*
+		 * The ring could've stopped while we waited to grab the (chan->lock), so do
+		 * a sanity check before going further.
+		 */
+		if (!ring->started) {
+			mutex_unlock(&chan->lock);
+			kfree(itr);
+			continue;
+		}
+
 		/* Update the write offset for the ring */
 		ret = mhi_ep_update_wr_offset(ring);
 		if (ret) {
 			dev_err(dev, "Error updating write offset for ring\n");
+			mutex_unlock(&chan->lock);
 			kfree(itr);
 			continue;
 		}
 
 		/* Sanity check to make sure there are elements in the ring */
 		if (ring->rd_offset == ring->wr_offset) {
+			mutex_unlock(&chan->lock);
 			kfree(itr);
 			continue;
 		}
 
 		el = &ring->ring_cache[ring->rd_offset];
 
-		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
-		mutex_lock(&chan->lock);
 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
 		ret = mhi_ep_process_ch_ring(ring, el);
 		if (ret) {
@@ -973,44 +997,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
 static void mhi_ep_reset_worker(struct work_struct *work)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	enum mhi_state cur_state;
-	int ret;
 
-	mhi_ep_abort_transfer(mhi_cntrl);
+	mhi_ep_power_down(mhi_cntrl);
+
+	mutex_lock(&mhi_cntrl->state_lock);
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
 	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
 	mhi_ep_mmio_reset(mhi_cntrl);
 	cur_state = mhi_cntrl->mhi_state;
-	spin_unlock_bh(&mhi_cntrl->state_lock);
 
 	/*
 	 * Only proceed further if the reset is due to SYS_ERR. The host will
 	 * issue reset during shutdown also and we don't need to do re-init in
 	 * that case.
 	 */
-	if (cur_state == MHI_STATE_SYS_ERR) {
-		mhi_ep_mmio_init(mhi_cntrl);
-
-		/* Set AMSS EE before signaling ready state */
-		mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
-
-		/* All set, notify the host that we are ready */
-		ret = mhi_ep_set_ready_state(mhi_cntrl);
-		if (ret)
-			return;
-
-		dev_dbg(dev, "READY state notification sent to the host\n");
-
-		ret = mhi_ep_enable(mhi_cntrl);
-		if (ret) {
-			dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
-			return;
-		}
+	if (cur_state == MHI_STATE_SYS_ERR)
+		mhi_ep_power_up(mhi_cntrl);
 
-		enable_irq(mhi_cntrl->irq);
-	}
+	mutex_unlock(&mhi_cntrl->state_lock);
 }
 
 /*
@@ -1089,11 +1094,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
 
 void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
 {
-	if (mhi_cntrl->enabled)
+	if (mhi_cntrl->enabled) {
 		mhi_ep_abort_transfer(mhi_cntrl);
-
-	kfree(mhi_cntrl->mhi_event);
-	disable_irq(mhi_cntrl->irq);
+		kfree(mhi_cntrl->mhi_event);
+		disable_irq(mhi_cntrl->irq);
+	}
 }
 EXPORT_SYMBOL_GPL(mhi_ep_power_down);
 
@@ -1119,6 +1124,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
 		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
 
 		/* Set channel state to SUSPENDED */
+		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1148,6 +1154,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
 		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
 
 		/* Set channel state to RUNNING */
+		mhi_chan->state = MHI_CH_STATE_RUNNING;
 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1381,8 +1388,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
 	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
-	spin_lock_init(&mhi_cntrl->state_lock);
 	spin_lock_init(&mhi_cntrl->list_lock);
+	mutex_init(&mhi_cntrl->state_lock);
 	mutex_init(&mhi_cntrl->event_lock);
 
 	/* Set MHI version and AMSS EE before enumeration */
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
index 3655c19e23c7..fd200b2ac0bb 100644
--- a/drivers/bus/mhi/ep/sm.c
+++ b/drivers/bus/mhi/ep/sm.c
@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
 	int ret;
 
 	/* If MHI is in M3, resume suspended channels */
-	spin_lock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
+
 	old_state = mhi_cntrl->mhi_state;
 	if (old_state == MHI_STATE_M3)
 		mhi_ep_resume_channels(mhi_cntrl);
 
 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
-
 	if (ret) {
 		mhi_ep_handle_syserr(mhi_cntrl);
-		return ret;
+		goto err_unlock;
 	}
 
 	/* Signal host that the device moved to M0 */
 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
 	if (ret) {
 		dev_err(dev, "Failed sending M0 state change event\n");
-		return ret;
+		goto err_unlock;
 	}
 
 	if (old_state == MHI_STATE_READY) {
@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
 		ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
 		if (ret) {
 			dev_err(dev, "Failed sending AMSS EE event\n");
-			return ret;
+			goto err_unlock;
 		}
 	}
 
-	return 0;
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
+	return ret;
 }
 
 int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	int ret;
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
-	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
 
+	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
 	if (ret) {
 		mhi_ep_handle_syserr(mhi_cntrl);
-		return ret;
+		goto err_unlock;
 	}
 
 	mhi_ep_suspend_channels(mhi_cntrl);
@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
 	if (ret) {
 		dev_err(dev, "Failed sending M3 state change event\n");
-		return ret;
+		goto err_unlock;
 	}
 
-	return 0;
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
+	return ret;
 }
 
 int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
 	enum mhi_state mhi_state;
 	int ret, is_ready;
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
+
 	/* Ensure that the MHISTATUS is set to RESET by host */
 	mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
 	is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
 
 	if (mhi_state != MHI_STATE_RESET || is_ready) {
 		dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
-		spin_unlock_bh(&mhi_cntrl->state_lock);
-		return -EIO;
+		ret = -EIO;
+		goto err_unlock;
 	}
 
 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
-
 	if (ret)
 		mhi_ep_handle_syserr(mhi_cntrl);
 
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
 	return ret;
 }
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index bf672de35131..7307335c4fd1 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1449,4 +1449,4 @@ postcore_initcall(mhi_init);
 module_exit(mhi_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MHI Host Interface");
+MODULE_DESCRIPTION("Modem Host Interface");
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 478aece17046..f198a8ac7ee7 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
  * @cmd_ctx_cache_phys: Physical address of the host command context cache
  * @chdb: Array of channel doorbell interrupt info
  * @event_lock: Lock for protecting event rings
- * @list_lock: Lock for protecting state transition and channel doorbell lists
  * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
  * @st_transition_list: List of state transitions
  * @ch_db_list: List of queued channel doorbells
  * @wq: Dedicated workqueue for handling rings and state changes
@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
 	struct mhi_ep_db_info chdb[4];
 
 	struct mutex event_lock;
+	struct mutex state_lock;
 	spinlock_t list_lock;
-	spinlock_t state_lock;
 
 	struct list_head st_transition_list;
 	struct list_head ch_db_list;