author		Mika Westerberg <mika.westerberg@linux.intel.com>	2017-10-02 13:38:41 +0300
committer	David S. Miller <davem@davemloft.net>	2017-10-02 11:24:41 -0700
commit		4ffe722eefcb07c76701f03e0d759fbaecedf79f (patch)
tree		08f0584d8bf1537616f64b8ee73ac40dccce6034 /drivers/thunderbolt
parent		59120e06101db72442acf4c8b364a0c76d8faa68 (diff)
thunderbolt: Add polling mode for rings
In order to support things like networking over a Thunderbolt cable, there
needs to be a way to switch the ring to a mode where it can be polled
with the interrupt masked. We implement such a mode so that the caller can
allocate a ring by passing a pointer to a function that is then called
when an interrupt is triggered. Completed frames can be fetched using
tb_ring_poll() and the interrupt can be re-enabled, once the caller is
finished with polling, by calling tb_ring_poll_complete().
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
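
To make the new calling convention concrete, here is a minimal sketch of a consumer allocating an Rx ring in polling mode. None of it is part of this patch: struct my_driver, my_start_poll() and my_open_ring() are hypothetical, and the hop number, ring size and flags are placeholder values; only the tb_ring_alloc_rx() signature and the start_poll/poll_data semantics come from the diff below.

#include <linux/thunderbolt.h>
#include <linux/workqueue.h>

/* Hypothetical consumer state; not defined by this patch. */
struct my_driver {
	struct tb_ring *rx_ring;
	struct work_struct poll_work;
};

/* Called by the NHI with the ring interrupt already masked. */
static void my_start_poll(void *data)
{
	struct my_driver *drv = data;

	/* Defer the drain to process context; the IRQ stays masked. */
	schedule_work(&drv->poll_work);
}

static int my_open_ring(struct my_driver *drv, struct tb_nhi *nhi)
{
	/* Hop, size and flags are placeholders; 0xffff accepts any PDF. */
	drv->rx_ring = tb_ring_alloc_rx(nhi, 1, 256, RING_FLAG_FRAME,
					0xffff, 0xffff, my_start_poll, drv);
	if (!drv->rx_ring)
		return -ENOMEM;

	tb_ring_start(drv->rx_ring);
	return 0;
}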
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--	drivers/thunderbolt/ctl.c	  2
-rw-r--r--	drivers/thunderbolt/nhi.c	126
2 files changed, 118 insertions(+), 10 deletions(-)
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index dd10789e1dbb..d079dbba2c03 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -619,7 +619,7 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
 		goto err;
 
 	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
-				   0xffff);
+				   0xffff, NULL, NULL);
 	if (!ctl->rx)
 		goto err;
 
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 0b3c0640048b..af0a80ddf594 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -252,7 +252,8 @@ invoke_callback:
 		 * Do not hold on to it.
 		 */
 		list_del_init(&frame->list);
-		frame->callback(ring, frame, canceled);
+		if (frame->callback)
+			frame->callback(ring, frame, canceled);
 	}
 }
 
@@ -273,11 +274,106 @@ int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 }
 EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
 
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called when the @start_poll callback of the
+ * @ring has been called. It will read one completed frame from the
+ * ring and return it to the caller. Returns %NULL if there are no
+ * more completed frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+	struct ring_frame *frame = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (!ring->running)
+		goto unlock;
+	if (ring_empty(ring))
+		goto unlock;
+
+	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+		frame = list_first_entry(&ring->in_flight, typeof(*frame),
+					 list);
+		list_del_init(&frame->list);
+
+		if (!ring->is_tx) {
+			frame->size = ring->descriptors[ring->tail].length;
+			frame->eof = ring->descriptors[ring->tail].eof;
+			frame->sof = ring->descriptors[ring->tail].sof;
+			frame->flags = ring->descriptors[ring->tail].flags;
+		}
+
+		ring->tail = (ring->tail + 1) % ring->size;
+	}
+
+unlock:
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+	int idx = ring_interrupt_index(ring);
+	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+	int bit = idx % 32;
+	u32 val;
+
+	val = ioread32(ring->nhi->iobase + reg);
+	if (mask)
+		val &= ~BIT(bit);
+	else
+		val |= BIT(bit);
+	iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+	if (!ring->running)
+		return;
+
+	if (ring->start_poll) {
+		__ring_interrupt_mask(ring, true);
+		ring->start_poll(ring->poll_data);
+	} else {
+		schedule_work(&ring->work);
+	}
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->nhi->lock, flags);
+	spin_lock(&ring->lock);
+	if (ring->start_poll)
+		__ring_interrupt_mask(ring, false);
+	spin_unlock(&ring->lock);
+	spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+
 static irqreturn_t ring_msix(int irq, void *data)
 {
 	struct tb_ring *ring = data;
 
-	schedule_work(&ring->work);
+	spin_lock(&ring->nhi->lock);
+	spin_lock(&ring->lock);
+	__ring_interrupt(ring);
+	spin_unlock(&ring->lock);
+	spin_unlock(&ring->nhi->lock);
+
 	return IRQ_HANDLED;
 }
 
@@ -317,7 +413,9 @@ static void ring_release_msix(struct tb_ring *ring)
 
 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 				     bool transmit, unsigned int flags,
-				     u16 sof_mask, u16 eof_mask)
+				     u16 sof_mask, u16 eof_mask,
+				     void (*start_poll)(void *),
+				     void *poll_data)
 {
 	struct tb_ring *ring = NULL;
 	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
@@ -346,6 +444,8 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	ring->head = 0;
 	ring->tail = 0;
 	ring->running = false;
+	ring->start_poll = start_poll;
+	ring->poll_data = poll_data;
 
 	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 			size * sizeof(*ring->descriptors),
@@ -399,7 +499,7 @@ err_free_ring:
 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
 				 unsigned int flags)
 {
-	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0);
+	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 
@@ -411,11 +511,17 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
  * @flags: Flags for the ring
  * @sof_mask: Mask of PDF values that start a frame
  * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ *		interrupt is triggered and masked, instead of the
+ *		callback in each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
  */
 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
-				 unsigned int flags, u16 sof_mask, u16 eof_mask)
+				 unsigned int flags, u16 sof_mask, u16 eof_mask,
+				 void (*start_poll)(void *), void *poll_data)
 {
-	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask);
+	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+			     start_poll, poll_data);
 }
 EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
 
@@ -556,6 +662,7 @@ void tb_ring_free(struct tb_ring *ring)
 		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
 			 RING_TYPE(ring), ring->hop);
 	}
+	spin_unlock_irq(&ring->nhi->lock);
 
 	ring_release_msix(ring);
 
@@ -572,7 +679,6 @@ void tb_ring_free(struct tb_ring *ring)
 		 RING_TYPE(ring), ring->hop);
 
-	spin_unlock_irq(&ring->nhi->lock);
 	/**
 	 * ring->work can no longer be scheduled (it is scheduled only
 	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
@@ -682,8 +788,10 @@ static void nhi_interrupt_work(struct work_struct *work)
 				 hop);
 			continue;
 		}
-		/* we do not check ring->running, this is done in ring->work */
-		schedule_work(&ring->work);
+
+		spin_lock(&ring->lock);
+		__ring_interrupt(ring);
+		spin_unlock(&ring->lock);
 	}
 	spin_unlock_irq(&nhi->lock);
 }
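
To close the loop, a matching sketch of the drain side. Again, my_poll_work() and my_handle_frame() are hypothetical; tb_ring_poll() and tb_ring_poll_complete() are the two exports added above. The loop consumes completed frames with the ring interrupt still masked, then unmasks it:

/* Hypothetical work handler paired with the my_start_poll() sketch above. */
static void my_poll_work(struct work_struct *work)
{
	struct my_driver *drv = container_of(work, struct my_driver,
					     poll_work);
	struct ring_frame *frame;

	/* Fetch completed frames while the ring interrupt is masked. */
	while ((frame = tb_ring_poll(drv->rx_ring)))
		my_handle_frame(drv, frame);	/* hypothetical helper */

	/* Done for now; unmask so the next frame raises an interrupt. */
	tb_ring_poll_complete(drv->rx_ring);
}

Because tb_ring_poll() returns a single frame per call under ring->lock, the caller decides how much to batch; a real consumer would typically bound this loop with a NAPI-style budget before calling tb_ring_poll_complete().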