author		Michael S. Tsirkin <mst@redhat.com>	2024-04-30 09:27:48 +1000
committer	Michael S. Tsirkin <mst@redhat.com>	2024-07-09 08:42:40 -0400
commit		7ad4723976672b7d359403a555e0ce5c75529153
tree		facd5a3970b5ffa5df8f945ef5312ea1006314e9 /drivers/vhost
parent		fdba68d2adf8d9ae680e23e8b8ba79330baa45bc
vhost: move smp_rmb() into vhost_get_avail_idx()
All callers of vhost_get_avail_idx() use smp_rmb() to order the
available ring entry read and the avail_idx read. Make
vhost_get_avail_idx() call smp_rmb() itself whenever avail_idx is
accessed. This way, the callers don't need to worry about the memory
barrier. As a side benefit, we also validate the index on all paths
now, which will hopefully help prevent/catch future bugs earlier.

Note that the current code is inconsistent in how errors are handled:
they are treated as an empty ring in some places, but as a non-empty
ring in others. This patch doesn't attempt to change the existing
behaviour.

No functional change intended.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Message-Id: <20240429232748.642356-1-gshan@redhat.com>
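For context, the barrier being centralised here pairs with the
guest-side publish sequence. A minimal sketch of the two sides of the
ordering protocol, with simplified names (illustrative only, not code
from this patch):

	/* Guest (producer), simplified: */
	ring[idx % num] = head;		/* expose the new entry ...		*/
	smp_wmb();			/* ... before publishing the index	*/
	avail_idx = idx + 1;

	/* Host (consumer), simplified: */
	idx = avail_idx;		/* read the published index ...		*/
	smp_rmb();			/* ... before reading the entry it covers */
	head = ring[last % num];

Before this patch, every host-side caller had to remember the smp_rmb()
step itself; after it, reading the index through the helper implies the
barrier.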
Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/vhost.c | 105
1 file changed, 42 insertions(+), 63 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b60955682474..9ac25d08f473 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1346,10 +1346,36 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
mutex_unlock(&d->vqs[i]->mutex);
}
-static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
- __virtio16 *idx)
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{
- return vhost_get_avail(vq, *idx, &vq->avail->idx);
+ __virtio16 idx;
+ int r;
+
+ r = vhost_get_avail(vq, idx, &vq->avail->idx);
+ if (unlikely(r < 0)) {
+ vq_err(vq, "Failed to access available index at %p (%d)\n",
+ &vq->avail->idx, r);
+ return r;
+ }
+
+ /* Check it isn't doing very strange things with available indexes */
+ vq->avail_idx = vhost16_to_cpu(vq, idx);
+ if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
+ vq_err(vq, "Invalid available index change from %u to %u",
+ vq->last_avail_idx, vq->avail_idx);
+ return -EINVAL;
+ }
+
+ /* We're done if there is nothing new */
+ if (vq->avail_idx == vq->last_avail_idx)
+ return 0;
+
+ /*
+ * We updated vq->avail_idx so we need a memory barrier between
+ * the index read above and the caller reading avail ring entries.
+ */
+ smp_rmb();
+ return 1;
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
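With the barrier folded in, vhost_get_avail_idx() now has a tri-state
contract: a negative errno on an access or validation failure, 0 when
the ring index is unchanged, and 1 when new entries are visible (with
the smp_rmb() already issued). A hypothetical caller following that
contract (illustrative, not part of the patch):

	ret = vhost_get_avail_idx(vq);
	if (unlikely(ret < 0))
		return ret;	/* access error or bogus index		*/
	if (!ret)
		return vq->num;	/* nothing new since we last looked	*/
	/* ret == 1: safe to read avail ring entries, barrier already done */

This is exactly the shape the vhost_get_vq_desc() hunk below takes.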
@@ -2554,38 +2580,17 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
{
struct vring_desc desc;
unsigned int i, head, found = 0;
- u16 last_avail_idx;
- __virtio16 avail_idx;
+ u16 last_avail_idx = vq->last_avail_idx;
__virtio16 ring_head;
int ret, access;
- /* Check it isn't doing very strange things with descriptor numbers. */
- last_avail_idx = vq->last_avail_idx;
-
if (vq->avail_idx == vq->last_avail_idx) {
- if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
- vq_err(vq, "Failed to access avail idx at %p\n",
- &vq->avail->idx);
- return -EFAULT;
- }
- vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-
- if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved avail index from %u to %u",
- last_avail_idx, vq->avail_idx);
- return -EFAULT;
- }
+ ret = vhost_get_avail_idx(vq);
+ if (unlikely(ret < 0))
+ return ret;
- /* If there's nothing new since last we looked, return
- * invalid.
- */
- if (vq->avail_idx == last_avail_idx)
+ if (!ret)
return vq->num;
-
- /* Only get avail ring entries after they have been
- * exposed by guest.
- */
- smp_rmb();
}
/* Grab the next descriptor number they're advertising, and increment
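Note that vhost_get_vq_desc() keeps its pre-existing convention of
signalling an empty ring by returning vq->num, an index one past the
valid descriptor range, rather than an error. A hypothetical consumer
distinguishing the three outcomes (illustrative, not from the patch):

	head = vhost_get_vq_desc(vq, iov, iov_size, &out, &in, NULL, NULL);
	if (unlikely(head < 0))
		goto err;		/* real failure			*/
	if (head == vq->num)
		goto nothing_to_do;	/* ring empty, not an error	*/
	/* head is a valid descriptor index to process			*/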
@@ -2846,35 +2851,21 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __virtio16 avail_idx;
int r;
if (vq->avail_idx != vq->last_avail_idx)
return false;
- r = vhost_get_avail_idx(vq, &avail_idx);
- if (unlikely(r))
- return false;
-
- vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
- if (vq->avail_idx != vq->last_avail_idx) {
- /* Since we have updated avail_idx, the following
- * call to vhost_get_vq_desc() will read available
- * ring entries. Make sure that read happens after
- * the avail_idx read.
- */
- smp_rmb();
- return false;
- }
+ r = vhost_get_avail_idx(vq);
- return true;
+ /* Note: we treat error as non-empty here */
+ return r == 0;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
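Under the new helper the mapping is: 0 (index unchanged) reads as
empty, while 1 (new entries) and, per the note above, a negative errno
are both reported as non-empty. A hypothetical poll loop leaning on
that guarantee (illustrative, not from the patch):

	while (!vhost_vq_avail_empty(dev, vq)) {
		head = vhost_get_vq_desc(vq, iov, iov_size, &out, &in,
					 NULL, NULL);
		if (head < 0 || head == vq->num)
			break;	/* error, or raced with the guest */
		/* process the descriptor chain starting at head */
	}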
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __virtio16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -2898,25 +2889,13 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
- r = vhost_get_avail_idx(vq, &avail_idx);
- if (r) {
- vq_err(vq, "Failed to check avail idx at %p: %d\n",
- &vq->avail->idx, r);
- return false;
- }
- vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
- if (vq->avail_idx != vq->last_avail_idx) {
- /* Since we have updated avail_idx, the following
- * call to vhost_get_vq_desc() will read available
- * ring entries. Make sure that read happens after
- * the avail_idx read.
- */
- smp_rmb();
- return true;
- }
+ r = vhost_get_avail_idx(vq);
+ /* Note: we treat error as empty here */
+ if (unlikely(r < 0))
+ return false;
- return false;
+ return r;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
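vhost_enable_notify() keeps the classic race-closing pattern: publish
the "notify me" state, smp_mb(), then re-read avail_idx. A true return
means entries slipped in meanwhile and the caller must poll once more
instead of sleeping. The usual shape of that loop in a vhost backend,
simplified from the pattern used in drivers/vhost/net.c (illustrative
only):

	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, iov, iov_size, &out, &in,
					 NULL, NULL);
		if (unlikely(head < 0))
			break;			/* error */
		if (head != vq->num) {
			/* process the descriptor chain at head */
			continue;
		}
		if (unlikely(vhost_enable_notify(dev, vq))) {
			/* raced: new work arrived, poll again */
			vhost_disable_notify(dev, vq);
			continue;
		}
		break;		/* truly idle: wait for a guest kick */
	}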