author    | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-23 12:04:36 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-23 12:04:36 -0700
commit    | 2ef32ad2241340565c35baf77fc95053c84eeeb0 (patch)
tree      | 672ed232affb09035e73e1ba3411be7f71d3289a
parent    | c760b3725e52403dc1b28644fb09c47a83cacea6 (diff)
parent    | c8fae27d141a32a1624d0d0d5419d94252824498 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
"Several new features here:
- virtio-net is finally supported in vduse
- virtio (balloon and mem) interaction with suspend is improved
- vhost-scsi now handles signals better/faster
And fixes, cleanups all over the place"
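On the first item (virtio-net support in VDUSE), the series restricts which feature bits a userspace-backed net device may advertise and requires CAP_NET_ADMIN to create one. Below is a minimal sketch of the net-specific feature checks, mirroring the vduse_dev.c hunk further down; the helper name is illustrative, the feature constants are the real UAPI ones:

```c
#include <linux/types.h>
#include <linux/bits.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

/* Hedged restatement of the checks applied when device_id == VIRTIO_ID_NET. */
static bool vduse_net_features_ok(u64 features)
{
	/* Userspace-backed devices must sit behind the DMA API / platform IOMMU. */
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return false;
	/* The control virtqueue is not supported for VDUSE net devices. */
	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		return false;
	/* Only modern (VIRTIO 1.x) net devices are accepted. */
	return !!(features & BIT_ULL(VIRTIO_F_VERSION_1));
}
```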
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (48 commits)
virtio-pci: Check if is_avq is NULL
virtio: delete vq in vp_find_vqs_msix() when request_irq() fails
MAINTAINERS: add Eugenio Pérez as reviewer
vhost-vdpa: Remove usage of the deprecated ida_simple_xx() API
vp_vdpa: don't allocate unused msix vectors
sound: virtio: drop owner assignment
fuse: virtio: drop owner assignment
scsi: virtio: drop owner assignment
rpmsg: virtio: drop owner assignment
nvdimm: virtio_pmem: drop owner assignment
wifi: mac80211_hwsim: drop owner assignment
vsock/virtio: drop owner assignment
net: 9p: virtio: drop owner assignment
net: virtio: drop owner assignment
net: caif: virtio: drop owner assignment
misc: nsm: drop owner assignment
iommu: virtio: drop owner assignment
drm/virtio: drop owner assignment
gpio: virtio: drop owner assignment
firmware: arm_scmi: virtio: drop owner assignment
...
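Most of the "drop owner assignment" commits listed above are the same mechanical cleanup: individual virtio drivers stop setting .driver.owner because the registration path can record the owning module itself. A hedged sketch of what a driver looks like after the cleanup, assuming the registration helper supplies THIS_MODULE on the driver's behalf (the example_* names are illustrative; struct virtio_driver and module_virtio_driver() are the real API):

```c
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

static const struct virtio_device_id example_id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, example_id_table);

static int example_probe(struct virtio_device *vdev)
{
	return 0;
}

static void example_remove(struct virtio_device *vdev)
{
}

static struct virtio_driver example_driver = {
	.driver.name	= KBUILD_MODNAME,
	/* .driver.owner = THIS_MODULE is no longer set here; the core is
	 * assumed to fill it in at registration time. */
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_virtio_driver(example_driver);

MODULE_LICENSE("GPL");
```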
40 files changed, 353 insertions, 175 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 7af431daf77b..27367ad339ea 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10654,8 +10654,10 @@ F: include/net/nl802154.h F: net/ieee802154/ F: net/mac802154/ -IFCVF VIRTIO DATA PATH ACCELERATOR -R: Zhu Lingshan <lingshan.zhu@intel.com> +Intel VIRTIO DATA PATH ACCELERATOR +M: Zhu Lingshan <lingshan.zhu@intel.com> +L: virtualization@lists.linux.dev +S: Supported F: drivers/vdpa/ifcvf/ IFE PROTOCOL @@ -23746,6 +23748,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com> M: Jason Wang <jasowang@redhat.com> R: Paolo Bonzini <pbonzini@redhat.com> R: Stefan Hajnoczi <stefanha@redhat.com> +R: Eugenio Pérez <eperezma@redhat.com> L: virtualization@lists.linux.dev S: Maintained F: drivers/block/virtio_blk.c @@ -23764,6 +23767,7 @@ VIRTIO CORE AND NET DRIVERS M: "Michael S. Tsirkin" <mst@redhat.com> M: Jason Wang <jasowang@redhat.com> R: Xuan Zhuo <xuanzhuo@linux.alibaba.com> +R: Eugenio Pérez <eperezma@redhat.com> L: virtualization@lists.linux.dev S: Maintained F: Documentation/ABI/testing/sysfs-bus-vdpa @@ -23805,6 +23809,7 @@ VIRTIO FILE SYSTEM M: Vivek Goyal <vgoyal@redhat.com> M: Stefan Hajnoczi <stefanha@redhat.com> M: Miklos Szeredi <miklos@szeredi.hu> +R: Eugenio Pérez <eperezma@redhat.com> L: virtualization@lists.linux.dev L: linux-fsdevel@vger.kernel.org S: Supported @@ -23838,6 +23843,7 @@ F: include/uapi/linux/virtio_gpu.h VIRTIO HOST (VHOST) M: "Michael S. Tsirkin" <mst@redhat.com> M: Jason Wang <jasowang@redhat.com> +R: Eugenio Pérez <eperezma@redhat.com> L: kvm@vger.kernel.org L: virtualization@lists.linux.dev L: netdev@vger.kernel.org diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index 97a37c062997..7cb503469bbd 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -752,7 +752,6 @@ MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver um_pci_virtio_driver = { .driver.name = "virtio-pci", - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = um_pci_virtio_probe, .remove = um_pci_virtio_remove, diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c1af0a7d56c8..2351f411fa46 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1658,7 +1658,6 @@ static struct virtio_driver virtio_blk = { .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = virtblk_remove, diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c index 18208e152a36..40bd83825c29 100644 --- a/drivers/bluetooth/virtio_bt.c +++ b/drivers/bluetooth/virtio_bt.c @@ -415,7 +415,6 @@ static const unsigned int virtbt_features[] = { static struct virtio_driver virtbt_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .feature_table = virtbt_features, .feature_table_size = ARRAY_SIZE(virtbt_features), .id_table = virtbt_table, diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 7a4b45393acb..dd998f4fe4f2 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -245,7 +245,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 035f89f1a251..d9ee2dbc7eab 
100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2173,7 +2173,6 @@ static struct virtio_driver virtio_console = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtcons_probe, .remove = virtcons_remove, @@ -2188,7 +2187,6 @@ static struct virtio_driver virtio_rproc_serial = { .feature_table = rproc_serial_features, .feature_table_size = ARRAY_SIZE(rproc_serial_features), .driver.name = "virtio_rproc_serial", - .driver.owner = THIS_MODULE, .id_table = rproc_serial_id_table, .probe = virtcons_probe, .remove = virtcons_remove, diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 6a67d70e7f1c..30cd040aa03b 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -581,7 +581,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_crypto_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index d68c01cb7aa0..4892058445ce 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -908,7 +908,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_scmi_driver = { .driver.name = "scmi-virtio", - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index fcc5e8c08973..9fae8e396c58 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -653,7 +653,6 @@ static struct virtio_driver virtio_gpio_driver = { .remove = virtio_gpio_remove, .driver = { .name = KBUILD_MODNAME, - .owner = THIS_MODULE, }, }; module_virtio_driver(virtio_gpio_driver); diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 9539aa28937f..188e126383c2 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -154,7 +154,6 @@ static struct virtio_driver virtio_gpu_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtio_gpu_probe, .remove = virtio_gpu_remove, diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 8e776f6c6e35..36d680826b57 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1251,7 +1251,6 @@ MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_iommu_drv = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c index 0eaa3b4484bd..ef7b32742340 100644 --- a/drivers/misc/nsm.c +++ b/drivers/misc/nsm.c @@ -494,7 +494,6 @@ static struct virtio_driver virtio_nsm_driver = { .feature_table_legacy = 0, .feature_table_size_legacy = 0, .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = nsm_device_probe, .remove = nsm_device_remove, diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 0b0f234b0b50..99d984851fef 100644 --- 
a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -782,7 +782,6 @@ static struct virtio_driver caif_virtio_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = cfv_probe, .remove = cfv_remove, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4e1a0fc0d555..4a802c0ea2cb 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -6039,7 +6039,6 @@ static struct virtio_driver virtio_net_driver = { .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .validate = virtnet_validate, .probe = virtnet_probe, diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index b5afaec61827..c5d896994e70 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -6678,7 +6678,6 @@ MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_hwsim = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = hwsim_virtio_probe, .remove = hwsim_virtio_remove, diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index 4ceced5cefcf..c9b97aeabf85 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -151,7 +151,6 @@ static struct virtio_driver virtio_pmem_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .validate = virtio_pmem_validate, .probe = virtio_pmem_probe, diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 1062939c3264..e9e8c1f7829f 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -1053,7 +1053,6 @@ static struct virtio_driver virtio_ipc_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = rpmsg_probe, .remove = rpmsg_remove, diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 617eb892f4ad..89ca26945721 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -1052,7 +1052,6 @@ static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtscsi_probe, #ifdef CONFIG_PM_SLEEP diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 6cb96a1e8b7d..8d391947eb8d 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -98,7 +98,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override); + len = sysfs_emit(buf, "%s\n", vdev->driver_override); device_unlock(dev); return len; diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 73c89701fc9d..7ae99691efdf 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -8,6 +8,7 @@ * */ +#include "linux/virtio_net.h" #include <linux/init.h> #include <linux/module.h> #include <linux/cdev.h> @@ -28,6 +29,7 @@ #include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_ids.h> #include 
<uapi/linux/virtio_blk.h> +#include <uapi/linux/virtio_ring.h> #include <linux/mod_devicetable.h> #include "iova_domain.h" @@ -141,6 +143,7 @@ static struct workqueue_struct *vduse_irq_bound_wq; static u32 allowed_device_id[] = { VIRTIO_ID_BLOCK, + VIRTIO_ID_NET, }; static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa) @@ -1705,13 +1708,21 @@ static bool device_is_allowed(u32 device_id) return false; } -static bool features_is_valid(u64 features) +static bool features_is_valid(struct vduse_dev_config *config) { - if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) + if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return false; /* Now we only support read-only configuration space */ - if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE)) + if ((config->device_id == VIRTIO_ID_BLOCK) && + (config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE))) + return false; + else if ((config->device_id == VIRTIO_ID_NET) && + (config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) + return false; + + if ((config->device_id == VIRTIO_ID_NET) && + !(config->features & BIT_ULL(VIRTIO_F_VERSION_1))) return false; return true; @@ -1738,7 +1749,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config) if (!device_is_allowed(config->device_id)) return false; - if (!features_is_valid(config->features)) + if (!features_is_valid(config)) return false; return true; @@ -1821,6 +1832,10 @@ static int vduse_create_dev(struct vduse_dev_config *config, int ret; struct vduse_dev *dev; + ret = -EPERM; + if ((config->device_id == VIRTIO_ID_NET) && !capable(CAP_NET_ADMIN)) + goto err; + ret = -EEXIST; if (vduse_find_dev(config->name)) goto err; @@ -2064,6 +2079,7 @@ static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = { static struct virtio_device_id id_table[] = { { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index df5f4a3bccb5..ac4ab22f7d8b 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -160,7 +160,13 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa) struct pci_dev *pdev = mdev->pci_dev; int i, ret, irq; int queues = vp_vdpa->queues; - int vectors = queues + 1; + int vectors = 1; + int msix_vec = 0; + + for (i = 0; i < queues; i++) { + if (vp_vdpa->vring[i].cb.callback) + vectors++; + } ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX); if (ret != vectors) { @@ -173,9 +179,12 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa) vp_vdpa->vectors = vectors; for (i = 0; i < queues; i++) { + if (!vp_vdpa->vring[i].cb.callback) + continue; + snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-%d\n", pci_name(pdev), i); - irq = pci_irq_vector(pdev, i); + irq = pci_irq_vector(pdev, msix_vec); ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_vq_handler, 0, vp_vdpa->vring[i].msix_name, @@ -185,21 +194,22 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa) "vp_vdpa: fail to request irq for vq %d\n", i); goto err; } - vp_modern_queue_vector(mdev, i, i); + vp_modern_queue_vector(mdev, i, msix_vec); vp_vdpa->vring[i].irq = irq; + msix_vec++; } snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n", pci_name(pdev)); - irq = pci_irq_vector(pdev, queues); + irq = pci_irq_vector(pdev, msix_vec); ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0, vp_vdpa->msix_name, vp_vdpa); if (ret) { dev_err(&pdev->dev, - "vp_vdpa: fail to request irq for vq 
%d\n", i); + "vp_vdpa: fail to request irq for config: %d\n", ret); goto err; } - vp_modern_config_vector(mdev, queues); + vp_modern_config_vector(mdev, msix_vec); vp_vdpa->config_irq = irq; return 0; @@ -216,7 +226,10 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status) if (status & VIRTIO_CONFIG_S_DRIVER_OK && !(s & VIRTIO_CONFIG_S_DRIVER_OK)) { - vp_vdpa_request_irq(vp_vdpa); + if (vp_vdpa_request_irq(vp_vdpa)) { + WARN_ON(1); + return; + } } vp_modern_set_status(mdev, status); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 282aac45c690..006ffacf1c56 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -210,6 +210,7 @@ struct vhost_scsi { struct vhost_scsi_tmf { struct vhost_work vwork; + struct work_struct flush_work; struct vhost_scsi *vhost; struct vhost_scsi_virtqueue *svq; @@ -358,14 +359,23 @@ static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf) vhost_scsi_put_inflight(inflight); } +static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq) +{ + struct vhost_scsi_cmd *cmd, *t; + struct llist_node *llnode; + + llnode = llist_del_all(&svq->completion_list); + llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); +} + static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) { if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf, se_cmd); - struct vhost_virtqueue *vq = &tmf->svq->vq; - vhost_vq_work_queue(vq, &tmf->vwork); + schedule_work(&tmf->flush_work); } else { struct vhost_scsi_cmd *cmd = container_of(se_cmd, struct vhost_scsi_cmd, tvc_se_cmd); @@ -373,7 +383,8 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) struct vhost_scsi_virtqueue, vq); llist_add(&cmd->tvc_completion_list, &svq->completion_list); - vhost_vq_work_queue(&svq->vq, &svq->completion_work); + if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work)) + vhost_scsi_drop_cmds(svq); } } @@ -497,10 +508,8 @@ again: vq_err(vq, "Faulted on vhost_scsi_send_event\n"); } -static void vhost_scsi_evt_work(struct vhost_work *work) +static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop) { - struct vhost_scsi *vs = container_of(work, struct vhost_scsi, - vs_event_work); struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct vhost_scsi_evt *evt, *t; struct llist_node *llnode; @@ -508,12 +517,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work) mutex_lock(&vq->mutex); llnode = llist_del_all(&vs->vs_event_list); llist_for_each_entry_safe(evt, t, llnode, list) { - vhost_scsi_do_evt_work(vs, evt); + if (!drop) + vhost_scsi_do_evt_work(vs, evt); vhost_scsi_free_evt(vs, evt); } mutex_unlock(&vq->mutex); } +static void vhost_scsi_evt_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_event_work); + vhost_scsi_complete_events(vs, false); +} + static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd) { struct iov_iter *iter = &cmd->saved_iter; @@ -1270,33 +1287,32 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work) { struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf, vwork); - struct vhost_virtqueue *ctl_vq, *vq; - int resp_code, i; - - if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) { - /* - * Flush IO vqs that don't share a worker with the ctl to make - * sure they have sent their responses before us. 
- */ - ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq; - for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) { - vq = &tmf->vhost->vqs[i].vq; - - if (vhost_vq_is_setup(vq) && - vq->worker != ctl_vq->worker) - vhost_vq_flush(vq); - } + int resp_code; + if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; - } else { + else resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED; - } vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, tmf->vq_desc, &tmf->resp_iov, resp_code); vhost_scsi_release_tmf_res(tmf); } +static void vhost_scsi_tmf_flush_work(struct work_struct *work) +{ + struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf, + flush_work); + struct vhost_virtqueue *vq = &tmf->svq->vq; + /* + * Make sure we have sent responses for other commands before we + * send our response. + */ + vhost_dev_flush(vq->dev); + if (!vhost_vq_work_queue(vq, &tmf->vwork)) + vhost_scsi_release_tmf_res(tmf); +} + static void vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, struct vhost_virtqueue *vq, @@ -1320,6 +1336,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, if (!tmf) goto send_reject; + INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work); vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work); tmf->vhost = vs; tmf->svq = svq; @@ -1509,7 +1526,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq, } llist_add(&evt->list, &vs->vs_event_list); - vhost_vq_work_queue(vq, &vs->vs_event_work); + if (!vhost_vq_work_queue(vq, &vs->vs_event_work)) + vhost_scsi_complete_events(vs, true); } static void vhost_scsi_evt_handle_kick(struct vhost_work *work) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index ba52d128aeb7..63a53680a85c 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1548,7 +1548,7 @@ static void vhost_vdpa_release_dev(struct device *device) struct vhost_vdpa *v = container_of(device, struct vhost_vdpa, dev); - ida_simple_remove(&vhost_vdpa_ida, v->minor); + ida_free(&vhost_vdpa_ida, v->minor); kfree(v->vqs); kfree(v); } @@ -1571,8 +1571,8 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) if (!v) return -ENOMEM; - minor = ida_simple_get(&vhost_vdpa_ida, 0, - VHOST_VDPA_DEV_MAX, GFP_KERNEL); + minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1, + GFP_KERNEL); if (minor < 0) { kfree(v); return minor; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 8995730ce0bf..b60955682474 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -263,34 +263,37 @@ bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work) } EXPORT_SYMBOL_GPL(vhost_vq_work_queue); -void vhost_vq_flush(struct vhost_virtqueue *vq) -{ - struct vhost_flush_struct flush; - - init_completion(&flush.wait_event); - vhost_work_init(&flush.work, vhost_flush_work); - - if (vhost_vq_work_queue(vq, &flush.work)) - wait_for_completion(&flush.wait_event); -} -EXPORT_SYMBOL_GPL(vhost_vq_flush); - /** - * vhost_worker_flush - flush a worker + * __vhost_worker_flush - flush a worker * @worker: worker to flush * - * This does not use RCU to protect the worker, so the device or worker - * mutex must be held. + * The worker's flush_mutex must be held. 
*/ -static void vhost_worker_flush(struct vhost_worker *worker) +static void __vhost_worker_flush(struct vhost_worker *worker) { struct vhost_flush_struct flush; + if (!worker->attachment_cnt || worker->killed) + return; + init_completion(&flush.wait_event); vhost_work_init(&flush.work, vhost_flush_work); vhost_worker_queue(worker, &flush.work); + /* + * Drop mutex in case our worker is killed and it needs to take the + * mutex to force cleanup. + */ + mutex_unlock(&worker->mutex); wait_for_completion(&flush.wait_event); + mutex_lock(&worker->mutex); +} + +static void vhost_worker_flush(struct vhost_worker *worker) +{ + mutex_lock(&worker->mutex); + __vhost_worker_flush(worker); + mutex_unlock(&worker->mutex); } void vhost_dev_flush(struct vhost_dev *dev) @@ -298,15 +301,8 @@ void vhost_dev_flush(struct vhost_dev *dev) struct vhost_worker *worker; unsigned long i; - xa_for_each(&dev->worker_xa, i, worker) { - mutex_lock(&worker->mutex); - if (!worker->attachment_cnt) { - mutex_unlock(&worker->mutex); - continue; - } + xa_for_each(&dev->worker_xa, i, worker) vhost_worker_flush(worker); - mutex_unlock(&worker->mutex); - } } EXPORT_SYMBOL_GPL(vhost_dev_flush); @@ -392,7 +388,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, __vhost_vq_meta_reset(vq); } -static bool vhost_worker(void *data) +static bool vhost_run_work_list(void *data) { struct vhost_worker *worker = data; struct vhost_work *work, *work_next; @@ -417,6 +413,40 @@ static bool vhost_worker(void *data) return !!node; } +static void vhost_worker_killed(void *data) +{ + struct vhost_worker *worker = data; + struct vhost_dev *dev = worker->dev; + struct vhost_virtqueue *vq; + int i, attach_cnt = 0; + + mutex_lock(&worker->mutex); + worker->killed = true; + + for (i = 0; i < dev->nvqs; i++) { + vq = dev->vqs[i]; + + mutex_lock(&vq->mutex); + if (worker == + rcu_dereference_check(vq->worker, + lockdep_is_held(&vq->mutex))) { + rcu_assign_pointer(vq->worker, NULL); + attach_cnt++; + } + mutex_unlock(&vq->mutex); + } + + worker->attachment_cnt -= attach_cnt; + if (attach_cnt) + synchronize_rcu(); + /* + * Finish vhost_worker_flush calls and any other works that snuck in + * before the synchronize_rcu. 
+ */ + vhost_run_work_list(worker); + mutex_unlock(&worker->mutex); +} + static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) { kfree(vq->indirect); @@ -631,9 +661,11 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev) if (!worker) return NULL; + worker->dev = dev; snprintf(name, sizeof(name), "vhost-%d", current->pid); - vtsk = vhost_task_create(vhost_worker, worker, name); + vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed, + worker, name); if (!vtsk) goto free_worker; @@ -664,22 +696,37 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq, { struct vhost_worker *old_worker; - old_worker = rcu_dereference_check(vq->worker, - lockdep_is_held(&vq->dev->mutex)); - mutex_lock(&worker->mutex); - worker->attachment_cnt++; - mutex_unlock(&worker->mutex); + if (worker->killed) { + mutex_unlock(&worker->mutex); + return; + } + + mutex_lock(&vq->mutex); + + old_worker = rcu_dereference_check(vq->worker, + lockdep_is_held(&vq->mutex)); rcu_assign_pointer(vq->worker, worker); + worker->attachment_cnt++; - if (!old_worker) + if (!old_worker) { + mutex_unlock(&vq->mutex); + mutex_unlock(&worker->mutex); return; + } + mutex_unlock(&vq->mutex); + mutex_unlock(&worker->mutex); + /* * Take the worker mutex to make sure we see the work queued from * device wide flushes which doesn't use RCU for execution. */ mutex_lock(&old_worker->mutex); - old_worker->attachment_cnt--; + if (old_worker->killed) { + mutex_unlock(&old_worker->mutex); + return; + } + /* * We don't want to call synchronize_rcu for every vq during setup * because it will slow down VM startup. If we haven't done @@ -690,6 +737,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq, mutex_lock(&vq->mutex); if (!vhost_vq_get_backend(vq) && !vq->kick) { mutex_unlock(&vq->mutex); + + old_worker->attachment_cnt--; mutex_unlock(&old_worker->mutex); /* * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID. @@ -705,7 +754,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq, /* Make sure new vq queue/flush/poll calls see the new worker */ synchronize_rcu(); /* Make sure whatever was queued gets run */ - vhost_worker_flush(old_worker); + __vhost_worker_flush(old_worker); + old_worker->attachment_cnt--; mutex_unlock(&old_worker->mutex); } @@ -754,10 +804,16 @@ static int vhost_free_worker(struct vhost_dev *dev, return -ENODEV; mutex_lock(&worker->mutex); - if (worker->attachment_cnt) { + if (worker->attachment_cnt || worker->killed) { mutex_unlock(&worker->mutex); return -EBUSY; } + /* + * A flush might have raced and snuck in before attachment_cnt was set + * to zero. Make sure flushes are flushed from the queue before + * freeing. + */ + __vhost_worker_flush(worker); mutex_unlock(&worker->mutex); vhost_worker_destroy(dev, worker); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 9e942fcda5c3..bb75a292d50c 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -28,12 +28,14 @@ struct vhost_work { struct vhost_worker { struct vhost_task *vtsk; + struct vhost_dev *dev; /* Used to serialize device wide flushing with worker swapping. 
*/ struct mutex mutex; struct llist_head work_list; u64 kcov_handle; u32 id; int attachment_cnt; + bool killed; }; /* Poll a file (eventfd or socket) */ @@ -205,7 +207,6 @@ int vhost_get_vq_desc(struct vhost_virtqueue *, struct vhost_log *log, unsigned int *log_num); void vhost_discard_vq_desc(struct vhost_virtqueue *, int n); -void vhost_vq_flush(struct vhost_virtqueue *vq); bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work); bool vhost_vq_has_work(struct vhost_virtqueue *vq); bool vhost_vq_is_setup(struct vhost_virtqueue *vq); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1f5b3dd31fcf..c0a63638f95e 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -121,11 +121,14 @@ struct virtio_balloon { struct page_reporting_dev_info pr_dev_info; /* State for keeping the wakeup_source active while adjusting the balloon */ - spinlock_t adjustment_lock; - bool adjustment_signal_pending; - bool adjustment_in_progress; + spinlock_t wakeup_lock; + bool processing_wakeup_event; + u32 wakeup_signal_mask; }; +#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0) +#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1) + static const struct virtio_device_id id_table[] = { { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -140,6 +143,36 @@ static u32 page_to_balloon_pfn(struct page *page) return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; } +static void start_wakeup_event(struct virtio_balloon *vb, u32 mask) +{ + unsigned long flags; + + spin_lock_irqsave(&vb->wakeup_lock, flags); + vb->wakeup_signal_mask |= mask; + if (!vb->processing_wakeup_event) { + vb->processing_wakeup_event = true; + pm_stay_awake(&vb->vdev->dev); + } + spin_unlock_irqrestore(&vb->wakeup_lock, flags); +} + +static void process_wakeup_event(struct virtio_balloon *vb, u32 mask) +{ + spin_lock_irq(&vb->wakeup_lock); + vb->wakeup_signal_mask &= ~mask; + spin_unlock_irq(&vb->wakeup_lock); +} + +static void finish_wakeup_event(struct virtio_balloon *vb) +{ + spin_lock_irq(&vb->wakeup_lock); + if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) { + vb->processing_wakeup_event = false; + pm_relax(&vb->vdev->dev); + } + spin_unlock_irq(&vb->wakeup_lock); +} + static void balloon_ack(struct virtqueue *vq) { struct virtio_balloon *vb = vq->vdev->priv; @@ -370,8 +403,10 @@ static void stats_request(struct virtqueue *vq) struct virtio_balloon *vb = vq->vdev->priv; spin_lock(&vb->stop_update_lock); - if (!vb->stop_update) + if (!vb->stop_update) { + start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS); queue_work(system_freezable_wq, &vb->update_balloon_stats_work); + } spin_unlock(&vb->stop_update_lock); } @@ -444,29 +479,10 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) static void start_update_balloon_size(struct virtio_balloon *vb) { - unsigned long flags; - - spin_lock_irqsave(&vb->adjustment_lock, flags); - vb->adjustment_signal_pending = true; - if (!vb->adjustment_in_progress) { - vb->adjustment_in_progress = true; - pm_stay_awake(vb->vdev->dev.parent); - } - spin_unlock_irqrestore(&vb->adjustment_lock, flags); - + start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST); queue_work(system_freezable_wq, &vb->update_balloon_size_work); } -static void end_update_balloon_size(struct virtio_balloon *vb) -{ - spin_lock_irq(&vb->adjustment_lock); - if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) { - vb->adjustment_in_progress = false; - pm_relax(vb->vdev->dev.parent); - } - 
spin_unlock_irq(&vb->adjustment_lock); -} - static void virtballoon_changed(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; @@ -495,7 +511,10 @@ static void update_balloon_stats_func(struct work_struct *work) vb = container_of(work, struct virtio_balloon, update_balloon_stats_work); + + process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS); stats_handle_request(vb); + finish_wakeup_event(vb); } static void update_balloon_size_func(struct work_struct *work) @@ -506,9 +525,7 @@ static void update_balloon_size_func(struct work_struct *work) vb = container_of(work, struct virtio_balloon, update_balloon_size_work); - spin_lock_irq(&vb->adjustment_lock); - vb->adjustment_signal_pending = false; - spin_unlock_irq(&vb->adjustment_lock); + process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST); diff = towards_target(vb); @@ -523,7 +540,7 @@ static void update_balloon_size_func(struct work_struct *work) if (diff) queue_work(system_freezable_wq, work); else - end_update_balloon_size(vb); + finish_wakeup_event(vb); } static int init_vqs(struct virtio_balloon *vb) @@ -1027,7 +1044,16 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out_unregister_oom; } - spin_lock_init(&vb->adjustment_lock); + spin_lock_init(&vb->wakeup_lock); + + /* + * The virtio balloon itself can't wake up the device, but it is + * responsible for processing wakeup events passed up from the transport + * layer. Wakeup sources don't support nesting/chaining calls, so we use + * our own wakeup source to ensure wakeup events are properly handled + * without trampling on the transport layer's wakeup source. + */ + device_set_wakeup_capable(&vb->vdev->dev, true); virtio_device_ready(vdev); @@ -1155,7 +1181,6 @@ static struct virtio_driver virtio_balloon_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .validate = virtballoon_validate, .probe = virtballoon_probe, diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 3aa46703872d..1a730d6c0b55 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -394,7 +394,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_input_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index e8355f55a8f7..a3857bacc844 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -22,6 +22,7 @@ #include <linux/lockdep.h> #include <linux/log2.h> #include <linux/vmalloc.h> +#include <linux/suspend.h> #include <acpi/acpi_numa.h> @@ -253,6 +254,9 @@ struct virtio_mem { /* Memory notifier (online/offline events). */ struct notifier_block memory_notifier; + /* Notifier to block hibernation image storing/reloading. 
*/ + struct notifier_block pm_notifier; + #ifdef CONFIG_PROC_VMCORE /* vmcore callback for /proc/vmcore handling in kdump mode */ struct vmcore_cb vmcore_cb; @@ -1112,6 +1116,25 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb, return rc; } +static int virtio_mem_pm_notifier_cb(struct notifier_block *nb, + unsigned long action, void *arg) +{ + struct virtio_mem *vm = container_of(nb, struct virtio_mem, + pm_notifier); + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + /* + * When restarting the VM, all memory is unplugged. Don't + * allow to hibernate and restore from an image. + */ + dev_err(&vm->vdev->dev, "hibernation is not supported.\n"); + return NOTIFY_BAD; + default: + return NOTIFY_OK; + } +} + /* * Set a range of pages PG_offline. Remember pages that were never onlined * (via generic_online_page()) using PageDirty(). @@ -2616,11 +2639,19 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm) rc = register_memory_notifier(&vm->memory_notifier); if (rc) goto out_unreg_group; - rc = register_virtio_mem_device(vm); + /* Block hibernation as early as possible. */ + vm->pm_notifier.priority = INT_MAX; + vm->pm_notifier.notifier_call = virtio_mem_pm_notifier_cb; + rc = register_pm_notifier(&vm->pm_notifier); if (rc) goto out_unreg_mem; + rc = register_virtio_mem_device(vm); + if (rc) + goto out_unreg_pm; return 0; +out_unreg_pm: + unregister_pm_notifier(&vm->pm_notifier); out_unreg_mem: unregister_memory_notifier(&vm->memory_notifier); out_unreg_group: @@ -2898,6 +2929,7 @@ static void virtio_mem_deinit_hotplug(struct virtio_mem *vm) /* unregister callbacks */ unregister_virtio_mem_device(vm); + unregister_pm_notifier(&vm->pm_notifier); unregister_memory_notifier(&vm->memory_notifier); /* @@ -2961,17 +2993,40 @@ static void virtio_mem_config_changed(struct virtio_device *vdev) #ifdef CONFIG_PM_SLEEP static int virtio_mem_freeze(struct virtio_device *vdev) { + struct virtio_mem *vm = vdev->priv; + /* - * When restarting the VM, all memory is usually unplugged. Don't - * allow to suspend/hibernate. + * We block hibernation using the PM notifier completely. The workqueue + * is already frozen by the PM core at this point, so we simply + * reset the device and cleanup the queues. */ - dev_err(&vdev->dev, "save/restore not supported.\n"); - return -EPERM; + if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && + vm->plugged_size && + !virtio_has_feature(vm->vdev, VIRTIO_MEM_F_PERSISTENT_SUSPEND)) { + dev_err(&vm->vdev->dev, + "suspending with plugged memory is not supported\n"); + return -EPERM; + } + + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); + vm->vq = NULL; + return 0; } static int virtio_mem_restore(struct virtio_device *vdev) { - return -EPERM; + struct virtio_mem *vm = vdev->priv; + int ret; + + ret = virtio_mem_init_vq(vm); + if (ret) + return ret; + virtio_device_ready(vdev); + + /* Let's check if anything changed. 
*/ + virtio_mem_config_changed(vdev); + return 0; } #endif @@ -2980,6 +3035,7 @@ static unsigned int virtio_mem_features[] = { VIRTIO_MEM_F_ACPI_PXM, #endif VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, + VIRTIO_MEM_F_PERSISTENT_SUSPEND, }; static const struct virtio_device_id virtio_mem_id_table[] = { @@ -2991,7 +3047,6 @@ static struct virtio_driver virtio_mem_driver = { .feature_table = virtio_mem_features, .feature_table_size = ARRAY_SIZE(virtio_mem_features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = virtio_mem_id_table, .probe = virtio_mem_probe, .remove = virtio_mem_remove, diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 59892a31cf76..173596589c71 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -696,12 +696,10 @@ free_vm_dev: return rc; } -static int virtio_mmio_remove(struct platform_device *pdev) +static void virtio_mmio_remove(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); unregister_virtio_device(&vm_dev->vdev); - - return 0; } @@ -847,7 +845,7 @@ MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match); static struct platform_driver virtio_mmio_driver = { .probe = virtio_mmio_probe, - .remove = virtio_mmio_remove, + .remove_new = virtio_mmio_remove, .driver = { .name = "virtio-mmio", .of_match_table = virtio_mmio_match, diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index b655fccaf773..f6b0b00e4599 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -236,7 +236,7 @@ void vp_del_vqs(struct virtio_device *vdev) int i; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->is_avq(vdev, vq->index)) + if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index)) continue; if (vp_dev->per_vq_vectors) { @@ -348,8 +348,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); - if (err) + if (err) { + vp_del_vq(vqs[i]); goto error_find; + } } return 0; diff --git a/fs/coredump.c b/fs/coredump.c index 317065e3eb9b..a57a06b80f57 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -376,9 +376,7 @@ static int zap_process(struct task_struct *start, int exit_code) if (t != current && !(t->flags & PF_POSTCOREDUMP)) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); - /* The vhost_worker does not particpate in coredumps */ - if ((t->flags & (PF_USER_WORKER | PF_IO_WORKER)) != PF_USER_WORKER) - nr++; + nr++; } } diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 8ffa4f063a37..1a52a51b6b07 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1078,7 +1078,6 @@ static const unsigned int feature_table[] = {}; static struct virtio_driver virtio_fs_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .feature_table = feature_table, .feature_table_size = ARRAY_SIZE(feature_table), diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h index bc60243d43b3..25446c5d3508 100644 --- a/include/linux/sched/vhost_task.h +++ b/include/linux/sched/vhost_task.h @@ -4,7 +4,8 @@ struct vhost_task; -struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg, +struct vhost_task *vhost_task_create(bool (*fn)(void *), + void (*handle_kill)(void *), void *arg, const char *name); void vhost_task_start(struct vhost_task *vtsk); void vhost_task_stop(struct vhost_task *vtsk); diff --git a/include/uapi/linux/virtio_mem.h 
b/include/uapi/linux/virtio_mem.h index e9122f1d0e0c..6e4b2cf6b7f1 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -90,6 +90,8 @@ #define VIRTIO_MEM_F_ACPI_PXM 0 /* unplugged memory must not be accessed */ #define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1 +/* plugged memory will remain plugged when suspending+resuming */ +#define VIRTIO_MEM_F_PERSISTENT_SUSPEND 2 /* --- virtio-mem: guest -> host requests --- */ diff --git a/kernel/exit.c b/kernel/exit.c index cd3aa9042f1a..f95a2c1338a8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -413,10 +413,7 @@ static void coredump_task_exit(struct task_struct *tsk) tsk->flags |= PF_POSTCOREDUMP; core_state = tsk->signal->core_state; spin_unlock_irq(&tsk->sighand->siglock); - - /* The vhost_worker does not particpate in coredumps */ - if (core_state && - ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) { + if (core_state) { struct core_thread self; self.task = current; diff --git a/kernel/signal.c b/kernel/signal.c index 01c4c46a51a8..1f9dd41c04be 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1375,9 +1375,7 @@ int zap_other_threads(struct task_struct *p) for_other_threads(p, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); - /* Don't require de_thread to wait for the vhost_worker */ - if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER) - count++; + count++; /* Don't bother with already dead threads */ if (t->exit_state) diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index da35e5b7f047..8800f5acc007 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -10,38 +10,32 @@ enum vhost_task_flags { VHOST_TASK_FLAGS_STOP, + VHOST_TASK_FLAGS_KILLED, }; struct vhost_task { bool (*fn)(void *data); + void (*handle_sigkill)(void *data); void *data; struct completion exited; unsigned long flags; struct task_struct *task; + /* serialize SIGKILL and vhost_task_stop calls */ + struct mutex exit_mutex; }; static int vhost_task_fn(void *data) { struct vhost_task *vtsk = data; - bool dead = false; for (;;) { bool did_work; - if (!dead && signal_pending(current)) { + if (signal_pending(current)) { struct ksignal ksig; - /* - * Calling get_signal will block in SIGSTOP, - * or clear fatal_signal_pending, but remember - * what was set. - * - * This thread won't actually exit until all - * of the file descriptors are closed, and - * the release function is called. - */ - dead = get_signal(&ksig); - if (dead) - clear_thread_flag(TIF_SIGPENDING); + + if (get_signal(&ksig)) + break; } /* mb paired w/ vhost_task_stop */ @@ -57,7 +51,19 @@ static int vhost_task_fn(void *data) schedule(); } + mutex_lock(&vtsk->exit_mutex); + /* + * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL. + * When the vhost layer has called vhost_task_stop it's already stopped + * new work and flushed. + */ + if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) { + set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags); + vtsk->handle_sigkill(vtsk->data); + } + mutex_unlock(&vtsk->exit_mutex); complete(&vtsk->exited); + do_exit(0); } @@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake); * @vtsk: vhost_task to stop * * vhost_task_fn ensures the worker thread exits after - * VHOST_TASK_FLAGS_SOP becomes true. + * VHOST_TASK_FLAGS_STOP becomes true. 
*/ void vhost_task_stop(struct vhost_task *vtsk) { - set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); - vhost_task_wake(vtsk); + mutex_lock(&vtsk->exit_mutex); + if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) { + set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); + vhost_task_wake(vtsk); + } + mutex_unlock(&vtsk->exit_mutex); + /* * Make sure vhost_task_fn is no longer accessing the vhost_task before * freeing it below. @@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop); /** * vhost_task_create - create a copy of a task to be used by the kernel * @fn: vhost worker function - * @arg: data to be passed to fn + * @handle_sigkill: vhost function to handle when we are killed + * @arg: data to be passed to fn and handled_kill * @name: the thread's name * * This returns a specialized task for use by the vhost layer or NULL on * failure. The returned task is inactive, and the caller must fire it up * through vhost_task_start(). */ -struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg, +struct vhost_task *vhost_task_create(bool (*fn)(void *), + void (*handle_sigkill)(void *), void *arg, const char *name) { struct kernel_clone_args args = { @@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg, if (!vtsk) return NULL; init_completion(&vtsk->exited); + mutex_init(&vtsk->exit_mutex); vtsk->data = arg; vtsk->fn = fn; + vtsk->handle_sigkill = handle_sigkill; args.fn_arg = vtsk; diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index e305071eb7b8..0b8086f58ad5 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -781,7 +781,6 @@ static struct virtio_driver p9_virtio_drv = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = p9_virtio_probe, .remove = p9_virtio_remove, diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index ee5d306a96d0..43d405298857 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -859,7 +859,6 @@ static struct virtio_driver virtio_vsock_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtio_vsock_probe, .remove = virtio_vsock_remove, diff --git a/sound/virtio/virtio_card.c b/sound/virtio/virtio_card.c index 2da20c625247..7805daea0102 100644 --- a/sound/virtio/virtio_card.c +++ b/sound/virtio/virtio_card.c @@ -438,7 +438,6 @@ static unsigned int features[] = { static struct virtio_driver virtsnd_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), |
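One API change worth calling out from the vhost signal-handling work: vhost_task_create() now takes a second callback that runs if the worker task is SIGKILLed, so the vhost layer can drop or flush queued work instead of relying on the coredump/exit special-casing removed above. A hedged sketch of a caller, where only vhost_task_create() and vhost_task_start() are the real API and the my_* names are placeholders:

```c
#include <linux/sched/vhost_task.h>

/* Work-list function: return true if work was processed (run again),
 * false to let the task sleep until it is woken. */
static bool my_run_work_list(void *data)
{
	return false;
}

/* Called once the task has received SIGKILL; drop or complete anything
 * still queued so process exit is not blocked. */
static void my_handle_kill(void *data)
{
}

static struct vhost_task *my_start_worker(void *worker)
{
	struct vhost_task *vtsk;

	vtsk = vhost_task_create(my_run_work_list, my_handle_kill,
				 worker, "vhost-example");
	if (!vtsk)
		return NULL;

	vhost_task_start(vtsk);
	return vtsk;
}
```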