From c1b9f2c66eed3261db76cccd8a22a9affae8dcbf Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 20 Oct 2022 21:21:09 +0200 Subject: [PATCH 01/18] vdpa: Fix an error handling path in eni_vdpa_probe() After a successful vp_legacy_probe() call, vp_legacy_remove() should be called in the error handling path, as is already done in the remove function. Add the missing call. Fixes: e85087beedca ("eni_vdpa: add vDPA driver for Alibaba ENI") Signed-off-by: Christophe JAILLET Message-Id: Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/alibaba/eni_vdpa.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c index 5a09a09cca70..cce3d1837104 100644 --- a/drivers/vdpa/alibaba/eni_vdpa.c +++ b/drivers/vdpa/alibaba/eni_vdpa.c @@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!eni_vdpa->vring) { ret = -ENOMEM; ENI_ERR(pdev, "failed to allocate virtqueues\n"); - goto err; + goto err_remove_vp_legacy; } for (i = 0; i < eni_vdpa->queues; i++) { @@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues); if (ret) { ENI_ERR(pdev, "failed to register to vdpa bus\n"); - goto err; + goto err_remove_vp_legacy; } return 0; +err_remove_vp_legacy: + vp_legacy_remove(&eni_vdpa->ldev); err: put_device(&eni_vdpa->vdpa.dev); return ret; From ab78ffe1ff7d17102972348bb9b1a16ec2696a2b Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Tue, 26 Dec 2023 18:28:27 +0000 Subject: [PATCH 02/18] vhost-vdpa: account iommu allocations IOMMU allocations should be accounted so that admins can monitor and limit the amount of IOMMU memory. Signed-off-by: Pasha Tatashin Acked-by: Michael S. Tsirkin Message-Id: <20231226182827.294158-1-pasha.tatashin@soleen.com> Signed-off-by: Michael S. Tsirkin Acked-by: David Rientjes --- drivers/vhost/vdpa.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index da7ec77cdaff..a51c69c078d9 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -968,7 +968,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, r = ops->set_map(vdpa, asid, iotlb); } else { r = iommu_map(v->domain, iova, pa, size, - perm_to_iommu_flags(perm), GFP_KERNEL); + perm_to_iommu_flags(perm), + GFP_KERNEL_ACCOUNT); } if (r) { vhost_iotlb_del_range(iotlb, iova, iova + size - 1); From d2c4f1928a3f7f4a1f28a0cfc022e8a145ce6903 Mon Sep 17 00:00:00 2001 From: Xuan Zhuo Date: Tue, 26 Dec 2023 17:43:33 +0800 Subject: [PATCH 03/18] virtio_net: fix missing dma unmap for resize For rq, we have three cases of getting buffers back from the virtio core: 1. virtqueue_get_buf{,_ctx} 2. virtqueue_detach_unused_buf 3. callback for virtqueue_resize But in commit 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers"), I missed the dma unmap for the #3 case. That will leak some memory, because I did not release the pages referred to by the unused buffers. Running a script like the following makes the system OOM: while true do ethtool -G ens4 rx 128 ethtool -G ens4 rx 256 free -m done Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers") Signed-off-by: Xuan Zhuo Message-Id: <20231226094333.47740-1-xuanzhuo@linux.alibaba.com> Signed-off-by: Michael S.
Tsirkin --- drivers/net/virtio_net.c | 60 ++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d16f592c2061..51b1868d2f22 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -334,7 +334,6 @@ struct virtio_net_common_hdr { }; }; -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); static bool is_xdp_frame(void *ptr) @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } +static void virtnet_rq_free_buf(struct virtnet_info *vi, + struct receive_queue *rq, void *buf) +{ + if (vi->mergeable_rx_bufs) + put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(rq, buf); + else + put_page(virt_to_head_page(buf)); +} + static void enable_delayed_refill(struct virtnet_info *vi) { spin_lock_bh(&vi->refill_lock); @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) return buf; } -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) -{ - void *buf; - - buf = virtqueue_detach_unused_buf(rq->vq); - if (buf && rq->do_dma) - virtnet_rq_unmap(rq, buf, 0); - - return buf; -} - static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) { struct virtnet_rq_dma *dma; @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi) } } +static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) +{ + struct virtnet_info *vi = vq->vdev->priv; + struct receive_queue *rq; + int i = vq2rxq(vq); + + rq = &vi->rq[i]; + + if (rq->do_dma) + virtnet_rq_unmap(rq, buf, 0); + + virtnet_rq_free_buf(vi, rq, buf); +} + static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { unsigned int len; @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); DEV_STATS_INC(dev, rx_length_errors); - virtnet_rq_free_unused_buf(rq->vq, buf); + virtnet_rq_free_buf(vi, rq, buf); return; } @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi, if (running) napi_disable(&rq->napi); - err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); if (err) netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) xdp_return_frame(ptr_to_xdp(buf)); } -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) -{ - struct virtnet_info *vi = vq->vdev->priv; - int i = vq2rxq(vq); - - if (vi->mergeable_rx_bufs) - put_page(virt_to_head_page(buf)); - else if (vi->big_packets) - give_pages(&vi->rq[i], buf); - else - put_page(virt_to_head_page(buf)); -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi) } for (i = 0; i < vi->max_queue_pairs; i++) { - struct receive_queue *rq = &vi->rq[i]; + struct virtqueue *vq = vi->rq[i].vq; - while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) - virtnet_rq_free_unused_buf(rq->vq, buf); + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) + virtnet_rq_unmap_free_buf(vq, buf); cond_resched(); } } From dff4fa0e57856045359440d05af9e9b7f7048f52 Mon 
Sep 17 00:00:00 2001 From: David Stevens Date: Fri, 8 Dec 2023 16:07:54 +0900 Subject: [PATCH 04/18] virtio: Add support for no-reset virtio PCI PM If a virtio_pci_device supports native PCI power management and has the No_Soft_Reset bit set, then skip resetting and reinitializing the device when suspending and restoring the device. This allows system-wide low power states like s2idle to be used in systems with stateful virtio devices that can't simply be re-initialized (e.g. virtio-fs). Signed-off-by: David Stevens Message-Id: <20231208070754.3132339-1-stevensd@chromium.org> Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/virtio/virtio_pci_common.c | 34 +++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 7a5593997e0e..1d21d1a1b3f5 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -492,8 +492,40 @@ static int virtio_pci_restore(struct device *dev) return virtio_device_restore(&vp_dev->vdev); } +static bool vp_supports_pm_no_reset(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u16 pmcsr; + + if (!pci_dev->pm_cap) + return false; + + pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr); + if (PCI_POSSIBLE_ERROR(pmcsr)) { + dev_err(dev, "Unable to query pmcsr"); + return false; + } + + return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET; +} + +static int virtio_pci_suspend(struct device *dev) +{ + return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev); +} + +static int virtio_pci_resume(struct device *dev) +{ + return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev); +} + static const struct dev_pm_ops virtio_pci_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) + .suspend = virtio_pci_suspend, + .resume = virtio_pci_resume, + .freeze = virtio_pci_freeze, + .thaw = virtio_pci_restore, + .poweroff = virtio_pci_freeze, + .restore = virtio_pci_restore, }; #endif From c271fcd9095f066e0e7104e6d8919e2cf26f0899 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 10 Dec 2023 18:51:50 +0100 Subject: [PATCH 05/18] vdpa: Remove usage of the deprecated ida_simple_xx() API ida_alloc() and ida_free() should be preferred to the deprecated ida_simple_get() and ida_simple_remove(). This is less verbose. Signed-off-by: Christophe JAILLET Message-Id: Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/vdpa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index a7612e0783b3..d0695680b282 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -131,7 +131,7 @@ static void vdpa_release_dev(struct device *d) if (ops->free) ops->free(vdev); - ida_simple_remove(&vdpa_index_ida, vdev->index); + ida_free(&vdpa_index_ida, vdev->index); kfree(vdev->driver_override); kfree(vdev); } @@ -205,7 +205,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, return vdev; err_name: - ida_simple_remove(&vdpa_index_ida, vdev->index); + ida_free(&vdpa_index_ida, vdev->index); err_ida: kfree(vdev); err: From b12fbc3f787e3d7e47af274a761fdee6e867bc7d Mon Sep 17 00:00:00 2001 From: David Stevens Date: Wed, 10 Jan 2024 11:19:25 +0900 Subject: [PATCH 06/18] virtio_balloon: stay awake while adjusting balloon A virtio_balloon's parent device may be configured so that a configuration change interrupt is a wakeup event. 
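For illustration, the wakeup capability referred to here comes from the standard driver-core PM API; a minimal sketch (assumed setup, not part of this patch; "dev" stands in for the balloon's parent device):

/* Hypothetical setup: make the parent device wakeup-capable and
 * enable wakeup, so that its configuration change interrupt is
 * treated as a wakeup event. */
device_set_wakeup_capable(dev, true);
device_wakeup_enable(dev);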
Extend the processing of such a wakeup event until the balloon finishes inflating or deflating by calling pm_stay_awake/pm_relax in the virtio_balloon driver. Note that these calls are no-ops if the parent device doesn't support wakeup events or if the wakeup events are not enabled. This change allows the guest to use system power states such as s2idle without running the risk of the virtio_balloon's cooperative memory management becoming unresponsive to the host's requests. Tested-by: Theodore Ts'o Signed-off-by: David Stevens Signed-off-by: Theodore Ts'o Message-Id: <20240110021925.1137333-1-stevensd@google.com> Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 57 +++++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 10 deletions(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1fe93e93f5bc..fa710e6c505a 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -119,6 +119,11 @@ struct virtio_balloon { /* Free page reporting device */ struct virtqueue *reporting_vq; struct page_reporting_dev_info pr_dev_info; + + /* State for keeping the wakeup_source active while adjusting the balloon */ + spinlock_t adjustment_lock; + bool adjustment_signal_pending; + bool adjustment_in_progress; }; static const struct virtio_device_id id_table[] = { @@ -437,6 +442,31 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) queue_work(vb->balloon_wq, &vb->report_free_page_work); } +static void start_update_balloon_size(struct virtio_balloon *vb) +{ + unsigned long flags; + + spin_lock_irqsave(&vb->adjustment_lock, flags); + vb->adjustment_signal_pending = true; + if (!vb->adjustment_in_progress) { + vb->adjustment_in_progress = true; + pm_stay_awake(vb->vdev->dev.parent); + } + spin_unlock_irqrestore(&vb->adjustment_lock, flags); + + queue_work(system_freezable_wq, &vb->update_balloon_size_work); +} + +static void end_update_balloon_size(struct virtio_balloon *vb) +{ + spin_lock_irq(&vb->adjustment_lock); + if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) { + vb->adjustment_in_progress = false; + pm_relax(vb->vdev->dev.parent); + } + spin_unlock_irq(&vb->adjustment_lock); +} + static void virtballoon_changed(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; @@ -444,8 +474,7 @@ static void virtballoon_changed(struct virtio_device *vdev) spin_lock_irqsave(&vb->stop_update_lock, flags); if (!vb->stop_update) { - queue_work(system_freezable_wq, - &vb->update_balloon_size_work); + start_update_balloon_size(vb); virtio_balloon_queue_free_page_work(vb); } spin_unlock_irqrestore(&vb->stop_update_lock, flags); @@ -476,19 +505,25 @@ static void update_balloon_size_func(struct work_struct *work) vb = container_of(work, struct virtio_balloon, update_balloon_size_work); + + spin_lock_irq(&vb->adjustment_lock); + vb->adjustment_signal_pending = false; + spin_unlock_irq(&vb->adjustment_lock); + diff = towards_target(vb); - if (!diff) - return; - - if (diff > 0) - diff -= fill_balloon(vb, diff); - else - diff += leak_balloon(vb, -diff); - update_balloon_size(vb); + if (diff) { + if (diff > 0) + diff -= fill_balloon(vb, diff); + else + diff += leak_balloon(vb, -diff); + update_balloon_size(vb); + } if (diff) queue_work(system_freezable_wq, work); + else + end_update_balloon_size(vb); } static int init_vqs(struct virtio_balloon *vb) @@ -992,6 +1027,8 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out_unregister_oom; } +
spin_lock_init(&vb->adjustment_lock); + virtio_device_ready(vdev); if (towards_target(vb)) From 35967bdcff325f4572b21b0d0005318da7e03f53 Mon Sep 17 00:00:00 2001 From: Changyuan Lyu Date: Wed, 20 Dec 2023 12:49:06 -0800 Subject: [PATCH 07/18] virtio_pmem: support feature SHMEM_REGION This patch adds the support for feature VIRTIO_PMEM_F_SHMEM_REGION (virtio spec v1.2 section 5.19.5.2 [1]). During feature negotiation, if VIRTIO_PMEM_F_SHMEM_REGION is offered by the device, the driver looks for a shared memory region of id 0. If it is found, this feature is understood. Otherwise, this feature bit is cleared. During probe, if VIRTIO_PMEM_F_SHMEM_REGION has been negotiated, virtio pmem ignores the `start` and `size` fields in device config and uses the physical address range of shared memory region 0. [1] https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-6480002 Signed-off-by: Changyuan Lyu Message-Id: <20231220204906.566922-1-changyuanl@google.com> Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/nvdimm/virtio_pmem.c | 36 ++++++++++++++++++++++++++++---- include/uapi/linux/virtio_pmem.h | 7 +++++++ 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index a92eb172f0e7..4ceced5cefcf 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -29,12 +29,27 @@ static int init_vq(struct virtio_pmem *vpmem) return 0; }; +static int virtio_pmem_validate(struct virtio_device *vdev) +{ + struct virtio_shm_region shm_reg; + + if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION) && + !virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID) + ) { + dev_notice(&vdev->dev, "failed to get shared memory region %d\n", + VIRTIO_PMEM_SHMEM_REGION_ID); + __virtio_clear_bit(vdev, VIRTIO_PMEM_F_SHMEM_REGION); + } + return 0; +} + static int virtio_pmem_probe(struct virtio_device *vdev) { struct nd_region_desc ndr_desc = {}; struct nd_region *nd_region; struct virtio_pmem *vpmem; struct resource res; + struct virtio_shm_region shm_reg; int err = 0; if (!vdev->config->get) { @@ -57,10 +72,16 @@ static int virtio_pmem_probe(struct virtio_device *vdev) goto out_err; } - virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, - start, &vpmem->start); - virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, - size, &vpmem->size); + if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION)) { + virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID); + vpmem->start = shm_reg.addr; + vpmem->size = shm_reg.len; + } else { + virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, + start, &vpmem->start); + virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, + size, &vpmem->size); + } res.start = vpmem->start; res.end = vpmem->start + vpmem->size - 1; @@ -122,10 +143,17 @@ static void virtio_pmem_remove(struct virtio_device *vdev) virtio_reset_device(vdev); } +static unsigned int features[] = { + VIRTIO_PMEM_F_SHMEM_REGION, +}; + static struct virtio_driver virtio_pmem_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtio_pmem_validate, .probe = virtio_pmem_probe, .remove = virtio_pmem_remove, }; diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h index d676b3620383..ede4f3564977 100644 --- a/include/uapi/linux/virtio_pmem.h +++ b/include/uapi/linux/virtio_pmem.h @@ -14,6 +14,13 @@ #include 
#include +/* Feature bits */ +/* guest physical address range will be indicated as shared memory region 0 */ +#define VIRTIO_PMEM_F_SHMEM_REGION 0 + +/* shmid of the shared memory region corresponding to the pmem */ +#define VIRTIO_PMEM_SHMEM_REGION_ID 0 + struct virtio_pmem_config { __le64 start; __le64 size; From 95e7249691f082a5178d4d6f60fcdee91da458ab Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 13 Dec 2023 23:26:49 -0600 Subject: [PATCH 08/18] scsi: virtio_scsi: Add mq_poll support This adds polling support to virtio-scsi. It's based on and works similarly to the virtblk support, where we add a module param to specify the number of poll queues and then subtract that to calculate the number of IO queues. When using 8 poll queues and a vhost worker per queue, we see 4K IOPS measured with fio (fio --filename=/dev/sda --direct=1 --rw=randread --bs=4k \ --ioengine=io_uring --hipri --iodepth=128 --numjobs=$NUM_JOBS) increase like: jobs base poll 1 207K 296K 2 392K 552K 3 581K 860K 4 765K 1235K 5 936K 1598K 6 1104K 1880K 7 1253K 2095K 8 1311K 2187K Signed-off-by: Mike Christie Message-Id: <20231214052649.57743-1-michael.christie@oracle.com> Signed-off-by: Michael S. Tsirkin --- drivers/scsi/virtio_scsi.c | 78 +++++++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 9d1bdcdc1331..4cf20be668a6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -37,6 +37,11 @@ #define VIRTIO_SCSI_EVENT_LEN 8 #define VIRTIO_SCSI_VQ_BASE 2 +static unsigned int virtscsi_poll_queues; +module_param(virtscsi_poll_queues, uint, 0644); +MODULE_PARM_DESC(virtscsi_poll_queues, + "The number of dedicated virtqueues for polling I/O"); + /* Command queue element */ struct virtio_scsi_cmd { struct scsi_cmnd *sc; @@ -76,6 +81,7 @@ struct virtio_scsi { struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; u32 num_queues; + int io_queues[HCTX_MAX_TYPES]; struct hlist_node node; @@ -722,9 +728,49 @@ static int virtscsi_abort(struct scsi_cmnd *sc) static void virtscsi_map_queues(struct Scsi_Host *shost) { struct virtio_scsi *vscsi = shost_priv(shost); - struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + int i, qoff; - blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + struct blk_mq_queue_map *map = &shost->tag_set.map[i]; + + map->nr_queues = vscsi->io_queues[i]; + map->queue_offset = qoff; + qoff += map->nr_queues; + + if (map->nr_queues == 0) + continue; + + /* + * Regular queues have interrupts and hence CPU affinity is + * defined by the core virtio code, but polling queues have + * no interrupts so we let the block layer assign CPU affinity.
+ */ + if (i == HCTX_TYPE_POLL) + blk_mq_map_queues(map); + else + blk_mq_virtio_map_queues(map, vscsi->vdev, 2); + } +} + +static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num]; + unsigned long flags; + unsigned int len; + int found = 0; + void *buf; + + spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); + + while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) { + virtscsi_complete_cmd(vscsi, buf); + found++; + } + + spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); + + return found; } static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) @@ -751,6 +797,7 @@ static const struct scsi_host_template virtscsi_host_template = { .this_id = -1, .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand, + .mq_poll = virtscsi_mq_poll, .commit_rqs = virtscsi_commit_rqs, .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, @@ -795,13 +842,14 @@ static int virtscsi_init(struct virtio_device *vdev, { int err; u32 i; - u32 num_vqs; + u32 num_vqs, num_poll_vqs, num_req_vqs; vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; struct irq_affinity desc = { .pre_vectors = 2 }; - num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; + num_req_vqs = vscsi->num_queues; + num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL); callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *), GFP_KERNEL); @@ -812,15 +860,31 @@ static int virtscsi_init(struct virtio_device *vdev, goto out; } + num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues, + num_req_vqs - 1); + vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs; + vscsi->io_queues[HCTX_TYPE_READ] = 0; + vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; + + dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", + vscsi->io_queues[HCTX_TYPE_DEFAULT], + vscsi->io_queues[HCTX_TYPE_READ], + vscsi->io_queues[HCTX_TYPE_POLL]); + callbacks[0] = virtscsi_ctrl_done; callbacks[1] = virtscsi_event_done; names[0] = "control"; names[1] = "event"; - for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) { callbacks[i] = virtscsi_req_done; names[i] = "request"; } + for (; i < num_vqs; i++) { + callbacks[i] = NULL; + names[i] = "request_poll"; + } + /* Discover virtqueues and write information to configuration. */ err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) @@ -874,6 +938,7 @@ static int virtscsi_probe(struct virtio_device *vdev) sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; shost->sg_tablesize = sg_elems; + shost->nr_maps = 1; vscsi = shost_priv(shost); vscsi->vdev = vdev; vscsi->num_queues = num_queues; @@ -883,6 +948,9 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; + if (vscsi->io_queues[HCTX_TYPE_POLL]) + shost->nr_maps = HCTX_TYPE_POLL + 1; + shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq); cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; From c7e194402be3ed385dfaefaf1681bb731fd776e2 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 15:42:09 +0200 Subject: [PATCH 09/18] vdpa: Track device suspended state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set vdpa device suspended state on successful suspend. 
Clear it on successful resume and reset. The state will be locked by the vhost_vdpa mutex. The mutex is taken during suspend, resume and reset in vhost_vdpa_unlocked_ioctl. The exception is vhost_vdpa_open which does a device reset but that should be safe because it can only happen before the other ops. Signed-off-by: Dragos Tatulea Suggested-by: Eugenio Pérez Message-Id: <20231225134210.151540-2-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vdpa.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index a51c69c078d9..5f046770c0a4 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -59,6 +59,7 @@ struct vhost_vdpa { int in_batch; struct vdpa_iova_range range; u32 batch_asid; + bool suspended; }; static DEFINE_IDA(vhost_vdpa_ida); @@ -232,6 +233,8 @@ static int _compat_vdpa_reset(struct vhost_vdpa *v) struct vdpa_device *vdpa = v->vdpa; u32 flags = 0; + v->suspended = false; + if (v->vdev.vqs) { flags |= !vhost_backend_has_feature(v->vdev.vqs[0], VHOST_BACKEND_F_IOTLB_PERSIST) ? @@ -590,11 +593,16 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + int ret; if (!ops->suspend) return -EOPNOTSUPP; - return ops->suspend(vdpa); + ret = ops->suspend(vdpa); + if (!ret) + v->suspended = true; + + return ret; } /* After a successful return of this ioctl the device resumes processing @@ -605,11 +613,16 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + int ret; if (!ops->resume) return -EOPNOTSUPP; - return ops->resume(vdpa); + ret = ops->resume(vdpa); + if (!ret) + v->suspended = false; + + return ret; } static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, From a09483c4065fe1e14812f2371cee1cba78a13d60 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 15:42:10 +0200 Subject: [PATCH 10/18] vdpa: Block vq property changes in DRIVER_OK MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The virtio standard doesn't allow for virtqueue address and state changes when the device is in DRIVER_OK. Return an error in such cases unless the device is suspended. The suspended device exception is needed because some devices support virtqueue changes when the device is suspended. Signed-off-by: Dragos Tatulea Suggested-by: Eugenio Pérez Message-Id: <20231225134210.151540-3-dtatulea@nvidia.com> Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/vdpa.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 5f046770c0a4..5c3e019c0122 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -703,6 +703,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, switch (cmd) { case VHOST_SET_VRING_ADDR: + if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) + return -EINVAL; + if (ops->set_vq_address(vdpa, idx, (u64)(uintptr_t)vq->desc, (u64)(uintptr_t)vq->avail, @@ -711,6 +714,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, break; case VHOST_SET_VRING_BASE: + if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended) + return -EINVAL; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff; vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000); From ef067191f73cce3ee192e991ce486d95524655d5 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:11:56 +0200 Subject: [PATCH 11/18] vdpa/mlx5: Expose resumable vq capability MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Necessary for checking if resumable vqs are supported by the hardware. Actual support will be added in a downstream patch. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-2-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- include/linux/mlx5/mlx5_ifc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6f3631425f38..9eaceaf6bcb0 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1236,7 +1236,8 @@ struct mlx5_ifc_virtio_emulation_cap_bits { u8 reserved_at_c0[0x13]; u8 desc_group_mkey_supported[0x1]; - u8 reserved_at_d4[0xc]; + u8 freeze_to_rdy_supported[0x1]; + u8 reserved_at_d5[0xb]; u8 reserved_at_e0[0x20]; From 651cdaa9c028b1edf0897e10f560fed4f4fb1fb6 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:11:57 +0200 Subject: [PATCH 12/18] vdpa/mlx5: Allow modifying multiple vq fields in one modify command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a bitmask variable that tracks hw vq field changes that are supposed to be modified on next hw vq change command. This will be useful to set multiple vq fields when resuming the vq. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Acked-by: Jason Wang Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-3-dtatulea@nvidia.com> Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/mlx5/net/mlx5_vnet.c | 48 +++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 26ba7da6b410..1e08a8805640 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -120,6 +120,9 @@ struct mlx5_vdpa_virtqueue { u16 avail_idx; u16 used_idx; int fw_state; + + u64 modified_fields; + struct msi_map map; /* keep last in the struct */ @@ -1181,7 +1184,19 @@ static bool is_valid_state_change(int oldstate, int newstate) } } -static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) +static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq) +{ + /* Only state is always modifiable */ + if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE) + return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT || + mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; + + return true; +} + +static int modify_virtqueue(struct mlx5_vdpa_net *ndev, + struct mlx5_vdpa_virtqueue *mvq, + int state) { int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; @@ -1193,6 +1208,9 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE) return 0; + if (!modifiable_virtqueue_fields(mvq)) + return -EINVAL; + if (!is_valid_state_change(mvq->fw_state, state)) return -EINVAL; @@ -1208,17 +1226,28 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context); - MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, - MLX5_VIRTQ_MODIFY_MASK_STATE); - MLX5_SET(virtio_net_q_object, obj_context, state, state); + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) + MLX5_SET(virtio_net_q_object, obj_context, state, state); + + MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); kfree(in); if (!err) mvq->fw_state = state; + mvq->modified_fields = 0; + return err; } +static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev, + struct mlx5_vdpa_virtqueue *mvq, + unsigned int state) +{ + mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; + return modify_virtqueue(ndev, mvq, state); +} + static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) { u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {}; @@ -1347,7 +1376,7 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) goto err_vq; if (mvq->ready) { - err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); + err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); if (err) { mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", idx, err); @@ -1382,7 +1411,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) return; - if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) + if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); if (query_virtqueue(ndev, mvq, &attr)) { @@ -1407,6 +1436,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, 
struct mlx5_vdpa_virtqueue * return; suspend_vq(ndev, mvq); + mvq->modified_fields = 0; destroy_virtqueue(ndev, mvq); dealloc_vector(ndev, mvq); counter_set_dealloc(ndev, mvq); @@ -2207,7 +2237,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready if (!ready) { suspend_vq(ndev, mvq); } else { - err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); + err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); if (err) { mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err); ready = false; @@ -2804,8 +2834,10 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) { int i; - for (i = 0; i < ndev->mvdev.max_vqs; i++) + for (i = 0; i < ndev->mvdev.max_vqs; i++) { ndev->vqs[i].ready = false; + ndev->vqs[i].modified_fields = 0; + } ndev->mvdev.cvq.ready = false; } From 145096937b8a6a8d5889f5a1a6fb453c52cc48a1 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:11:58 +0200 Subject: [PATCH 13/18] vdpa/mlx5: Introduce per vq and device resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement vdpa vq and device resume if capability detected. Add support for suspend -> ready state change. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Acked-by: Jason Wang Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-4-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/mlx5/net/mlx5_vnet.c | 69 +++++++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 7 deletions(-) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 1e08a8805640..f8f088cced50 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1170,7 +1170,12 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu return err; } -static bool is_valid_state_change(int oldstate, int newstate) +static bool is_resumable(struct mlx5_vdpa_net *ndev) +{ + return ndev->mvdev.vdev.config->resume; +} + +static bool is_valid_state_change(int oldstate, int newstate, bool resumable) { switch (oldstate) { case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT: @@ -1178,6 +1183,7 @@ static bool is_valid_state_change(int oldstate, int newstate) case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY: return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND: + return resumable ? 
newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false; case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR: default: return false; @@ -1200,6 +1206,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, { int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; + bool state_change = false; void *obj_context; void *cmd_hdr; void *in; @@ -1211,9 +1218,6 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, if (!modifiable_virtqueue_fields(mvq)) return -EINVAL; - if (!is_valid_state_change(mvq->fw_state, state)) - return -EINVAL; - in = kzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1226,17 +1230,29 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context); - if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) + + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) { + if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { + err = -EINVAL; + goto done; + } + MLX5_SET(virtio_net_q_object, obj_context, state, state); + state_change = true; + } MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); - kfree(in); - if (!err) + if (err) + goto done; + + if (state_change) mvq->fw_state = state; mvq->modified_fields = 0; +done: + kfree(in); return err; } @@ -1430,6 +1446,24 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev) suspend_vq(ndev, &ndev->vqs[i]); } +static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + if (!mvq->initialized || !is_resumable(ndev)) + return; + + if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND) + return; + + if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)) + mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index); +} + +static void resume_vqs(struct mlx5_vdpa_net *ndev) +{ + for (int i = 0; i < ndev->mvdev.max_vqs; i++) + resume_vq(ndev, &ndev->vqs[i]); +} + static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) { if (!mvq->initialized) @@ -3261,6 +3295,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) return 0; } +static int mlx5_vdpa_resume(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev; + + ndev = to_mlx5_vdpa_ndev(mvdev); + + mlx5_vdpa_info(mvdev, "resuming device\n"); + + down_write(&ndev->reslock); + mvdev->suspended = false; + resume_vqs(ndev); + register_link_notifier(ndev); + up_write(&ndev->reslock); + return 0; +} + static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group, unsigned int asid) { @@ -3317,6 +3368,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = { .get_vq_dma_dev = mlx5_get_vq_dma_dev, .free = mlx5_vdpa_free, .suspend = mlx5_vdpa_suspend, + .resume = mlx5_vdpa_resume, /* Op disabled if not supported. 
*/ }; static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu) @@ -3688,6 +3740,9 @@ static int mlx5v_probe(struct auxiliary_device *adev, if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported)) mgtdev->vdpa_ops.get_vq_desc_group = NULL; + if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported)) + mgtdev->vdpa_ops.resume = NULL; + err = vdpa_mgmtdev_register(&mgtdev->mgtdev); if (err) goto reg_err; From 9b23417825df470e4c9e98e7ed4b2c37465bfa1e Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:11:59 +0200 Subject: [PATCH 14/18] vdpa/mlx5: Mark vq addrs for modification in hw vq Addresses get set by .set_vq_address. hw vq addresses will be updated on next modify_virtqueue. Reviewed-by: Gal Pressman Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-5-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/mlx5/net/mlx5_vnet.c | 9 +++++++++ include/linux/mlx5/mlx5_ifc_vdpa.h | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index f8f088cced50..80e066de0866 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1209,6 +1209,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, bool state_change = false; void *obj_context; void *cmd_hdr; + void *vq_ctx; void *in; int err; @@ -1230,6 +1231,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context); + vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context); if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) { if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { @@ -1241,6 +1243,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, state_change = true; } + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { + MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); + MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); + MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); + } + MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) @@ -2202,6 +2210,7 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_ mvq->desc_addr = desc_area; mvq->device_addr = device_area; mvq->driver_addr = driver_area; + mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS; return 0; } diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index b86d51a855f6..9594ac405740 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -145,6 +145,7 @@ enum { MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6, MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; From 60c43b3f6b4eb5a3d672952a0d65991f414ea258 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:12:00 +0200 Subject: [PATCH 15/18] vdpa/mlx5: Mark vq state for modification in hw vq .set_vq_state will set the indices and mark the fields to be modified in the hw vq. Advertise that the device supports changing the vq state when the device is in DRIVER_OK state and suspended. 
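For reference, a minimal sketch of the accumulate-then-flush pattern that patches 12-15 build up (illustrative names and standalone C, not the driver's actual structures):

#include <stdint.h>

/* Illustrative masks; the real ones live in mlx5_ifc_vdpa.h. */
#define MOD_AVAIL_IDX (UINT64_C(1) << 7)
#define MOD_USED_IDX  (UINT64_C(1) << 8)

struct hw_vq {
	uint64_t modified_fields; /* flushed by the next modify command */
	uint16_t avail_idx;
	uint16_t used_idx;
};

/* Setting state only records the values and marks the fields dirty;
 * a single later modify command applies every accumulated change. */
static void set_vq_state(struct hw_vq *vq, uint16_t idx)
{
	vq->avail_idx = idx;
	vq->used_idx = idx;
	vq->modified_fields |= MOD_AVAIL_IDX | MOD_USED_IDX;
}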
Reviewed-by: Gal Pressman Signed-off-by: Dragos Tatulea Acked-by: Jason Wang Message-Id: <20231225151203.152687-6-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/mlx5/net/mlx5_vnet.c | 8 ++++++++ include/linux/mlx5/mlx5_ifc_vdpa.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 80e066de0866..d6c8506cec8f 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1249,6 +1249,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); } + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX) + MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); + + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) + MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); + MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) @@ -2328,6 +2334,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, mvq->used_idx = state->split.avail_index; mvq->avail_idx = state->split.avail_index; + mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX | + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX; return 0; } diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 9594ac405740..32e712106e68 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -146,6 +146,8 @@ enum { MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8, MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; From f756dd3e2a4c704c0ab5ecb143ab71f1249af497 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:12:01 +0200 Subject: [PATCH 16/18] vdpa/mlx5: Use vq suspend/resume during .set_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of tearing down and setting up vq resources, use vq suspend/resume during .set_map to speed things up a bit. The vq mr is updated with the new mapping while the vqs are suspended. If the device doesn't support resumable vqs, do the old teardown and setup dance. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Acked-by: Jason Wang Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-7-dtatulea@nvidia.com> Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/mlx5/net/mlx5_vnet.c | 46 ++++++++++++++++++++++++------ include/linux/mlx5/mlx5_ifc_vdpa.h | 1 + 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index d6c8506cec8f..6a21223d97a8 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1206,6 +1206,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, { int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; + struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; bool state_change = false; void *obj_context; void *cmd_hdr; @@ -1255,6 +1256,24 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { + struct mlx5_vdpa_mr *mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; + + if (mr) + MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, mr->mkey); + else + mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; + } + + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { + struct mlx5_vdpa_mr *mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; + + if (mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) + MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, mr->mkey); + else + mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; + } + MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) @@ -2784,24 +2803,35 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, unsigned int asid) { struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + bool teardown = !is_resumable(ndev); int err; suspend_vqs(ndev); - err = save_channels_info(ndev); - if (err) - return err; + if (teardown) { + err = save_channels_info(ndev); + if (err) + return err; - teardown_driver(ndev); + teardown_driver(ndev); + } mlx5_vdpa_update_mr(mvdev, new_mr, asid); + for (int i = 0; i < ndev->cur_num_vqs; i++) + ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY | + MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; + if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) return 0; - restore_channels_info(ndev); - err = setup_driver(mvdev); - if (err) - return err; + if (teardown) { + restore_channels_info(ndev); + err = setup_driver(mvdev); + if (err) + return err; + } + + resume_vqs(ndev); return 0; } diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 32e712106e68..40371c916cf9 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -148,6 +148,7 @@ enum { MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11, MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; From a06bd11b18fdf2d40e2b81d4318abc6cc38e70c9 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:12:02 +0200 Subject: [PATCH 17/18] vdpa/mlx5: Introduce reference counting to mrs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Deleting the old mr during mr update (.set_map) and then modifying the vqs with the new 
mr is not a good flow for firmware. The firmware expects that mkeys are deleted after there are no more vqs referencing them. Introduce reference counting for mrs to fix this. It is the only way to make sure that mkeys are not in use by vqs. An mr reference is taken when the mr is associated with the mr asid table and when the mr is linked to the vq on create/modify. The reference is released when the mkey is unlinked from the vq (through modify/destroy) and from the mr asid table. To make things consistent, get rid of mlx5_vdpa_destroy_mr and use get/put semantics everywhere. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Acked-by: Jason Wang Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-8-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 8 +++-- drivers/vdpa/mlx5/core/mr.c | 54 ++++++++++++++++++++---------- drivers/vdpa/mlx5/net/mlx5_vnet.c | 45 +++++++++++++++++++++---- 3 files changed, 80 insertions(+), 27 deletions(-) diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 84547d998bcf..1a0d27b6e09a 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -35,6 +35,8 @@ struct mlx5_vdpa_mr { struct vhost_iotlb *iotlb; bool user_mr; + + refcount_t refcount; }; struct mlx5_vdpa_resources { @@ -118,8 +120,10 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey); struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb); void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev); -void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, - struct mlx5_vdpa_mr *mr); +void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr); +void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr); void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr, unsigned int asid); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 2197c46e563a..c7dc8914354a 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -498,32 +498,52 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { + if (WARN_ON(!mr)) + return; + if (mr->user_mr) destroy_user_mr(mvdev, mr); else destroy_dma_mr(mvdev, mr); vhost_iotlb_free(mr->iotlb); + + kfree(mr); } -void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, - struct mlx5_vdpa_mr *mr) +static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr) { if (!mr) return; + if (refcount_dec_and_test(&mr->refcount)) + _mlx5_vdpa_destroy_mr(mvdev, mr); +} + +void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr) +{ mutex_lock(&mvdev->mr_mtx); - - _mlx5_vdpa_destroy_mr(mvdev, mr); - - for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) { - if (mvdev->mr[i] == mr) - mvdev->mr[i] = NULL; - } - + _mlx5_vdpa_put_mr(mvdev, mr); mutex_unlock(&mvdev->mr_mtx); +} - kfree(mr); +static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr) +{ + if (!mr) + return; + + refcount_inc(&mr->refcount); +} + +void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_mr *mr) +{ + mutex_lock(&mvdev->mr_mtx); + _mlx5_vdpa_get_mr(mvdev, mr); + mutex_unlock(&mvdev->mr_mtx); } void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev, @@ -534,20 +554,16 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
mutex_lock(&mvdev->mr_mtx); + _mlx5_vdpa_put_mr(mvdev, old_mr); mvdev->mr[asid] = new_mr; - if (old_mr) { - _mlx5_vdpa_destroy_mr(mvdev, old_mr); - kfree(old_mr); - } mutex_unlock(&mvdev->mr_mtx); - } void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev) { for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) - mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]); + mlx5_vdpa_update_mr(mvdev, NULL, i); prune_iotlb(mvdev->cvq.iotlb); } @@ -607,6 +623,8 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, if (err) goto out_err; + refcount_set(&mr->refcount, 1); + return mr; out_err: @@ -651,7 +669,7 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) if (asid >= MLX5_VDPA_NUM_AS) return -EINVAL; - mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]); + mlx5_vdpa_update_mr(mvdev, NULL, asid); if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { if (mlx5_vdpa_create_dma_mr(mvdev)) diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 6a21223d97a8..133cbb66dcfe 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -123,6 +123,9 @@ struct mlx5_vdpa_virtqueue { u64 modified_fields; + struct mlx5_vdpa_mr *vq_mr; + struct mlx5_vdpa_mr *desc_mr; + struct msi_map map; /* keep last in the struct */ @@ -946,6 +949,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque kfree(in); mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + mlx5_vdpa_get_mr(mvdev, vq_mr); + mvq->vq_mr = vq_mr; + + if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) { + mlx5_vdpa_get_mr(mvdev, vq_desc_mr); + mvq->desc_mr = vq_desc_mr; + } + return 0; err_cmd: @@ -972,6 +983,12 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq } mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; umems_destroy(ndev, mvq); + + mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); + mvq->vq_mr = NULL; + + mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); + mvq->desc_mr = NULL; } static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) @@ -1207,6 +1224,8 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; + struct mlx5_vdpa_mr *desc_mr = NULL; + struct mlx5_vdpa_mr *vq_mr = NULL; bool state_change = false; void *obj_context; void *cmd_hdr; @@ -1257,19 +1276,19 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { - struct mlx5_vdpa_mr *mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; + vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; - if (mr) - MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, mr->mkey); + if (vq_mr) + MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); else mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; } if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { - struct mlx5_vdpa_mr *mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; + desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; - if (mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) - MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, mr->mkey); + if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) + MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, 
desc_mr->mkey); else mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; } @@ -1282,6 +1301,18 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, if (state_change) mvq->fw_state = state; + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { + mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); + mlx5_vdpa_get_mr(mvdev, vq_mr); + mvq->vq_mr = vq_mr; + } + + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { + mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); + mlx5_vdpa_get_mr(mvdev, desc_mr); + mvq->desc_mr = desc_mr; + } + mvq->modified_fields = 0; done: @@ -3095,7 +3126,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid); out_err: - mlx5_vdpa_destroy_mr(mvdev, new_mr); + mlx5_vdpa_put_mr(mvdev, new_mr); return err; } From f16d65124380ac6de8055c4a8e5373a1043bb09b Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Mon, 25 Dec 2023 17:12:03 +0200 Subject: [PATCH 18/18] vdpa/mlx5: Add mkey leak detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Track allocated mrs in a list and show warning when leaks are detected on device free or reset. Reviewed-by: Gal Pressman Acked-by: Eugenio Pérez Signed-off-by: Dragos Tatulea Message-Id: <20231225151203.152687-9-dtatulea@nvidia.com> Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2 ++ drivers/vdpa/mlx5/core/mr.c | 23 +++++++++++++++++++++++ drivers/vdpa/mlx5/net/mlx5_vnet.c | 2 ++ 3 files changed, 27 insertions(+) diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 1a0d27b6e09a..50aac8fe57ef 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -37,6 +37,7 @@ struct mlx5_vdpa_mr { bool user_mr; refcount_t refcount; + struct list_head mr_list; }; struct mlx5_vdpa_resources { @@ -95,6 +96,7 @@ struct mlx5_vdpa_dev { u32 generation; struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS]; + struct list_head mr_list_head; /* serialize mr access */ struct mutex mr_mtx; struct mlx5_control_vq cvq; diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index c7dc8914354a..4758914ccf86 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -508,6 +508,8 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_ vhost_iotlb_free(mr->iotlb); + list_del(&mr->mr_list); + kfree(mr); } @@ -560,12 +562,31 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev, mutex_unlock(&mvdev->mr_mtx); } +static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_vdpa_mr *mr; + + mutex_lock(&mvdev->mr_mtx); + + list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) { + + mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: " + "mr: %p, mkey: 0x%x, refcount: %u\n", + mr, mr->mkey, refcount_read(&mr->refcount)); + } + + mutex_unlock(&mvdev->mr_mtx); + +} + void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev) { for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) mlx5_vdpa_update_mr(mvdev, NULL, i); prune_iotlb(mvdev->cvq.iotlb); + + mlx5_vdpa_show_mr_leaks(mvdev); } static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, @@ -592,6 +613,8 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, if (err) goto err_iotlb; + list_add_tail(&mr->mr_list, &mvdev->mr_list_head); + return 0; err_iotlb: diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 133cbb66dcfe..778821bab7d9 100644 --- 
a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -3722,6 +3722,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, if (err) goto err_mpfs; + INIT_LIST_HEAD(&mvdev->mr_list_head); + if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { err = mlx5_vdpa_create_dma_mr(mvdev); if (err)