vhost-vdpa: pass iotlb to IOMMU mapping helpers

To prepare for ASID support in vhost-vdpa, pass the IOTLB object to the
DMA mapping helpers explicitly instead of fetching it from the vhost
device. No functional changes; this is just preparation for supporting
multiple IOTLBs.
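
For illustration only (not part of the patch), here is a minimal
user-space sketch of the calling-convention change; every name in it is
invented for the sketch. The helpers stop dereferencing the single
device-wide IOTLB and instead operate on whichever IOTLB the caller
hands them, which is what later allows one IOTLB per ASID.

  /* Standalone sketch; illustrative names only, not kernel code. */
  #include <stdint.h>
  #include <stdio.h>

  struct demo_iotlb {
  	int id;				/* stands in for a per-ASID IOTLB */
  };

  struct demo_dev {
  	struct demo_iotlb *iotlb;	/* old model: one device-wide IOTLB */
  };

  /* Before: the helper can only ever touch dev->iotlb. */
  static void demo_map_old(struct demo_dev *dev, uint64_t iova, uint64_t size)
  {
  	printf("old: map %#llx+%#llx via iotlb %d\n",
  	       (unsigned long long)iova, (unsigned long long)size,
  	       dev->iotlb->id);
  }

  /* After: the caller chooses which IOTLB the helper operates on. */
  static void demo_map_new(struct demo_iotlb *iotlb, uint64_t iova,
  			 uint64_t size)
  {
  	printf("new: map %#llx+%#llx via iotlb %d\n",
  	       (unsigned long long)iova, (unsigned long long)size,
  	       iotlb->id);
  }

  int main(void)
  {
  	struct demo_iotlb as0 = { .id = 0 }, as1 = { .id = 1 };
  	struct demo_dev dev = { .iotlb = &as0 };

  	demo_map_old(&dev, 0x1000, 0x1000);	/* always dev->iotlb */
  	demo_map_new(&as0, 0x1000, 0x1000);	/* explicit IOTLB ... */
  	demo_map_new(&as1, 0x2000, 0x1000);	/* ... enables several */
  	return 0;
  }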

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Gautam Dawar <gdawar@xilinx.com>
Message-Id: <20220330180436.24644-4-gdawar@xilinx.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Gautam Dawar 2022-03-30 23:33:43 +05:30, committed by Michael S. Tsirkin
parent ea239a6746
commit ae967246d0
1 changed file with 37 additions and 30 deletions


@@ -537,10 +537,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	return r;
 }
 
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
 	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct page *page;
 	unsigned long pfn, pinned;
@@ -559,10 +560,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
-	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
@@ -574,21 +575,24 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+				   struct vhost_iotlb *iotlb,
+				   u64 start, u64 last)
 {
 	struct vdpa_device *vdpa = v->vdpa;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_unmap(v, start, last);
+		return vhost_vdpa_va_unmap(v, iotlb, start, last);
 
-	return vhost_vdpa_pa_unmap(v, start, last);
+	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
 }
 
 static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 {
 	struct vhost_dev *dev = &v->vdev;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 
-	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
 	kfree(dev->iotlb);
 	dev->iotlb = NULL;
 }
@@ -615,15 +619,15 @@ static int perm_to_iommu_flags(u32 perm)
 	return flags | IOMMU_CACHE;
 }
 
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
-			  u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 	int r = 0;
 
-	r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
 				      pa, perm, opaque);
 	if (r)
 		return r;
@@ -632,13 +636,13 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 		r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			r = ops->set_map(vdpa, dev->iotlb);
+			r = ops->set_map(vdpa, iotlb);
 	} else {
 		r = iommu_map(v->domain, iova, pa, size,
 			      perm_to_iommu_flags(perm));
 	}
 
 	if (r) {
-		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
 		return r;
 	}
@@ -648,25 +652,27 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 	return 0;
 }
 
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
+			     u64 iova, u64 size)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 
-	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
 
 	if (ops->dma_map) {
 		ops->dma_unmap(vdpa, iova, size);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 	} else {
 		iommu_unmap(v->domain, iova, size);
 	}
 }
 
 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -696,7 +702,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 			offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
 			map_file->offset = offset;
 			map_file->file = get_file(vma->vm_file);
-			ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+			ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
 					     perm, map_file);
 			if (ret) {
 				fput(map_file->file);
@@ -709,7 +715,7 @@ next:
 		map_iova += map_size;
 	}
 	if (ret)
-		vhost_vdpa_unmap(v, iova, map_iova - iova);
+		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
 
 	mmap_read_unlock(dev->mm);
@@ -717,6 +723,7 @@ next:
 }
 
 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -780,7 +787,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 			if (last_pfn && (this_pfn != last_pfn + 1)) {
 				/* Pin a contiguous chunk of memory */
 				csize = PFN_PHYS(last_pfn - map_pfn + 1);
-				ret = vhost_vdpa_map(v, iova, csize,
+				ret = vhost_vdpa_map(v, iotlb, iova, csize,
 						     PFN_PHYS(map_pfn),
 						     perm, NULL);
 				if (ret) {
@@ -810,7 +817,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 	}
 
 	/* Pin the rest chunk */
-	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
 			     PFN_PHYS(map_pfn), perm, NULL);
 out:
 	if (ret) {
@@ -830,7 +837,7 @@ out:
 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
 				unpin_user_page(pfn_to_page(pfn));
 		}
-		vhost_vdpa_unmap(v, start, size);
+		vhost_vdpa_unmap(v, iotlb, start, size);
 	}
 unlock:
 	mmap_read_unlock(dev->mm);
@@ -841,11 +848,10 @@ free:
 }
 
 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+					   struct vhost_iotlb *iotlb,
 					   struct vhost_iotlb_msg *msg)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 
 	if (msg->iova < v->range.first || !msg->size ||
 	    msg->iova > U64_MAX - msg->size + 1 ||
@@ -857,10 +863,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		return -EEXIST;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_map(v, msg->iova, msg->size,
+		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
 					 msg->uaddr, msg->perm);
 
-	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
 				 msg->perm);
 }
@@ -870,6 +876,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 	int r = 0;
 
 	mutex_lock(&dev->mutex);
@@ -880,17 +887,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
-		r = vhost_vdpa_process_iotlb_update(v, msg);
+		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
 		break;
 	case VHOST_IOTLB_INVALIDATE:
-		vhost_vdpa_unmap(v, msg->iova, msg->size);
+		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
 		break;
 	case VHOST_IOTLB_BATCH_BEGIN:
 		v->in_batch = true;
 		break;
 	case VHOST_IOTLB_BATCH_END:
 		if (v->in_batch && ops->set_map)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 		v->in_batch = false;
 		break;
 	default: