Merge branch 'vfio-no-iommu' into iommufd.git for-next

Shared branch with VFIO for the no-iommu support.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Jason Gunthorpe 2023-02-03 15:45:57 -04:00
commit bed9e516f1
10 changed files with 136 additions and 35 deletions

View File

@ -23,7 +23,7 @@ config IOMMUFD_VFIO_CONTAINER
removed.
IOMMUFD VFIO container emulation is known to lack certain features
of the native VFIO container, such as no-IOMMU support, peer-to-peer
of the native VFIO container, such as peer-to-peer
DMA mapping, PPC IOMMU support, as well as other potentially
undiscovered gaps. This option is currently intended for the
purpose of testing IOMMUFD with unmodified userspace supporting VFIO

View File

@ -18,6 +18,8 @@ struct iommufd_ctx {
struct xarray objects;
u8 account_mode;
/* Compatibility with VFIO no iommu */
u8 no_iommu_mode;
struct iommufd_ioas *vfio_ioas;
};

View File

@ -26,39 +26,84 @@ out_unlock:
}
/**
* iommufd_vfio_compat_ioas_id - Return the IOAS ID that vfio should use
* iommufd_vfio_compat_ioas_get_id - Ensure a compat IOAS exists
* @ictx: Context to operate on
* @out_ioas_id: The IOAS ID of the compatibility IOAS
*
* Return the ID of the current compatibility IOAS. The ID can be passed into
* other functions that take an ioas_id.
*/
int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
{
	/* Create-or-lookup of the compatibility IOAS for this context. */
	struct iommufd_ioas *compat_ioas = get_compat_ioas(ictx);

	if (IS_ERR(compat_ioas))
		return PTR_ERR(compat_ioas);

	/* Hand the object ID back to the caller, then drop our reference. */
	*out_ioas_id = compat_ioas->obj.id;
	iommufd_put_object(&compat_ioas->obj);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, IOMMUFD_VFIO);
/**
* iommufd_vfio_compat_set_no_iommu - Called when a no-iommu device is attached
* @ictx: Context to operate on
*
* This allows selecting the VFIO_NOIOMMU_IOMMU and blocks normal types.
*/
int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
	/* Fail by default; no-iommu can only be selected before an IOAS exists. */
	int rc = -EINVAL;

	xa_lock(&ictx->objects);
	if (!ictx->vfio_ioas) {
		/* No compat IOAS assigned yet, so no-iommu mode may be latched. */
		ictx->no_iommu_mode = 1;
		rc = 0;
	}
	xa_unlock(&ictx->objects);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_set_no_iommu, IOMMUFD_VFIO);
/**
* iommufd_vfio_compat_ioas_create - Ensure the compat IOAS is created
* @ictx: Context to operate on
* @out_ioas_id: The ioas_id the caller should use
*
* The compatibility IOAS is the IOAS that the vfio compatibility ioctls operate
* on since they do not have an IOAS ID input in their ABI. Only attaching a
* group should cause a default creation of the internal ioas, this returns the
* existing ioas if it has already been assigned somehow.
* group should cause a default creation of the internal ioas, this does nothing
* if an existing ioas has already been assigned somehow.
*/
int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
struct iommufd_ioas *ioas = NULL;
struct iommufd_ioas *out_ioas;
int ret;
ioas = iommufd_ioas_alloc(ictx);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
xa_lock(&ictx->objects);
if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj))
out_ioas = ictx->vfio_ioas;
else {
out_ioas = ioas;
ictx->vfio_ioas = ioas;
/*
* VFIO won't allow attaching a container to both iommu and no iommu
* operation
*/
if (ictx->no_iommu_mode) {
ret = -EINVAL;
goto out_abort;
}
if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) {
ret = 0;
iommufd_put_object(&ictx->vfio_ioas->obj);
goto out_abort;
}
ictx->vfio_ioas = ioas;
xa_unlock(&ictx->objects);
*out_ioas_id = out_ioas->obj.id;
if (out_ioas != ioas) {
iommufd_put_object(&out_ioas->obj);
iommufd_object_abort(ictx, &ioas->obj);
return 0;
}
/*
* An automatically created compat IOAS is treated as a userspace
* created object. Userspace can learn the ID via IOMMU_VFIO_IOAS_GET,
@ -67,8 +112,13 @@ int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
*/
iommufd_object_finalize(ictx, &ioas->obj);
return 0;
out_abort:
xa_unlock(&ictx->objects);
iommufd_object_abort(ictx, &ioas->obj);
return ret;
}
EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_id, IOMMUFD_VFIO);
EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_create, IOMMUFD_VFIO);
int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
{
@ -235,6 +285,9 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
case VFIO_UNMAP_ALL:
return 1;
case VFIO_NOIOMMU_IOMMU:
return IS_ENABLED(CONFIG_VFIO_NOIOMMU);
case VFIO_DMA_CC_IOMMU:
return iommufd_vfio_cc_iommu(ictx);
@ -261,10 +314,24 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
{
bool no_iommu_mode = READ_ONCE(ictx->no_iommu_mode);
struct iommufd_ioas *ioas = NULL;
int rc = 0;
if (type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU)
/*
* Emulation for NOIOMMU is imperfect in that VFIO blocks almost all
* other ioctls. We let them keep working but they mostly fail since no
* IOAS should exist.
*/
if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) && type == VFIO_NOIOMMU_IOMMU &&
no_iommu_mode) {
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
return 0;
}
if ((type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU) ||
no_iommu_mode)
return -EINVAL;
/* VFIO fails the set_iommu if there is no group */

View File

@ -32,6 +32,7 @@ config VFIO_IOMMU_SPAPR_TCE
tristate
depends on SPAPR_TCE_IOMMU
default VFIO
endif
config VFIO_NOIOMMU
bool "VFIO No-IOMMU support"
@ -46,7 +47,6 @@ config VFIO_NOIOMMU
this mode since there is no IOMMU to provide DMA translation.
If you don't know what to do here, say N.
endif
config VFIO_VIRQFD
bool

View File

@ -29,13 +29,6 @@ static struct vfio {
struct mutex iommu_drivers_lock;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
static void *vfio_noiommu_open(unsigned long arg)
{
if (arg != VFIO_NOIOMMU_IOMMU)

View File

@ -133,9 +133,12 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
iommufd = iommufd_ctx_from_file(f.file);
if (!IS_ERR(iommufd)) {
u32 ioas_id;
if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
group->type == VFIO_NO_IOMMU)
ret = iommufd_vfio_compat_set_no_iommu(iommufd);
else
ret = iommufd_vfio_compat_ioas_create(iommufd);
ret = iommufd_vfio_compat_ioas_id(iommufd, &ioas_id);
if (ret) {
iommufd_ctx_put(group->iommufd);
goto out_unlock;

View File

@ -18,6 +18,20 @@ int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
lockdep_assert_held(&vdev->dev_set->lock);
if (vfio_device_is_noiommu(vdev)) {
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* Require no compat ioas to be assigned to proceed. The basic
* statement is that the user cannot have done something that
* implies they expected translation to exist
*/
if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
return -EPERM;
return 0;
}
/*
* If the driver doesn't provide this op then it means the device does
* not do DMA at all. So nothing to do.
@ -29,7 +43,7 @@ int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
if (ret)
return ret;
ret = iommufd_vfio_compat_ioas_id(ictx, &ioas_id);
ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
if (ret)
goto err_unbind;
ret = vdev->ops->attach_ioas(vdev, &ioas_id);
@ -52,6 +66,9 @@ void vfio_iommufd_unbind(struct vfio_device *vdev)
{
lockdep_assert_held(&vdev->dev_set->lock);
if (vfio_device_is_noiommu(vdev))
return;
if (vdev->ops->unbind_iommufd)
vdev->ops->unbind_iommufd(vdev);
}

View File

@ -10,10 +10,10 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>
struct iommufd_ctx;
struct iommu_group;
struct vfio_device;
struct vfio_container;
void vfio_device_put_registration(struct vfio_device *device);
@ -88,6 +88,12 @@ bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
vdev->group->type == VFIO_NO_IOMMU;
}
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {

View File

@ -45,6 +45,13 @@ static struct vfio {
struct ida device_ida;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
static DEFINE_XARRAY(vfio_device_set_xa);
int vfio_assign_device_set(struct vfio_device *device, void *set_id)

View File

@ -57,7 +57,9 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
void *data, size_t len, unsigned int flags);
int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
#else /* !CONFIG_IOMMUFD */
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
@ -89,8 +91,12 @@ static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long
return -EOPNOTSUPP;
}
static inline int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx,
u32 *out_ioas_id)
/* CONFIG_IOMMUFD disabled: the VFIO-compat IOAS cannot be created. */
static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
return -EOPNOTSUPP;
}
/* CONFIG_IOMMUFD disabled: no-iommu compat mode cannot be selected. */
static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
return -EOPNOTSUPP;
}