linux-stable/drivers/iommu/iommu-sva-lib.h
Lu Baolu 8cc93159f9 iommu: Prepare IOMMU domain for IOPF
This adds the mechanisms around the iommu_domain so that the I/O page
fault handling framework can route a page fault to the domain and
call the fault handler installed on it.

Add pointers to the page fault handler and its private data in struct
iommu_domain. The fault handler is called with the private data as a
parameter once a page fault is routed to the domain. Any kernel
component that owns an iommu domain can install a handler and its
private data so that page faults can be further routed and handled.
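
As a sketch of the shape this takes (the field names follow the
description above, and the helper function is purely illustrative,
not necessarily what the diff itself adds):

	/* Illustrative sketch of per-domain fault routing. */
	struct iommu_domain {
		/* ... existing members ... */
		enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
							      void *data);
		void *fault_data;
	};

	/*
	 * A domain owner installs the handler together with its private
	 * data; for SVA the private data is the mm the domain is bound to.
	 */
	static void sva_install_iopf_handler(struct iommu_domain *domain,
					     struct mm_struct *mm)
	{
		domain->iopf_handler = iommu_sva_handle_iopf;
		domain->fault_data = mm;
	}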

This also prepares the SVA implementation to be the first consumer of
the per-domain page fault handling model. The I/O page fault handler
for SVA is copied to the SVA file, with an mmget_not_zero() call added
before mmap_read_lock() so that the handler never operates on an mm
whose users count has already dropped to zero (i.e. an address space
that is being torn down).
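
The mmget_not_zero() matters because the handler runs asynchronously
from the IOPF workqueue rather than in the context of the faulting
process. A minimal sketch of the resulting pattern (names and error
handling are condensed here, not the exact code from the patch):

	static enum iommu_page_response_code
	sva_handle_mm_fault_sketch(struct mm_struct *mm, unsigned long addr)
	{
		enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
		struct vm_area_struct *vma;

		/* Bail out if the address space is already being torn down. */
		if (!mmget_not_zero(mm))
			return status;

		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (vma && vma->vm_start <= addr &&
		    !(handle_mm_fault(vma, addr,
				      FAULT_FLAG_USER | FAULT_FLAG_REMOTE,
				      NULL) & VM_FAULT_ERROR))
			status = IOMMU_PAGE_RESP_SUCCESS;
		mmap_read_unlock(mm);

		mmput(mm);
		return status;
	}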

Suggested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Tested-by: Tony Zhu <tony.zhu@intel.com>
Link: https://lore.kernel.org/r/20221031005917.45690-12-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2022-11-03 15:47:52 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * SVA library for IOMMU drivers
 */
#ifndef _IOMMU_SVA_LIB_H
#define _IOMMU_SVA_LIB_H

#include <linux/ioasid.h>
#include <linux/mm_types.h>

int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
struct mm_struct *iommu_sva_find(ioasid_t pasid);

/* I/O Page fault */
struct device;
struct iommu_fault;
struct iopf_queue;

#ifdef CONFIG_IOMMU_SVA
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);

int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_remove_device(struct iopf_queue *queue,
			     struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);

enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);

#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	return -ENODEV;
}

static inline int iopf_queue_add_device(struct iopf_queue *queue,
					struct device *dev)
{
	return -ENODEV;
}

static inline int iopf_queue_remove_device(struct iopf_queue *queue,
					   struct device *dev)
{
	return -ENODEV;
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	return IOMMU_PAGE_RESP_INVALID;
}
#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
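
For context, a typical consumer of the iopf_queue API above is an
IOMMU driver with PRI or stall support. The sequence below is a
hedged sketch of the intended call flow, not code from this patch;
the example_* names are made up, and the device fault handler
registration via iommu_register_device_fault_handler() predates this
change:

	/*
	 * Sketch only.  At driver init/exit:
	 *   example_iopf_queue = iopf_queue_alloc("example");
	 *   ...
	 *   iopf_queue_free(example_iopf_queue);
	 */
	static struct iopf_queue *example_iopf_queue;

	static int example_enable_iopf(struct device *dev)
	{
		int ret;

		/* One queue can serve faults from many devices. */
		ret = iopf_queue_add_device(example_iopf_queue, dev);
		if (ret)
			return ret;

		/* Route the device's faults into iommu_queue_iopf(). */
		return iommu_register_device_fault_handler(dev, iommu_queue_iopf,
							   dev);
	}

	static void example_disable_iopf(struct device *dev)
	{
		iopf_queue_flush_dev(dev);	/* drain pending faults */
		iommu_unregister_device_fault_handler(dev);
		iopf_queue_remove_device(example_iopf_queue, dev);
	}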