iommu/vt-d: Avoid using idr_for_each_entry()
idr_for_each_entry() is used to iterate over idr elements of a given type. It isn't suitable for the global pasid idr since the pasid idr consumer could specify different types of pointers to bind with a pasid. Cc: Ashok Raj <ashok.raj@intel.com> Cc: Jacob Pan <jacob.jun.pan@linux.intel.com> Cc: Kevin Tian <kevin.tian@intel.com> Cc: Liu Yi L <yi.l.liu@intel.com> Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Liu Yi L <yi.l.liu@intel.com> Reviewed-by: Peter Xu <peterx@redhat.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
562831747f
commit
51261aac51
|
@ -298,6 +298,7 @@ static const struct mmu_notifier_ops intel_mmuops = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static DEFINE_MUTEX(pasid_mutex);
|
static DEFINE_MUTEX(pasid_mutex);
|
||||||
|
static LIST_HEAD(global_svm_list);
|
||||||
|
|
||||||
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
|
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
|
||||||
{
|
{
|
||||||
|
@ -329,13 +330,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
|
||||||
|
|
||||||
mutex_lock(&pasid_mutex);
|
mutex_lock(&pasid_mutex);
|
||||||
if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
|
if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
|
||||||
int i;
|
struct intel_svm *t;
|
||||||
|
|
||||||
idr_for_each_entry(&iommu->pasid_idr, svm, i) {
|
list_for_each_entry(t, &global_svm_list, list) {
|
||||||
if (svm->mm != mm ||
|
if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
|
||||||
(svm->flags & SVM_FLAG_PRIVATE_PASID))
|
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
svm = t;
|
||||||
if (svm->pasid >= pasid_max) {
|
if (svm->pasid >= pasid_max) {
|
||||||
dev_warn(dev,
|
dev_warn(dev,
|
||||||
"Limited PASID width. Cannot use existing PASID %d\n",
|
"Limited PASID width. Cannot use existing PASID %d\n",
|
||||||
|
@ -404,6 +405,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
|
||||||
svm->mm = mm;
|
svm->mm = mm;
|
||||||
svm->flags = flags;
|
svm->flags = flags;
|
||||||
INIT_LIST_HEAD_RCU(&svm->devs);
|
INIT_LIST_HEAD_RCU(&svm->devs);
|
||||||
|
INIT_LIST_HEAD(&svm->list);
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
if (mm) {
|
if (mm) {
|
||||||
ret = mmu_notifier_register(&svm->notifier, mm);
|
ret = mmu_notifier_register(&svm->notifier, mm);
|
||||||
|
@ -430,6 +432,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
|
||||||
*/
|
*/
|
||||||
if (cap_caching_mode(iommu->cap))
|
if (cap_caching_mode(iommu->cap))
|
||||||
intel_flush_pasid_dev(svm, sdev, svm->pasid);
|
intel_flush_pasid_dev(svm, sdev, svm->pasid);
|
||||||
|
|
||||||
|
list_add_tail(&svm->list, &global_svm_list);
|
||||||
}
|
}
|
||||||
list_add_rcu(&sdev->list, &svm->devs);
|
list_add_rcu(&sdev->list, &svm->devs);
|
||||||
|
|
||||||
|
@ -485,6 +489,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
|
||||||
if (svm->mm)
|
if (svm->mm)
|
||||||
mmu_notifier_unregister(&svm->notifier, svm->mm);
|
mmu_notifier_unregister(&svm->notifier, svm->mm);
|
||||||
|
|
||||||
|
list_del(&svm->list);
|
||||||
|
|
||||||
/* We mandate that no page faults may be outstanding
|
/* We mandate that no page faults may be outstanding
|
||||||
* for the PASID when intel_svm_unbind_mm() is called.
|
* for the PASID when intel_svm_unbind_mm() is called.
|
||||||
* If that is not obeyed, subtle errors will happen.
|
* If that is not obeyed, subtle errors will happen.
|
||||||
|
|
|
@ -487,6 +487,7 @@ struct intel_svm {
|
||||||
int flags;
|
int flags;
|
||||||
int pasid;
|
int pasid;
|
||||||
struct list_head devs;
|
struct list_head devs;
|
||||||
|
struct list_head list;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
|
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
|
||||||
|
|
Loading…
Reference in New Issue