iommu/vt-d: Replace spin_lock_irqsave() with spin_lock()

The iommu->lock protects changes to the root/context/pasid tables and to
domain ID allocation. None of these resources is ever changed from
interrupt context, so there is no need to disable interrupts while the
spinlock is held. The same applies to device_domain_lock, which protects
the device/domain attachment information. Replace the
spin_lock/unlock_irqsave/irqrestore() calls with plain
spin_lock/unlock().

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20220706025524.2904370-6-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Lu Baolu <baolu.lu@linux.intel.com>
Date:      2022-07-12 08:08:56 +08:00
Committed: Joerg Roedel
Commit:    ffd5869d93
Parent:    2e1c8dafb8
3 changed files with 32 additions and 46 deletions
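
As a minimal illustration of the pattern being replaced (a sketch, not code
from the patch; the lock and data names are hypothetical):
spin_lock_irqsave() saves the caller's interrupt state and disables local
interrupts, which is only required when the same lock may also be taken from
an interrupt handler. Once every user of the lock runs in process context,
plain spin_lock() is enough:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_state;		/* hypothetical protected data */

/* Needed only if example_lock were also taken from interrupt context. */
static void example_update_irqsafe(int val)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_state = val;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Sufficient when all users of example_lock run in process context. */
static void example_update(int val)
{
	spin_lock(&example_lock);
	example_state = val;
	spin_unlock(&example_lock);
}

Besides shortening the code, the conversion drops a local "flags" variable
in every caller, as the diff below shows, and avoids needlessly blocking
interrupt delivery while the tables are updated.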

drivers/iommu/intel/debugfs.c

@@ -263,10 +263,9 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
 
 static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
 {
-	unsigned long flags;
 	u16 bus;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
 		   (u64)virt_to_phys(iommu->root_entry));
 	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
@@ -278,8 +277,7 @@ static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
 	 */
 	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 }
 
 static int dmar_translation_struct_show(struct seq_file *m, void *unused)

drivers/iommu/intel/iommu.c

@@ -797,13 +797,12 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	struct context_entry *context;
 	int ret = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, 0);
 	if (context)
 		ret = context_present(context);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 	return ret;
 }
 
@@ -1508,17 +1507,15 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 				  u64 addr, unsigned mask)
 {
-	unsigned long flags;
 	struct device_domain_info *info;
 
 	if (!domain->has_iotlb_device)
 		return;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	list_for_each_entry(info, &domain->devices, link)
 		__iommu_flush_dev_iotlb(info, addr, mask);
-
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1917,7 +1914,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	int translation = CONTEXT_TT_MULTI_LEVEL;
 	struct device_domain_info *info = NULL;
 	struct context_entry *context;
-	unsigned long flags;
 	int ret;
 
 	WARN_ON(did == 0);
@@ -1930,7 +1926,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 	BUG_ON(!domain->pgd);
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	spin_lock(&iommu->lock);
 
 	ret = -ENOMEM;
@@ -2052,7 +2048,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 out_unlock:
 	spin_unlock(&iommu->lock);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 
 	return ret;
 }
@@ -2296,16 +2292,15 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 {
 	struct intel_iommu *iommu = info->iommu;
 	struct context_entry *context;
-	unsigned long flags;
 	u16 did_old;
 
 	if (!iommu)
 		return;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, 0);
 	if (!context) {
-		spin_unlock_irqrestore(&iommu->lock, flags);
+		spin_unlock(&iommu->lock);
 		return;
 	}
 
@@ -2320,7 +2315,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 	iommu->flush.flush_context(iommu,
 				   did_old,
 				   (((u16)bus) << 8) | devfn,
@@ -2342,12 +2337,11 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
 	struct device_domain_info *info, *tmp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	list_for_each_entry_safe(info, tmp, &domain->devices, link)
 		__dmar_remove_one_dev_info(info);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 }
 
 static int domain_setup_first_level(struct intel_iommu *iommu,
@@ -2469,7 +2463,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu;
-	unsigned long flags;
 	u8 bus, devfn;
 	int ret;
 
@@ -2477,17 +2470,17 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	info->domain = domain;
 	spin_lock(&iommu->lock);
 	ret = domain_attach_iommu(domain, iommu);
 	spin_unlock(&iommu->lock);
 	if (ret) {
-		spin_unlock_irqrestore(&device_domain_lock, flags);
+		spin_unlock(&device_domain_lock);
 		return ret;
 	}
 	list_add(&info->link, &domain->devices);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 
 	/* PASID table is mandatory for a PCI device in scalable mode. */
 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2499,7 +2492,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	}
 
 	/* Setup the PASID entry for requests without PASID: */
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 	if (hw_pass_through && domain_type_is_si(domain))
 		ret = intel_pasid_setup_pass_through(iommu, domain,
 						     dev, PASID_RID2PASID);
@@ -2509,7 +2502,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	else
 		ret = intel_pasid_setup_second_level(iommu, domain,
 						     dev, PASID_RID2PASID);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 	if (ret) {
 		dev_err(dev, "Setup RID2PASID failed\n");
 		dmar_remove_one_dev_info(dev);
@@ -2777,7 +2770,6 @@ static int copy_translation_tables(struct intel_iommu *iommu)
 	struct root_entry *old_rt;
 	phys_addr_t old_rt_phys;
 	int ctxt_table_entries;
-	unsigned long flags;
 	u64 rtaddr_reg;
 	int bus, ret;
 	bool new_ext, ext;
@@ -2820,7 +2812,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
 		}
 	}
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 
 	/* Context tables are copied, now write them to the root_entry table */
 	for (bus = 0; bus < 256; bus++) {
@@ -2839,7 +2831,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
 		iommu->root_entry[bus].hi = val;
 	}
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 
 	kfree(ctxt_tbls);
 
@@ -4166,7 +4158,6 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
 	struct intel_iommu *iommu;
-	unsigned long flags;
 
 	assert_spin_locked(&device_domain_lock);
 
@@ -4188,21 +4179,20 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 
 	list_del(&info->link);
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	spin_lock(&iommu->lock);
 	domain_detach_iommu(domain, iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	spin_unlock(&iommu->lock);
 }
 
 static void dmar_remove_one_dev_info(struct device *dev)
 {
 	struct device_domain_info *info;
-	unsigned long flags;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	info = dev_iommu_priv_get(dev);
 	if (info)
 		__dmar_remove_one_dev_info(info);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 }
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4518,20 +4508,19 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long flags;
 
 	if (dmar_domain->force_snooping)
 		return true;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	if (!domain_support_force_snooping(dmar_domain)) {
-		spin_unlock_irqrestore(&device_domain_lock, flags);
+		spin_unlock(&device_domain_lock);
 		return false;
 	}
 
 	domain_set_force_snooping(dmar_domain);
 	dmar_domain->force_snooping = true;
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 
 	return true;
 }
@@ -4678,7 +4667,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct context_entry *context;
 	struct dmar_domain *domain;
-	unsigned long flags;
 	u64 ctx_lo;
 	int ret;
 
@@ -4686,7 +4674,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 	if (!domain)
 		return -EINVAL;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
+	spin_lock(&device_domain_lock);
 	spin_lock(&iommu->lock);
 
 	ret = -EINVAL;
@@ -4718,7 +4706,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 
 out:
 	spin_unlock(&iommu->lock);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&device_domain_lock);
 
 	return ret;
 }
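
A detail worth noting in the hunks above: domain_context_mapping_one(),
domain_add_dev_info() and intel_iommu_enable_pasid() nest the two locks,
always taking device_domain_lock before iommu->lock. Only the outermost
acquisition used to disable interrupts; once neither lock is taken from
interrupt context, both levels become plain spin_lock() calls. A minimal
sketch of that ordering convention (hypothetical names, not from the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* stand-in for device_domain_lock */
static DEFINE_SPINLOCK(inner_lock);	/* stand-in for iommu->lock */

static void nested_update(void)
{
	/* All paths must take the locks in this order to avoid deadlock. */
	spin_lock(&outer_lock);
	spin_lock(&inner_lock);
	/* ... modify state protected by both locks ... */
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}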

drivers/iommu/intel/svm.c

@@ -328,9 +328,9 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
 					   unsigned int flags)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
-	unsigned long iflags, sflags;
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm;
+	unsigned long sflags;
 	int ret = 0;
 
 	svm = pasid_private_find(mm->pasid);
@@ -394,10 +394,10 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
 	sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
 			PASID_FLAG_SUPERVISOR_MODE : 0;
 	sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
-	spin_lock_irqsave(&iommu->lock, iflags);
+	spin_lock(&iommu->lock);
 	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
 					    FLPT_DEFAULT_DID, sflags);
-	spin_unlock_irqrestore(&iommu->lock, iflags);
+	spin_unlock(&iommu->lock);
 	if (ret)
 		goto free_sdev;