IOMMU Updates for Linux v3.19

Merge tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time with:

   - A new IOMMU-API call: iommu_map_sg() to map multiple non-contiguous
     pages into an IO address space with only one API call.  This allows
     certain optimizations in the IOMMU driver (see the usage sketch
     after this message).

   - DMAR device hotplug in the Intel VT-d driver.  It is now possible
     to hotplug the IOMMU itself.

   - A new IOMMU driver for the Rockchip ARM platform.

   - A couple of cleanups and improvements in the OMAP IOMMU driver.

   - Nesting support for the ARM-SMMU driver.

   - Various other small cleanups and improvements.

  Please note that this time some branches were also pulled into other
  trees, like the DRI and the Tegra tree.  The VT-d branch was also
  pulled into tip/x86/apic.

  Some patches for the AMD IOMMUv2 driver are not in the IOMMU tree but
  were merged by Andrew (or finally ended up in the DRI tree)"
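
The iommu_map_sg() call from the first bullet can be used roughly as
follows. This is an editor's sketch, not code from this merge: the
function example_map_sg() and its flag choice are illustrative, but
iommu_map_sg() itself is the new API and returns the number of bytes
mapped (0 on failure):

	#include <linux/iommu.h>
	#include <linux/scatterlist.h>

	/* Map a whole scatterlist into an IO address space in one call. */
	static int example_map_sg(struct iommu_domain *domain,
				  unsigned long iova,
				  struct scatterlist *sg, unsigned int nents)
	{
		size_t mapped;

		mapped = iommu_map_sg(domain, iova, sg, nents,
				      IOMMU_READ | IOMMU_WRITE);
		if (mapped == 0)
			return -ENOMEM;	/* nothing was mapped */

		return 0;
	}

Compared with calling iommu_map() once per scatterlist entry, this gives
drivers that implement .map_sg a single point at which to batch work such
as page-table updates and TLB maintenance.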

* tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (42 commits)
  iommu: Decouple iommu_map_sg from CPU page size
  iommu/vt-d: Fix an off-by-one bug in __domain_mapping()
  pci, ACPI, iommu: Enhance pci_root to support DMAR device hotplug
  iommu/vt-d: Enhance intel-iommu driver to support DMAR unit hotplug
  iommu/vt-d: Enhance error recovery in function intel_enable_irq_remapping()
  iommu/vt-d: Enhance intel_irq_remapping driver to support DMAR unit hotplug
  iommu/vt-d: Search for ACPI _DSM method for DMAR hotplug
  iommu/vt-d: Implement DMAR unit hotplug framework
  iommu/vt-d: Dynamically allocate and free seq_id for DMAR units
  iommu/vt-d: Introduce helper function dmar_walk_resources()
  iommu/arm-smmu: add support for DOMAIN_ATTR_NESTING attribute
  iommu/arm-smmu: Play nice on non-ARM/SMMU systems
  iommu/amd: remove compiler warning due to IOMMU_CAP_NOEXEC
  iommu/arm-smmu: add IOMMU_CAP_NOEXEC to the ARM SMMU driver
  iommu: add capability IOMMU_CAP_NOEXEC
  iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC
  iommu/amd: Fix accounting of device_state
  x86/vt-d: Fix incorrect bit operations in setting values
  iommu/rockchip: Allow to compile with COMPILE_TEST
  iommu/ipmmu-vmsa: Return proper error if devm_request_irq fails
  ...
Commit 8d14066755 by Linus Torvalds, 2014-12-12 15:10:34 -08:00
21 changed files with 2453 additions and 1006 deletions


@@ -0,0 +1,26 @@
Rockchip IOMMU
==============
A Rockchip DRM iommu translates io virtual addresses to physical addresses for
its master device. Each slave device is bound to a single master device, and
shares its clocks, power domain and irq.
Required properties:
- compatible : Should be "rockchip,iommu"
- reg : Address space for the configuration registers
- interrupts : Interrupt specifier for the IOMMU instance
- interrupt-names : Interrupt name for the IOMMU instance
- #iommu-cells : Should be <0>. This indicates the iommu is a
"single-master" device, and needs no additional information
to associate with its master device. See:
Documentation/devicetree/bindings/iommu/iommu.txt
Example:
vopl_mmu: iommu@ff940300 {
compatible = "rockchip,iommu";
reg = <0xff940300 0x100>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
#iommu-cells = <0>;
};
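
An illustrative master node (editor's addition, not part of the patch):
with "#iommu-cells = <0>", a master references this IOMMU through the
generic "iommus" property defined in the iommu.txt binding cited above,
for example:

	vopl: vop@ff940000 {
		/* ... master device properties ... */
		iommus = <&vopl_mmu>;
	};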


@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0;
bool hotadd = system_state != SYSTEM_BOOTING;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
result = -ENXIO;
goto end;
}
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
goto end;
goto remove_dmar;
}
if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
if (system_state != SYSTEM_BOOTING) {
if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove();
return 1;
remove_dmar:
if (hotadd)
dmar_device_remove(handle);
end:
kfree(root);
return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus);
dmar_device_remove(device->handle);
pci_unlock_rescan_remove();
kfree(root);


@@ -144,13 +144,26 @@ config OMAP_IOMMU
select IOMMU_API
config OMAP_IOMMU_DEBUG
tristate "Export OMAP IOMMU internals in DebugFS"
depends on OMAP_IOMMU && DEBUG_FS
help
Select this to see extensive information about
the internal state of OMAP IOMMU in debugfs.
bool "Export OMAP IOMMU internals in DebugFS"
depends on OMAP_IOMMU && DEBUG_FS
---help---
Select this to see extensive information about
the internal state of OMAP IOMMU in debugfs.
Say N unless you know you need this.
Say N unless you know you need this.
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
depends on ARM
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
Support for IOMMUs found on Rockchip rk32xx SOCs.
These IOMMUs allow virtualization of the address space used by most
cores within the multimedia subsystem.
Say Y here if you are using a Rockchip SoC that includes an IOMMU
device.
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"


@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o


@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
}
return false;


@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state(struct pasid_state *pasid_state)
{
if (atomic_dec_and_test(&pasid_state->count)) {
put_device_state(pasid_state->device_state);
if (atomic_dec_and_test(&pasid_state->count))
wake_up(&pasid_state->wq);
}
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_dec_and_test(&pasid_state->count))
put_device_state(pasid_state->device_state);
else
if (!atomic_dec_and_test(&pasid_state->count))
schedule();
finish_wait(&pasid_state->wq, &wait);


@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
spinlock_t lock;
};
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
* support for nested translation. That means we end up with the
* following table:
*
* Requested Supported Actual
* S1 N S1
* S1 S1+S2 S1
* S1 S2 S2
* S1 S1 S1
* N N N
* N S1+S2 S2
* N S2 S2
* N S1 S1
*
* Note that you can't actually request stage-2 mappings.
*/
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
* We will likely want to change this if/when KVM gets
* involved.
*/
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
} else {
case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
break;
default:
ret = -EINVAL;
goto out_unlock;
}
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int prot, int stage)
{
pte_t *pte, *start;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
pteval |= ARM_SMMU_PTE_MEMATTR_NC;
}
if (prot & IOMMU_NOEXEC)
pteval |= ARM_SMMU_PTE_XN;
/* If no access, create a faulting entry to avoid TLB fills */
if (prot & IOMMU_EXEC)
pteval &= ~ARM_SMMU_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
return false;
}
@@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
default:
return -ENODEV;
}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu)
return -EPERM;
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
return 0;
default:
return -ENODEV;
}
}
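
/*
 * Editor's sketch, not part of this patch: nested mode is requested
 * through the generic attribute interface before the domain is attached,
 * which lands in arm_smmu_domain_set_attr() above. The function name
 * example_enable_nesting() is illustrative.
 */
static int example_enable_nesting(struct iommu_domain *domain)
{
	int nesting = 1;

	/* Returns -EPERM if the domain is already attached to an SMMU. */
	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}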
static const struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
.pgsize_bitmap = (SECTION_SIZE |
ARM_SMMU_PTE_CONT_SIZE |
PAGE_SIZE),
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.pgsize_bitmap = (SECTION_SIZE |
ARM_SMMU_PTE_CONT_SIZE |
PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
struct device_node *np;
int ret;
/*
* Play nice with systems that don't have an ARM SMMU by checking that
* an ARM SMMU exists in the system before proceeding with the driver
* and IOMMU bus operation registration.
*/
np = of_find_matching_node(NULL, arm_smmu_of_match);
if (!np)
return 0;
of_node_put(np);
ret = platform_driver_register(&arm_smmu_driver);
if (ret)
return ret;


@@ -44,6 +44,14 @@
#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
void *arg[ACPI_DMAR_TYPE_RESERVED];
bool ignore_unhandled;
bool print_entry;
};
/*
* Assumptions:
* 1) The hotplug framework guarantees that a DMAR unit will be hot-added
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
* add INCLUDE_ALL at the tail, so a scan of the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
struct dmar_drhd_unit *dmaru;
list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
if (dmaru->segment == drhd->segment &&
dmaru->reg_base_addr == drhd->address)
return dmaru;
return NULL;
}
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
*/
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
dmaru = dmar_find_dmaru(drhd);
if (dmaru)
goto out;
dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
dmaru->hdr = header;
/*
* If header is allocated from slab by ACPI _DSM method, we need to
* copy the content because the memory buffer will be freed on return.
*/
dmaru->hdr = (void *)(dmaru + 1);
memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret;
}
dmar_register_drhd_unit(dmaru);
out:
if (arg)
(*(int *)arg)++;
return 0;
}
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
void *arg)
{
struct acpi_dmar_andd *andd = (void *)header;
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
}
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0);
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
size_t len, struct dmar_res_callback *cb)
{
int ret = 0;
struct acpi_dmar_header *iter, *next;
struct acpi_dmar_header *end = ((void *)start) + len;
for (iter = start; iter < end && ret == 0; iter = next) {
next = (void *)iter + iter->length;
if (iter->length == 0) {
/* Avoid looping forever on bad ACPI tables */
pr_debug(FW_BUG "Invalid 0-length structure\n");
break;
} else if (next > end) {
/* Avoid passing table end */
pr_warn(FW_BUG "record passes table end\n");
ret = -EINVAL;
break;
}
if (cb->print_entry)
dmar_table_print_dmar_entry(iter);
if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
/* continue for forward compatibility */
pr_debug("Unknown DMAR structure type %d\n",
iter->type);
} else if (cb->cb[iter->type]) {
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
} else if (!cb->ignore_unhandled) {
pr_warn("No handler for DMAR structure type %d\n",
iter->type);
ret = -EINVAL;
}
}
return ret;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
struct dmar_res_callback *cb)
{
return dmar_walk_remapping_entries((void *)(dmar + 1),
dmar->header.length - sizeof(*dmar), cb);
}
/**
* parse_dmar_table - parses the DMA reporting table
*/
@@ -510,9 +593,18 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
int ret = 0;
int drhd_count = 0;
struct dmar_res_callback cb = {
.print_entry = true,
.ignore_unhandled = true,
.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
};
/*
* Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
}
pr_info("Host address width %d\n", dmar->width + 1);
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
pr_warn("Invalid 0-length structure\n");
ret = -EINVAL;
break;
}
dmar_table_print_dmar_entry(entry_header);
switch (entry_header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd_count++;
ret = dmar_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
ret = dmar_parse_one_rmrr(entry_header);
break;
case ACPI_DMAR_TYPE_ROOT_ATS:
ret = dmar_parse_one_atsr(entry_header);
break;
case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
ret = dmar_parse_one_rhsa(entry_header);
#endif
break;
case ACPI_DMAR_TYPE_NAMESPACE:
ret = dmar_parse_one_andd(entry_header);
break;
default:
pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
}
if (ret)
break;
entry_header = ((void *)entry_header + entry_header->length);
}
if (drhd_count == 0)
ret = dmar_walk_dmar_table(dmar, &cb);
if (ret == 0 && drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
return ret;
}
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init check_zero_address(void)
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd;
void __iomem *addr;
u64 cap, ecap;
dmar = (struct acpi_table_dmar *)dmar_tbl;
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
pr_warn("Invalid 0-length structure\n");
return 0;
}
if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
void __iomem *addr;
u64 cap, ecap;
drhd = (void *)entry_header;
if (!drhd->address) {
warn_invalid_dmar(0, "");
goto failed;
}
addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
if (!addr ) {
printk("IOMMU: can't validate: %llx\n", drhd->address);
goto failed;
}
cap = dmar_readq(addr + DMAR_CAP_REG);
ecap = dmar_readq(addr + DMAR_ECAP_REG);
early_iounmap(addr, VTD_PAGE_SIZE);
if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
warn_invalid_dmar(drhd->address,
" returns all ones");
goto failed;
}
}
entry_header = ((void *)entry_header + entry_header->length);
drhd = (void *)entry;
if (!drhd->address) {
warn_invalid_dmar(0, "");
return -EINVAL;
}
if (arg)
addr = ioremap(drhd->address, VTD_PAGE_SIZE);
else
addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
if (!addr) {
pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
return -EINVAL;
}
cap = dmar_readq(addr + DMAR_CAP_REG);
ecap = dmar_readq(addr + DMAR_ECAP_REG);
if (arg)
iounmap(addr);
else
early_iounmap(addr, VTD_PAGE_SIZE);
if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
warn_invalid_dmar(drhd->address, " returns all ones");
return -EINVAL;
}
return 1;
failed:
return 0;
}
int __init detect_intel_iommu(void)
{
int ret;
struct dmar_res_callback validate_drhd_cb = {
.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
.ignore_unhandled = true,
};
down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
ret = check_zero_address();
{
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
}
ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
&validate_drhd_cb);
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
}
#ifdef CONFIG_X86
if (ret)
x86_init.iommu.iommu_init = intel_iommu_init;
if (ret)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
DMAR_UNITS_SUPPORTED);
if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
iommu->seq_id = -1;
} else {
set_bit(iommu->seq_id, dmar_seq_ids);
sprintf(iommu->name, "dmar%d", iommu->seq_id);
}
return iommu->seq_id;
}
static void dmar_free_seq_id(struct intel_iommu *iommu)
{
if (iommu->seq_id >= 0) {
clear_bit(iommu->seq_id, dmar_seq_ids);
iommu->seq_id = -1;
}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
iommu->seq_id = iommu_allocated++;
sprintf (iommu->name, "dmar%d", iommu->seq_id);
if (dmar_alloc_seq_id(iommu) < 0) {
pr_err("IOMMU: failed to allocate seq_id\n");
err = -ENOSPC;
goto error;
}
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name);
goto error;
goto error_free_seq_id;
}
err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
err_unmap:
err_unmap:
unmap_iommu(iommu);
error:
error_free_seq_id:
dmar_free_seq_id(iommu);
error:
kfree(iommu);
return err;
}
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
dmar_free_seq_id(iommu);
kfree(iommu);
}
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
return irq_remapping_enabled || intel_iommu_enabled;
}
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;
/* DMAR units are in use */
if (irq_remapping_enabled || intel_iommu_enabled)
if (dmar_in_use())
return 0;
if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
* DMAR Hotplug Support
* For more details, please refer to Intel(R) Virtualization Technology
* for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
* "Remapping Hardware Unit Hot Plug".
*/
static u8 dmar_hp_uuid[] = {
/* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
/* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};
/*
* Currently there's only one revision and BIOS will not check the revision id,
* so use 0 for safety.
*/
#define DMAR_DSM_REV_ID 0
#define DMAR_DSM_FUNC_DRHD 1
#define DMAR_DSM_FUNC_ATSR 2
#define DMAR_DSM_FUNC_RHSA 3
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
dmar_res_handler_t handler, void *arg)
{
int ret = -ENODEV;
union acpi_object *obj;
struct acpi_dmar_header *start;
struct dmar_res_callback callback;
static int res_type[] = {
[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
};
if (!dmar_detect_dsm(handle, func))
return 0;
obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
func, NULL, ACPI_TYPE_BUFFER);
if (!obj)
return -ENODEV;
memset(&callback, 0, sizeof(callback));
callback.cb[res_type[func]] = handler;
callback.arg[res_type[func]] = arg;
start = (struct acpi_dmar_header *)obj->buffer.pointer;
ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
ACPI_FREE(obj);
return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
int ret;
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (!dmaru)
return -ENODEV;
ret = dmar_ir_hotplug(dmaru, true);
if (ret == 0)
ret = dmar_iommu_hotplug(dmaru, true);
return ret;
}
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
int i, ret;
struct device *dev;
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (!dmaru)
return 0;
/*
* All PCI devices managed by this unit should have been destroyed.
*/
if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
for_each_active_dev_scope(dmaru->devices,
dmaru->devices_cnt, i, dev)
return -EBUSY;
ret = dmar_ir_hotplug(dmaru, false);
if (ret == 0)
ret = dmar_iommu_hotplug(dmaru, false);
return ret;
}
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
struct dmar_drhd_unit *dmaru;
dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
if (dmaru) {
list_del_rcu(&dmaru->list);
synchronize_rcu();
dmar_free_drhd(dmaru);
}
return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
int ret;
int drhd_count = 0;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_validate_one_drhd, (void *)1);
if (ret)
goto out;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_parse_one_drhd, (void *)&drhd_count);
if (ret == 0 && drhd_count == 0) {
pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
goto out;
} else if (ret) {
goto release_drhd;
}
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
&dmar_parse_one_rhsa, NULL);
if (ret)
goto release_drhd;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_parse_one_atsr, NULL);
if (ret)
goto release_atsr;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_add_drhd, NULL);
if (!ret)
return 0;
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_remove_drhd, NULL);
release_atsr:
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_release_one_atsr, NULL);
release_drhd:
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_release_drhd, NULL);
out:
return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
int ret;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_check_one_atsr, NULL);
if (ret)
return ret;
ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_remove_drhd, NULL);
if (ret == 0) {
WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
&dmar_release_one_atsr, NULL));
WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_release_drhd, NULL));
} else {
dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
&dmar_hp_add_drhd, NULL);
}
return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
void *context, void **retval)
{
acpi_handle *phdl = retval;
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
*phdl = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
int ret;
acpi_handle tmp = NULL;
acpi_status status;
if (!dmar_in_use())
return 0;
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
ACPI_UINT32_MAX,
dmar_get_dsm_handle,
NULL, NULL, &tmp);
if (ACPI_FAILURE(status)) {
pr_warn("Failed to locate _DSM method.\n");
return -ENXIO;
}
}
if (tmp == NULL)
return 0;
down_write(&dmar_global_lock);
if (insert)
ret = dmar_hotplug_insert(tmp);
else
ret = dmar_hotplug_remove(tmp);
up_write(&dmar_global_lock);
return ret;
}
int dmar_device_add(acpi_handle handle)
{
return dmar_device_hotplug(handle, true);
}
int dmar_device_remove(acpi_handle handle)
{
return dmar_device_hotplug(handle, false);
}


@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
root->val &= ~VTD_PAGE_MASK;
root->val |= value & VTD_PAGE_MASK;
}
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
#else
# define IOMMU_UNITS_SUPPORTED 64
#endif
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root)
if (!root) {
pr_err("IOMMU: allocating root entry for %s failed\n",
iommu->name);
return -ENOMEM;
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}
static void free_dmar_iommu(struct intel_iommu *iommu)
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
}
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
}
g_iommus[iommu->seq_id] = NULL;
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
unsigned long sg_res;
unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
if (sg)
sg_res = 0;
else {
sg_res = nr_pages + 1;
if (!sg) {
sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
/*
* Start from the sane iommu hardware state.
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
* we got the things already rolling from a sane state.
*/
if (!iommu->qi) {
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based Invalidate
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
pr_info("IOMMU: %s using Register based invalidation\n",
iommu->name);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
}
}
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read
* only
*/
if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++;
continue;
}
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
IOMMU_UNITS_SUPPORTED);
DMAR_UNITS_SUPPORTED);
}
/* Preallocate enough resources for IOMMU hot-addition */
if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
g_num_of_iommus = DMAR_UNITS_SUPPORTED;
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later.
*/
ret = iommu_alloc_root_entry(iommu);
if (ret) {
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
if (ret)
goto free_iommu;
}
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}
/*
* Start from the sane iommu hardware state.
*/
for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
* we got the things already rolling from a sane state.
*/
if (iommu->qi)
continue;
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
* Invalidate
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
"invalidation\n",
iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
"invalidation\n",
iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
}
}
for_each_active_iommu(iommu, drhd)
intel_iommu_init_qi(iommu);
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
return 0;
free_iommu:
for_each_active_iommu(iommu, drhd)
for_each_active_iommu(iommu, drhd) {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
kfree(deferred_flush);
free_g_iommus:
kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0;
}
int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *tmp;
list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
tmp = (struct acpi_dmar_atsr *)atsru->hdr;
if (atsr->segment != tmp->segment)
continue;
if (atsr->header.length != tmp->header.length)
continue;
if (memcmp(atsr, tmp, atsr->header.length) == 0)
return atsru;
}
return NULL;
}
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
return 0;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
atsru = dmar_find_atsr(atsr);
if (atsru)
return 0;
atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;
atsru->hdr = hdr;
/*
* If memory is allocated from slab by ACPI _DSM method, we need to
* copy the memory content because the memory buffer will be freed
* on return.
*/
atsru->hdr = (void *)(atsru + 1);
memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru);
}
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (atsru) {
list_del_rcu(&atsru->list);
synchronize_rcu();
intel_iommu_free_atsr(atsru);
}
return 0;
}
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
int i;
struct device *dev;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (!atsru)
return 0;
if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
i, dev)
return -EBUSY;
return 0;
}
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (g_iommus[iommu->seq_id])
return 0;
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
iommu->name);
return -ENXIO;
}
if (!ecap_sc_support(iommu->ecap) &&
domain_update_iommu_snooping(iommu)) {
pr_warn("IOMMU: %s doesn't support snooping.\n",
iommu->name);
return -ENXIO;
}
sp = domain_update_iommu_superpage(iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("IOMMU: %s doesn't support large page.\n",
iommu->name);
return -ENXIO;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
if (dmaru->ignored) {
/*
* we always have to disable PMRs or DMA may fail on this device
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
return 0;
}
intel_iommu_init_qi(iommu);
iommu_flush_write_buffer(iommu);
ret = dmar_set_interrupt(iommu);
if (ret)
goto disable_iommu;
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
if (si_domain) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret < 0 || si_domain->id != ret)
goto disable_iommu;
domain_attach_iommu(si_domain, iommu);
}
iommu_disable_protect_mem_regions(iommu);
return 0;
disable_iommu:
disable_dmar_iommu(iommu);
out:
free_dmar_iommu(iommu);
return ret;
}
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
int ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (!intel_iommu_enabled)
return 0;
if (iommu == NULL)
return -EINVAL;
if (insert) {
ret = intel_iommu_add(dmaru);
} else {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
return ret;
}
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;


@@ -36,7 +36,6 @@ struct hpet_scope {
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
/*
* Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
if (ir_hpet[i].id == hpet_id)
if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
return ir_hpet[i].iommu;
return NULL;
}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
int i;
for (i = 0; i < MAX_IO_APICS; i++)
if (ir_ioapic[i].id == apic)
if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
return NULL;
}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic) {
if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == id) {
if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
if (iommu->ir_table)
return 0;
if (!iommu->ir_table)
ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
if (!ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
goto out_free_table;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
goto out_free_pages;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
iommu->ir_table = ir_table;
return 0;
out_free_pages:
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
kfree(ir_table);
return -ENOMEM;
}
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
if (iommu && iommu->ir_table) {
free_pages((unsigned long)iommu->ir_table->base,
INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
}
}
/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap))
continue;
if (intel_setup_irq_remapping(iommu, eim))
if (intel_setup_irq_remapping(iommu))
goto error;
iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
/*
* handle error condition gracefully here!
*/
for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
iommu_disable_irq_remapping(iommu);
intel_teardown_irq_remapping(iommu);
}
if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ static int __init intel_enable_irq_remapping(void)
return -1;
}
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu)
static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu,
struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
int count;
int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS);
path++;
}
ir_hpet[ir_hpet_num].bus = bus;
ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
ir_hpet[ir_hpet_num].iommu = iommu;
ir_hpet[ir_hpet_num].id = scope->enumeration_id;
ir_hpet_num++;
for (count = 0; count < MAX_HPET_TBS; count++) {
if (ir_hpet[count].iommu == iommu &&
ir_hpet[count].id == scope->enumeration_id)
return 0;
else if (ir_hpet[count].iommu == NULL && free == -1)
free = count;
}
if (free == -1) {
pr_warn("Exceeded Max HPET blocks\n");
return -ENOSPC;
}
ir_hpet[free].iommu = iommu;
ir_hpet[free].id = scope->enumeration_id;
ir_hpet[free].bus = bus;
ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
pr_info("HPET id %d under DRHD base 0x%Lx\n",
scope->enumeration_id, drhd->address);
return 0;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu)
static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
struct intel_iommu *iommu,
struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
int count;
int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++;
}
ir_ioapic[ir_ioapic_num].bus = bus;
ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[ir_ioapic_num].iommu = iommu;
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
for (count = 0; count < MAX_IO_APICS; count++) {
if (ir_ioapic[count].iommu == iommu &&
ir_ioapic[count].id == scope->enumeration_id)
return 0;
else if (ir_ioapic[count].iommu == NULL && free == -1)
free = count;
}
if (free == -1) {
pr_warn("Exceeded Max IO APICS\n");
return -ENOSPC;
}
ir_ioapic[free].bus = bus;
ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[free].iommu = iommu;
ir_ioapic[free].id = scope->enumeration_id;
pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
scope->enumeration_id, drhd->address, iommu->seq_id);
return 0;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
int ret = 0;
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header;
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;
while (start < end) {
while (start < end && ret == 0) {
scope = start;
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
if (ir_ioapic_num == MAX_IO_APICS) {
printk(KERN_WARNING "Exceeded Max IO APICS\n");
return -1;
}
printk(KERN_INFO "IOAPIC id %d under DRHD base "
" 0x%Lx IOMMU %d\n", scope->enumeration_id,
drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
if (ir_hpet_num == MAX_HPET_TBS) {
printk(KERN_WARNING "Exceeded Max HPET blocks\n");
return -1;
}
printk(KERN_INFO "HPET id %d under DRHD base"
" 0x%Lx\n", scope->enumeration_id,
drhd->address);
ir_parse_one_hpet_scope(scope, iommu);
}
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
start += scope->length;
}
return 0;
return ret;
}
static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
if (ir_hpet[i].iommu == iommu)
ir_hpet[i].iommu = NULL;
for (i = 0; i < MAX_IO_APICS; i++)
if (ir_ioapic[i].iommu == iommu)
ir_ioapic[i].iommu = NULL;
}
/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi,
};
/*
* Support of Interrupt Remapping Unit Hotplug
*/
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
int ret;
int eim = x2apic_enabled();
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
return -ENODEV;
}
if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
iommu->reg_phys);
return -ENODEV;
}
/* TODO: check all IOAPICs are covered by IOMMU */
/* Setup Interrupt-remapping now. */
ret = intel_setup_irq_remapping(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to allocate resource\n",
iommu->reg_phys);
ir_remove_ioapic_hpet_scope(iommu);
return ret;
}
if (!iommu->qi) {
/* Clear previous faults. */
dmar_fault(-1, iommu);
iommu_disable_irq_remapping(iommu);
dmar_disable_qi(iommu);
}
/* Enable queued invalidation */
ret = dmar_enable_qi(iommu);
if (!ret) {
iommu_set_irq_remapping(iommu, eim);
} else {
pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
iommu->reg_phys, iommu->ecap, ret);
intel_teardown_irq_remapping(iommu);
ir_remove_ioapic_hpet_scope(iommu);
}
return ret;
}
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
int ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (!irq_remapping_enabled)
return 0;
if (iommu == NULL)
return -EINVAL;
if (!ecap_ir_support(iommu->ecap))
return 0;
if (insert) {
if (!iommu->ir_table)
ret = dmar_ir_add(dmaru, iommu);
} else {
if (iommu->ir_table) {
if (!bitmap_empty(iommu->ir_table->bitmap,
INTR_REMAP_TABLE_ENTRIES)) {
ret = -EBUSY;
} else {
iommu_disable_irq_remapping(iommu);
intel_teardown_irq_remapping(iommu);
ir_remove_ioapic_hpet_scope(iommu);
}
}
}
return ret;
}


@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
{
struct scatterlist *s;
size_t mapped = 0;
unsigned int i;
unsigned int i, min_pagesz;
int ret;
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s));
if (unlikely(domain->ops->pgsize_bitmap == 0UL))
return 0;
/* We are mapping on page boundarys, so offset must be 0 */
if (s->offset)
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
/*
* We are mapping on IOMMU page boundaries, so offset within
* the page must be 0. However, the IOMMU may support pages
* smaller than PAGE_SIZE, so s->offset may still represent
* an offset of that boundary within the CPU page.
*/
if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot);


@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
return irq;
return ret;
}
ipmmu_device_reset(mmu);


@@ -73,8 +73,7 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
if (drvdata->clk)
clk_disable(drvdata->clk);
clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
}


@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base;
int ret, irq, par;
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
if (iommu_clk)
clk_disable(iommu_clk);
clk_disable(iommu_clk);
clk_disable(iommu_pclk);
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret;
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3);
}
if (drvdata->clk)
clk_disable(drvdata->clk);
clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);


@@ -10,45 +10,35 @@
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h"
#include "omap-iommu.h"
#define MAXCOLUMN 100 /* for short messages */
static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root;
static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
{
u32 ver = omap_iommu_arch_version();
char buf[MAXCOLUMN], *p = buf;
p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
return !obj->domain;
}
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes, rest;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes;
}
static ssize_t debug_write_pagetable(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
static void dump_ioptable(struct seq_file *s)
{
struct iotlb_entry e;
struct cr_regs cr;
int err;
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char buf[MAXCOLUMN], *p = buf;
count = min(count, sizeof(buf));
mutex_lock(&iommu_debug_lock);
if (copy_from_user(p, userbuf, count)) {
mutex_unlock(&iommu_debug_lock);
return -EFAULT;
}
sscanf(p, "%x %x", &cr.cam, &cr.ram);
if (!cr.cam || !cr.ram) {
mutex_unlock(&iommu_debug_lock);
return -EINVAL;
}
omap_iotlb_cr_to_e(&cr, &e);
err = omap_iopgtable_store_entry(obj, &e);
if (err)
dev_err(obj->dev, "%s: fail to store cr\n", __func__);
mutex_unlock(&iommu_debug_lock);
return count;
}
#define dump_ioptable_entry_one(lv, da, val) \
({ \
int __err = 0; \
ssize_t bytes; \
const int maxcol = 22; \
const char *str = "%d: %08x %08x\n"; \
bytes = snprintf(p, maxcol, str, lv, da, val); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
__err = -ENOMEM; \
__err; \
})
static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
{
int i;
u32 *iopgd;
char *p = buf;
int i, j;
u32 da;
u32 *iopgd, *iopte;
struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
int j, err;
u32 *iopte;
u32 da;
if (!*iopgd)
continue;
if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT;
err = dump_ioptable_entry_one(1, da, *iopgd);
if (err)
goto out;
seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
continue;
}
iopte = iopte_offset(iopgd, 0);
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte)
continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
err = dump_ioptable_entry_one(2, da, *iopgd);
if (err)
goto out;
seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
}
}
out:
spin_unlock(&obj->page_table_lock);
return p - buf;
spin_unlock(&obj->page_table_lock);
}
static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
static int debug_read_pagetable(struct seq_file *s, void *data)
{
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
size_t bytes;
struct omap_iommu *obj = s->private;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
p += sprintf(p, "-----------------------------------------\n");
if (is_omap_iommu_detached(obj))
return -EPERM;
mutex_lock(&iommu_debug_lock);
bytes = PAGE_SIZE - (p - buf);
p += dump_ioptable(obj, p, bytes);
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
seq_puts(s, "--------------------------\n");
dump_ioptable(s);
mutex_unlock(&iommu_debug_lock);
free_page((unsigned long)buf);
return bytes;
return 0;
}
#define DEBUG_FOPS(name) \
static const struct file_operations debug_##name##_fops = { \
.open = simple_open, \
.read = debug_read_##name, \
.write = debug_write_##name, \
.llseek = generic_file_llseek, \
};
#define DEBUG_SEQ_FOPS_RO(name) \
static int debug_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, debug_read_##name, inode->i_private); \
} \
\
static const struct file_operations debug_##name##_fops = { \
.open = debug_open_##name, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
#define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \
};
DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
DEBUG_FOPS(pagetable);
DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
{ \
struct dentry *dent; \
dent = debugfs_create_file(#attr, mode, parent, \
dev, &debug_##attr##_fops); \
dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
obj, &debug_##attr##_fops); \
if (!dent) \
return -ENOMEM; \
goto err; \
}
#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_iommu *obj = platform_get_drvdata(pdev);
struct omap_iommu_arch_data *arch_data;
struct dentry *d, *parent;
if (!obj || !obj->dev)
return -EINVAL;
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
return -ENOMEM;
arch_data->iommu_dev = obj;
dev->archdata.iommu = arch_data;
d = debugfs_create_dir(obj->name, iommu_debug_root);
if (!d)
goto nomem;
parent = d;
d = debugfs_create_u8("nr_tlb_entries", 400, parent,
(u8 *)&obj->nr_tlb_entries);
if (!d)
goto nomem;
DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb);
DEBUG_ADD_FILE(pagetable);
return 0;
nomem:
kfree(arch_data);
return -ENOMEM;
}
static int iommu_debug_unregister(struct device *dev, void *data)
{
if (!dev->archdata.iommu)
return 0;
kfree(dev->archdata.iommu);
dev->archdata.iommu = NULL;
return 0;
}
static int __init iommu_debug_init(void)
void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
struct dentry *d;
int err;
d = debugfs_create_dir("iommu", NULL);
if (!iommu_debug_root)
return;
obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
if (!obj->debug_dir)
return;
d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries);
if (!d)
return -ENOMEM;
iommu_debug_root = d;
return;
err = omap_foreach_iommu_device(d, iommu_debug_register);
if (err)
goto err_out;
return 0;
DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb);
DEBUG_ADD_FILE_RO(pagetable);
err_out:
debugfs_remove_recursive(iommu_debug_root);
return err;
return;
err:
debugfs_remove_recursive(obj->debug_dir);
}
module_init(iommu_debug_init)
static void __exit iommu_debugfs_exit(void)
void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{
debugfs_remove_recursive(iommu_debug_root);
omap_foreach_iommu_device(NULL, iommu_debug_unregister);
}
module_exit(iommu_debugfs_exit)
if (!obj->debug_dir)
return;
MODULE_DESCRIPTION("omap iommu: debugfs interface");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");
debugfs_remove_recursive(obj->debug_dir);
}
void __init omap_iommu_debugfs_init(void)
{
iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
if (!iommu_debug_root)
pr_err("can't create debugfs dir\n");
}
void __exit omap_iommu_debugfs_exit(void)
{
debugfs_remove(iommu_debug_root);
}
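
The DEBUG_SEQ_FOPS_RO conversion above hands all buffer management to the
seq_file core via single_open(). A self-contained sketch of the same pattern;
every foo_* name is hypothetical:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/seq_file.h>

    struct foo_dev { int state; };	/* illustrative device state */

    /* show() callback: the seq_file core allocates and fills the buffer */
    static int foo_show(struct seq_file *s, void *unused)
    {
        struct foo_dev *foo = s->private;	/* handed over by single_open() */

        seq_printf(s, "state: %d\n", foo->state);
        return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
        /* inode->i_private is the pointer given to debugfs_create_file() */
        return single_open(file, foo_show, inode->i_private);
    }

    static const struct file_operations foo_fops = {
        .open    = foo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

Compared with the removed debug_read_pagetable(), this drops the manual page
allocation and the simple_read_from_buffer() bookkeeping entirely.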

drivers/iommu/omap-iommu.c

@@ -76,44 +76,9 @@ struct iotlb_lock {
short vict;
};
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
* omap_install_iommu_arch - Install architecture specific iommu functions
* @ops: a pointer to architecture specific iommu functions
*
* There are several kinds of iommu algorithms (tlb, pagetable) among the
* omap series. This interface installs such an iommu algorithm.
**/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
if (arch_iommu)
return -EBUSY;
arch_iommu = ops;
return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
* omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
* @ops: a pointer to architecture specific iommu functions
*
* This interface uninstalls the iommu algorithm installed previously.
**/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
if (arch_iommu != ops)
pr_err("%s: not your arch\n", __func__);
arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
@@ -121,8 +86,13 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
void omap_iommu_save_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
u32 *p = obj->ctx;
int i;
arch_iommu->save_ctx(obj);
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
p[i] = iommu_read_reg(obj, i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
void omap_iommu_restore_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
u32 *p = obj->ctx;
int i;
arch_iommu->restore_ctx(obj);
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
iommu_write_reg(obj, p[i], i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/**
* omap_iommu_arch_version - Return running iommu arch version
**/
u32 omap_iommu_arch_version(void)
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
return arch_iommu->version;
u32 l = iommu_read_reg(obj, MMU_CNTL);
if (on)
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
else
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
l &= ~MMU_CNTL_MASK;
if (on)
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
else
l |= (MMU_CNTL_MMU_EN);
iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
iommu_write_reg(obj, pa, MMU_TTB);
if (obj->has_bus_err_back)
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
__iommu_set_twl(obj, true);
return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
if (!arch_iommu)
return -ENODEV;
struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
pm_runtime_get_sync(obj->dev);
err = arch_iommu->enable(obj);
err = omap2_iommu_enable(obj);
return err;
}
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
static void iommu_disable(struct omap_iommu *obj)
{
struct platform_device *pdev = to_platform_device(obj->dev);
struct iommu_platform_data *pdata = pdev->dev.platform_data;
struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
arch_iommu->disable(obj);
omap2_iommu_disable(obj);
pm_runtime_put_sync(obj->dev);
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
BUG_ON(!cr || !e);
arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
if (!cr)
return -EINVAL;
return arch_iommu->cr_valid(cr);
}
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
if (!e)
return NULL;
return arch_iommu->alloc_cr(obj, e);
return cr->cam & MMU_CAM_V;
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
return arch_iommu->cr_to_virt(cr);
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
u32 mask = get_cam_va_mask(cr->cam & page_size);
return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
return arch_iommu->get_pte_attr(e);
u32 attr;
attr = e->mixed << 5;
attr |= e->endian;
attr |= e->elsz >> 3;
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
return arch_iommu->fault_isr(obj, da);
u32 status, fault_addr;
status = iommu_read_reg(obj, MMU_IRQSTATUS);
status &= MMU_IRQ_MASK;
if (!status) {
*da = 0;
return 0;
}
fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
*da = fault_addr;
iommu_write_reg(obj, status, MMU_IRQSTATUS);
return status;
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
arch_iommu->tlb_read_cr(obj, cr);
cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
arch_iommu->tlb_load_cr(obj, cr);
iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
iommu_write_reg(obj, cr->ram, MMU_RAM);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
* iotlb_dump_cr - Dump an iommu tlb entry into buf
* @obj: target iommu
* @cr: contents of cam and ram register
* @buf: output buffer
**/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
char *buf)
{
BUG_ON(!cr || !buf);
return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
return cr;
}
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
struct cr_regs *cr;
if (!e)
return NULL;
if (e->da & ~(get_cam_va_mask(e->pgsz))) {
dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
e->da);
return ERR_PTR(-EINVAL);
}
cr = kmalloc(sizeof(*cr), GFP_KERNEL);
if (!cr)
return ERR_PTR(-ENOMEM);
cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
}
/**
* load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu
* @e: an iommu tlb entry info
**/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
#ifdef CONFIG_OMAP_IOMMU_DEBUG
#define pr_reg(name) \
do { \
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
bytes = snprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
goto out; \
} while (0)
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
char *p = buf;
pr_reg(REVISION);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
pr_reg(CNTL);
pr_reg(FAULT_AD);
pr_reg(TTB);
pr_reg(LOCK);
pr_reg(LD_TLB);
pr_reg(CAM);
pr_reg(RAM);
pr_reg(GFLUSH);
pr_reg(FLUSH_ENTRY);
pr_reg(READ_CAM);
pr_reg(READ_RAM);
pr_reg(EMU_FAULT_AD);
out:
return p - buf;
}
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
pm_runtime_get_sync(obj->dev);
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
pm_runtime_put_sync(obj->dev);
return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -463,6 +535,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
return p - crs;
}
/**
* iotlb_dump_cr - Dump an iommu tlb entry into buf
* @obj: target iommu
* @cr: contents of cam and ram register
* @buf: output buffer
**/
static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
char *buf)
{
char *p = buf;
/* FIXME: Need more detailed analysis of cam/ram */
p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
(cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
/**
* omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
return driver_for_each_device(&omap_iommu_driver.driver,
NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
#endif /* CONFIG_OMAP_IOMMU_DEBUG */
/*
* H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
* @obj: target iommu
* @e: an iommu tlb entry info
**/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err;
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
prefetch_iotlb_entry(obj, e);
return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/**
* iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
struct omap_iommu_domain *omap_domain = domain->priv;
if (!obj->refcount)
if (!omap_domain->iommu_dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_lock(&obj->iommu_lock);
/* an iommu device can only be attached once */
if (++obj->refcount > 1) {
dev_err(dev, "%s: already attached!\n", obj->name);
err = -EBUSY;
goto err_enable;
}
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
return obj;
err_enable:
obj->refcount--;
spin_unlock(&obj->iommu_lock);
return ERR_PTR(err);
}
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock);
if (--obj->refcount == 0)
iommu_disable(obj);
iommu_disable(obj);
obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
}
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj);
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e->da = da;
e->pa = pa;
e->valid = MMU_CAM_V;
/* FIXME: add OMAP1 support */
e->pgsz = pgsz;
e->endian = MMU_RAM_ENDIAN_LITTLE;
e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL;
oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
omap_iommu_debugfs_init();
return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
kmem_cache_destroy(iopte_cachep);
platform_driver_unregister(&omap_iommu_driver);
omap_iommu_debugfs_exit();
}
module_exit(omap_iommu_exit);
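
One pattern worth calling out now that it is spelled out inline:
iommu_report_fault() uses the usual read/latch/ack sequence for a
write-1-to-clear status register. A condensed sketch; struct mmu and the
reg_read()/reg_write() helpers are stand-ins for struct omap_iommu and
iommu_read_reg()/iommu_write_reg():

    struct mmu;					/* opaque device handle */
    u32 reg_read(struct mmu *mmu, size_t offs);
    void reg_write(struct mmu *mmu, u32 val, size_t offs);

    static u32 report_fault(struct mmu *mmu, u32 *da)
    {
        u32 status = reg_read(mmu, MMU_IRQSTATUS) & MMU_IRQ_MASK;

        if (!status) {			/* spurious interrupt */
            *da = 0;
            return 0;
        }

        *da = reg_read(mmu, MMU_FAULT_AD);	/* latch the fault address first */
        reg_write(mmu, status, MMU_IRQSTATUS);	/* then ack only the seen bits */

        return status;
    }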

drivers/iommu/omap-iommu.h

@@ -10,9 +10,8 @@
* published by the Free Software Foundation.
*/
#if defined(CONFIG_ARCH_OMAP1)
#error "iommu for this processor not implemented yet"
#endif
#ifndef _OMAP_IOMMU_H
#define _OMAP_IOMMU_H
struct iotlb_entry {
u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name;
void __iomem *regbase;
struct device *dev;
void *isr_priv;
struct iommu_domain *domain;
struct dentry *debug_dir;
unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */
/*
@@ -67,34 +65,6 @@ struct cr_regs {
};
};
/* architecture specific functions */
struct iommu_functions {
unsigned long version;
int (*enable)(struct omap_iommu *obj);
void (*disable)(struct omap_iommu *obj);
void (*set_twl)(struct omap_iommu *obj, bool on);
u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
struct iotlb_entry *e);
int (*cr_valid)(struct cr_regs *cr);
u32 (*cr_to_virt)(struct cr_regs *cr);
void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
char *buf);
u32 (*get_pte_attr)(struct iotlb_entry *e);
void (*save_ctx)(struct omap_iommu *obj);
void (*restore_ctx)(struct omap_iommu *obj);
ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
};
#ifdef CONFIG_IOMMU_API
/**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev;
}
#endif
/*
* MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* MMU Register bit definitions
*/
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0)
/* RAM */
#define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
#define get_cam_va_mask(pgsz) \
(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
extern u32 omap_iommu_arch_version(void);
extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
extern int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
extern int omap_foreach_iommu_device(void *data,
int (*fn)(struct device *, void *));
extern int omap_install_iommu_arch(const struct iommu_functions *ops);
extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
#ifdef CONFIG_OMAP_IOMMU_DEBUG
extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
void omap_iommu_debugfs_add(struct omap_iommu *obj);
void omap_iommu_debugfs_remove(struct omap_iommu *obj);
#else
static inline void omap_iommu_debugfs_init(void) { }
static inline void omap_iommu_debugfs_exit(void) { }
static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
#endif
/*
* register accessors
*/
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
__raw_writel(val, obj->regbase + offs);
}
#endif /* _OMAP_IOMMU_H */
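
The get_cam_va_mask() macro added above maps a TLB page-size field to the
virtual-address mask that selects the page base. A quick worked example with
a made-up address:

    /* 1MB section: mask 0xfff00000 keeps the section base */
    u32 va   = 0x8034a000;
    u32 base = va & get_cam_va_mask(MMU_CAM_PGSZ_1M);	/* == 0x80300000 */

iotlb_alloc_cr() relies on the same mask to reject entries whose da is not
aligned to the chosen page size.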

drivers/iommu/omap-iommu2.c (file deleted)

@@ -1,337 +0,0 @@
/*
* omap iommu: omap2/3 architecture specific functions
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iommu.h"
/*
* omap2 architecture specific register bit definitions
*/
#define IOMMU_ARCH_VERSION 0x00000011
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
#define get_cam_va_mask(pgsz) \
(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/* IOMMU errors */
#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
if (on)
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
else
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
l &= ~MMU_CNTL_MASK;
if (on)
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
else
l |= (MMU_CNTL_MMU_EN);
iommu_write_reg(obj, l, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
iommu_write_reg(obj, pa, MMU_TTB);
if (obj->has_bus_err_back)
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
__iommu_set_twl(obj, true);
return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
u32 l = iommu_read_reg(obj, MMU_CNTL);
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
{
__iommu_set_twl(obj, false);
}
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
u32 stat, da;
u32 errs = 0;
stat = iommu_read_reg(obj, MMU_IRQSTATUS);
stat &= MMU_IRQ_MASK;
if (!stat) {
*ra = 0;
return 0;
}
da = iommu_read_reg(obj, MMU_FAULT_AD);
*ra = da;
if (stat & MMU_IRQ_TLBMISS)
errs |= OMAP_IOMMU_ERR_TLB_MISS;
if (stat & MMU_IRQ_TRANSLATIONFAULT)
errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
if (stat & MMU_IRQ_EMUMISS)
errs |= OMAP_IOMMU_ERR_EMU_MISS;
if (stat & MMU_IRQ_TABLEWALKFAULT)
errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
if (stat & MMU_IRQ_MULTIHITFAULT)
errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
return errs;
}
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
iommu_write_reg(obj, cr->ram, MMU_RAM);
}
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
u32 mask = get_cam_va_mask(cr->cam & page_size);
return cr->cam & mask;
}
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e)
{
struct cr_regs *cr;
if (e->da & ~(get_cam_va_mask(e->pgsz))) {
dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
e->da);
return ERR_PTR(-EINVAL);
}
cr = kmalloc(sizeof(*cr), GFP_KERNEL);
if (!cr)
return ERR_PTR(-ENOMEM);
cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
}
static inline int omap2_cr_valid(struct cr_regs *cr)
{
return cr->cam & MMU_CAM_V;
}
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
u32 attr;
attr = e->mixed << 5;
attr |= e->endian;
attr |= e->elsz >> 3;
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
return attr;
}
static ssize_t
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
{
char *p = buf;
/* FIXME: Need more detailed analysis of cam/ram */
p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
(cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
#define pr_reg(name) \
do { \
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
bytes = snprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
goto out; \
} while (0)
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
char *p = buf;
pr_reg(REVISION);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
pr_reg(CNTL);
pr_reg(FAULT_AD);
pr_reg(TTB);
pr_reg(LOCK);
pr_reg(LD_TLB);
pr_reg(CAM);
pr_reg(RAM);
pr_reg(GFLUSH);
pr_reg(FLUSH_ENTRY);
pr_reg(READ_CAM);
pr_reg(READ_RAM);
pr_reg(EMU_FAULT_AD);
out:
return p - buf;
}
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
int i;
u32 *p = obj->ctx;
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
p[i] = iommu_read_reg(obj, i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
int i;
u32 *p = obj->ctx;
for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
iommu_write_reg(obj, p[i], i * sizeof(u32));
dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
}
BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
e->da = cr->cam & MMU_CAM_VATAG_MASK;
e->pa = cr->ram & MMU_RAM_PADDR_MASK;
e->valid = cr->cam & MMU_CAM_V;
e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
e->mixed = cr->ram & MMU_RAM_MIXED;
}
static const struct iommu_functions omap2_iommu_ops = {
.version = IOMMU_ARCH_VERSION,
.enable = omap2_iommu_enable,
.disable = omap2_iommu_disable,
.set_twl = omap2_iommu_set_twl,
.fault_isr = omap2_iommu_fault_isr,
.tlb_read_cr = omap2_tlb_read_cr,
.tlb_load_cr = omap2_tlb_load_cr,
.cr_to_e = omap2_cr_to_e,
.cr_to_virt = omap2_cr_to_virt,
.alloc_cr = omap2_alloc_cr,
.cr_valid = omap2_cr_valid,
.dump_cr = omap2_dump_cr,
.get_pte_attr = omap2_get_pte_attr,
.save_ctx = omap2_iommu_save_ctx,
.restore_ctx = omap2_iommu_restore_ctx,
.dump_ctx = omap2_iommu_dump_ctx,
};
static int __init omap2_iommu_init(void)
{
return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);
static void __exit omap2_iommu_exit(void)
{
omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");
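
The whole file above goes away because omap-iommu.c now calls the omap2
routines directly: with OMAP1 support never implemented (note the removed
CONFIG_ARCH_OMAP1 #error in the header), the struct iommu_functions dispatch
table bought nothing. The change at each call site is mechanical:

    /* before: indirect call through the single registered ops table */
    err = arch_iommu->enable(obj);

    /* after: direct call; the ops table and its (un)install API are gone */
    err = omap2_iommu_enable(obj);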

(file diff suppressed because it is too large)

include/linux/dmar.h

@@ -30,6 +30,12 @@
struct acpi_dmar_header;
#ifdef CONFIG_X86
# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
#else
# define DMAR_UNITS_SUPPORTED 64
#endif
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
@@ -120,29 +126,61 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
return 0;
}
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
return 0;
}
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
{
return 0;
}
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
}
static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
return 0;
}
#endif /* CONFIG_INTEL_IOMMU */
#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
{
return 0;
}
static inline int dmar_device_remove(void *handle)
{
return 0;
}
#endif /* CONFIG_DMAR_TABLE */
struct irte {

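The dmar.h changes rely on the usual stub pattern: with the config option
off, the header supplies inline no-ops (or maps several parse callbacks onto
the shared dmar_res_noop()), so callers compile unconditionally. Schematic
form, for a made-up CONFIG_FOO:

    #ifdef CONFIG_FOO
    extern int foo_device_add(acpi_handle handle);
    #else
    static inline int foo_device_add(void *handle)
    {
        return 0;	/* feature compiled out: succeed as a no-op */
    }
    #endif
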
include/linux/iommu.h

@@ -28,7 +28,7 @@
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_EXEC (1 << 3)
#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
@@ -62,6 +62,7 @@ enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
};
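
Note the flag rename inverts the default: mappings are executable unless the
caller passes IOMMU_NOEXEC, and the new IOMMU_CAP_NOEXEC capability advertises
whether the hardware can enforce it. A plausible caller, sketched against the
v3.19 API (a fragment, not taken from this diff):

    int ret, prot = IOMMU_READ | IOMMU_WRITE;

    /* request a non-executable mapping only when it can be enforced */
    if (iommu_capable(&platform_bus_type, IOMMU_CAP_NOEXEC))
        prot |= IOMMU_NOEXEC;

    ret = iommu_map(domain, iova, paddr, size, prot);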
/*