/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 */

#ifndef __DMAR_H__
#define __DMAR_H__

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>

struct intel_iommu;

#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct dmar_drhd_unit {
        struct list_head list;          /* list of drhd units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        u64     reg_base_addr;          /* register base address */
        struct pci_dev **devices;       /* target device array */
        int     devices_cnt;            /* target device count */
        u16     segment;                /* PCI domain */
        u8      ignored:1;              /* ignore drhd */
        u8      include_all:1;
        struct intel_iommu *iommu;
};

extern struct list_head dmar_drhd_units;

#define for_each_drhd_unit(drhd) \
        list_for_each_entry(drhd, &dmar_drhd_units, list)

#define for_each_active_iommu(i, drhd)                                  \
        list_for_each_entry(drhd, &dmar_drhd_units, list)               \
                if (i=drhd->iommu, drhd->ignored) {} else

#define for_each_iommu(i, drhd)                                         \
        list_for_each_entry(drhd, &dmar_drhd_units, list)               \
                if (i=drhd->iommu, 0) {} else
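
/*
 * Usage sketch (illustrative only, not part of the original header):
 * walking the DRHD units discovered from the ACPI DMAR table.  Assumes
 * dmar_table_init() has already populated dmar_drhd_units; "drhd" and
 * "iommu" are caller-provided locals.
 *
 *      struct dmar_drhd_unit *drhd;
 *      struct intel_iommu *iommu;
 *
 *      for_each_active_iommu(iommu, drhd) {
 *              // here "iommu" is drhd->iommu and drhd->ignored is clear
 *              pr_info("DRHD at 0x%llx, segment %u\n",
 *                      (unsigned long long)drhd->reg_base_addr,
 *                      drhd->segment);
 *      }
 */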

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);

/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);

extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
#else
static inline int detect_intel_iommu(void)
{
        return -ENODEV;
}

static inline int dmar_table_init(void)
{
        return -ENODEV;
}

static inline int enable_drhd_fault_handling(void)
{
        return -1;
}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
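
/*
 * Rough boot-time ordering of the entry points above, as the x86 code
 * uses them (a sketch, not a contract): detect_intel_iommu() checks for
 * a DMAR ACPI table early, dmar_table_init() parses it and builds the
 * DRHD/RMRR lists, and dmar_dev_scope_init() resolves each unit's device
 * scope to actual struct pci_dev pointers.
 *
 *      detect_intel_iommu();           // early detection
 *      ...
 *      if (dmar_table_init())
 *              return;                 // no usable DMAR table
 *      dmar_dev_scope_init();
 */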

struct irte {
        union {
                struct {
                        __u64   present         : 1,
                                fpd             : 1,
                                dst_mode        : 1,
                                redir_hint      : 1,
                                trigger_mode    : 1,
                                dlvry_mode      : 3,
                                avail           : 4,
                                __reserved_1    : 4,
                                vector          : 8,
                                __reserved_2    : 8,
                                dest_id         : 32;
                };
                __u64 low;
        };

        union {
                struct {
                        __u64   sid             : 16,
                                sq              : 2,
                                svt             : 2,
                                __reserved_3    : 44;
                };
                __u64 high;
        };
};
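
/*
 * Sketch of how a remap-table entry might be filled before being pushed
 * to hardware (illustrative; "irq", "vector" and "apic_id" are values the
 * caller already holds, and the concrete field encodings are assumptions,
 * done for real in the IO-APIC/MSI code).  The anonymous unions let the
 * low/high 64-bit halves be read or written whole once the bitfields are
 * set.
 *
 *      struct irte irte;
 *
 *      memset(&irte, 0, sizeof(irte));
 *      irte.present = 1;
 *      irte.dst_mode = 0;              // physical destination mode
 *      irte.trigger_mode = 0;          // edge triggered
 *      irte.dlvry_mode = 0;            // fixed delivery
 *      irte.vector = vector;           // vector assigned to the irq
 *      irte.dest_id = apic_id;         // destination APIC ID
 *      modify_irte(irq, &irte);        // update the hardware table entry
 */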

#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int intr_remapping_supported(void);
extern int enable_intr_remapping(int);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);

extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
                        u16 sub_handle);
extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
extern int free_irte(int irq);

extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
extern struct intel_iommu *map_hpet_to_ir(u8 id);
extern int set_ioapic_sid(struct irte *irte, int apic);
extern int set_hpet_sid(struct irte *irte, u8 id);
extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        return -1;
}
static inline int modify_irte(int irq, struct irte *irte_modified)
{
        return -1;
}
static inline int free_irte(int irq)
{
        return -1;
}
static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        return -1;
}
static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
                               u16 sub_handle)
{
        return -1;
}
static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        return NULL;
}
static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{
        return NULL;
}
static inline struct intel_iommu *map_hpet_to_ir(unsigned int hpet_id)
{
        return NULL;
}
static inline int set_ioapic_sid(struct irte *irte, int apic)
{
        return 0;
}
static inline int set_hpet_sid(struct irte *irte, u8 id)
{
        return -1;
}
static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        return 0;
}

#define intr_remapping_enabled  (0)

static inline int enable_intr_remapping(int eim)
{
        return -1;
}

static inline void disable_intr_remapping(void)
{
}

static inline int reenable_intr_remapping(int eim)
{
        return 0;
}

#endif /* CONFIG_INTR_REMAP */
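
/*
 * Typical per-irq life cycle with interrupt remapping enabled, roughly as
 * the x86 MSI/IO-APIC code drives this API (a sketch, not a definitive
 * sequence; "pdev", "irq" and the irte contents are caller state):
 * allocate an index in the remap table, fill and program the irte, then
 * release the index when the irq is torn down.
 *
 *      struct intel_iommu *iommu = map_dev_to_ir(pdev);
 *      struct irte irte;
 *      int index;
 *
 *      index = alloc_irte(iommu, irq, 1);      // one IRTE for this irq
 *      if (index < 0)
 *              return index;
 *      // fill irte.present/vector/dest_id..., then stamp the source-id
 *      set_msi_sid(&irte, pdev);
 *      modify_irte(irq, &irte);
 *      ...
 *      free_irte(irq);                         // on teardown
 */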

/* Can't use the common MSI interrupt functions
 * since DMAR is not a pci device
 */
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
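
/*
 * How the DMAR fault path is wired up, roughly as done in the DMAR code
 * (a sketch under that assumption, not a definitive sequence): an irq is
 * allocated for the iommu, the architecture hooks it to the dmar_msi_*
 * operations above, and dmar_fault() is installed as the handler.
 *
 *      irq = create_irq();
 *      iommu->irq = irq;
 *      arch_setup_dmar_msi(irq);
 *      request_irq(irq, dmar_fault, 0, "dmar_fault", iommu);
 */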

#ifdef CONFIG_DMAR
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct pci_dev **devices;       /* target devices */
        int     devices_cnt;            /* target device count */
};

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)
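
/*
 * Illustrative walk over the RMRR (Reserved Memory Region Reporting)
 * units, e.g. to log the ranges that firmware requires to remain mapped
 * for a device (a sketch; the real identity-mapping work is done in the
 * DMAR driver, not here):
 *
 *      struct dmar_rmrr_unit *rmrr;
 *
 *      for_each_rmrr_units(rmrr) {
 *              pr_info("RMRR: 0x%llx - 0x%llx\n",
 *                      (unsigned long long)rmrr->base_address,
 *                      (unsigned long long)rmrr->end_address);
 *      }
 */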

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct pci_dev **devices;       /* target devices */
        int     devices_cnt;            /* target device count */
        u8      include_all:1;          /* include all ports */
};

extern int intel_iommu_init(void);
#else /* !CONFIG_DMAR: */
static inline int intel_iommu_init(void) { return -ENODEV; }
#endif /* CONFIG_DMAR */

#endif /* __DMAR_H__ */