xen: support pirq_eoi_map

The pirq_eoi_map is a bitmap offered by Xen to check which pirqs need to
be EOI'd without having to issue a hypercall every time.
We use PHYSDEVOP_pirq_eoi_gmfn_v2 to map the bitmap; if that succeeds,
we use pirq_eoi_map to check whether a pirq needs an EOI.
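
For illustration, the interrupt-EOI path then only traps into Xen when the
check says an EOI is required; a minimal caller-side sketch (the eoi_pirq
body below is illustrative, not part of this patch):

static void eoi_pirq(struct irq_data *data)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };

        /*
         * pirq_needs_eoi() reads pirq_eoi_map when the v2 hypercall
         * succeeded, and falls back to the PIRQ_NEEDS_EOI flag otherwise.
         */
        if (pirq_needs_eoi(data->irq))
                WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
}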

Changes in v3:

- explicitly use PHYSDEVOP_pirq_eoi_gmfn_v2 rather than
PHYSDEVOP_pirq_eoi_gmfn;

- introduce pirq_check_eoi_map, a function that checks whether a pirq needs
an EOI using the map;

- rename pirq_needs_eoi to pirq_needs_eoi_flag;

- introduce a function pointer called pirq_needs_eoi that is set to the
appropriate implementation depending on the availability of
PHYSDEVOP_pirq_eoi_gmfn_v2.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 9846ff10af
parent 102b208e6b
Author: Stefano Stabellini
Date: 2012-01-30 16:21:48 +00:00
Committer: Konrad Rzeszutek Wilk
2 changed files with 44 additions and 3 deletions

drivers/xen/events.c

@@ -37,6 +37,7 @@
 #include <asm/idle.h>
 #include <asm/io_apic.h>
 #include <asm/sync_bitops.h>
+#include <asm/xen/page.h>
 #include <asm/xen/pci.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -109,6 +110,8 @@ struct irq_info {
 #define PIRQ_SHAREABLE (1 << 1)
 
 static int *evtchn_to_irq;
+static unsigned long *pirq_eoi_map;
+static bool (*pirq_needs_eoi)(unsigned irq);
 
 static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
                       cpu_evtchn_mask);
@@ -269,10 +272,14 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
         return ret;
 }
 
-static bool pirq_needs_eoi(unsigned irq)
+static bool pirq_check_eoi_map(unsigned irq)
+{
+        return test_bit(irq, pirq_eoi_map);
+}
+
+static bool pirq_needs_eoi_flag(unsigned irq)
 {
         struct irq_info *info = info_for_irq(irq);
-
         BUG_ON(info->type != IRQT_PIRQ);
 
         return info->u.pirq.flags & PIRQ_NEEDS_EOI;
@@ -1768,7 +1775,7 @@ void xen_callback_vector(void) {}
 
 void __init xen_init_IRQ(void)
 {
-        int i;
+        int i, rc;
 
         evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                 GFP_KERNEL);
@@ -1782,6 +1789,8 @@ void __init xen_init_IRQ(void)
         for (i = 0; i < NR_EVENT_CHANNELS; i++)
                 mask_evtchn(i);
 
+        pirq_needs_eoi = pirq_needs_eoi_flag;
+
         if (xen_hvm_domain()) {
                 xen_callback_vector();
                 native_init_IRQ();
@@ -1789,8 +1798,19 @@
                  * __acpi_register_gsi can point at the right function */
                 pci_xen_hvm_init();
         } else {
+                struct physdev_pirq_eoi_gmfn eoi_gmfn;
+
                 irq_ctx_init(smp_processor_id());
                 if (xen_initial_domain())
                         pci_xen_initial_domain();
+
+                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+                eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
+                if (rc != 0) {
+                        free_page((unsigned long) pirq_eoi_map);
+                        pirq_eoi_map = NULL;
+                } else
+                        pirq_needs_eoi = pirq_check_eoi_map;
         }
 }

include/xen/interface/physdev.h

@@ -38,6 +38,27 @@ struct physdev_eoi {
         uint32_t irq;
 };
 
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest used this function in that the associated event channel
+ * will automatically get unmasked. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v1 17
+/*
+ * Register a shared page for the hypervisor to indicate whether the
+ * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to
+ * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of
+ * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by
+ * Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v2 28
+struct physdev_pirq_eoi_gmfn {
+        /* IN */
+        unsigned long gmfn;
+};
+
 /*
  * Query the status of an IRQ line.
  * @arg == pointer to physdev_irq_status_query structure.
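
For illustration, registering the shared page against the new hypercall uses
the struct and constant added above, mirroring the events.c hunk; a
stand-alone sketch (the helper name register_pirq_eoi_map is hypothetical):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <xen/interface/physdev.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

static unsigned long *pirq_eoi_map;

static int register_pirq_eoi_map(void)
{
        struct physdev_pirq_eoi_gmfn eoi_gmfn;
        int rc;

        /* One zeroed page, used by Xen as a bit array indexed by PIRQ. */
        pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!pirq_eoi_map)
                return -ENOMEM;

        eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
        if (rc) {
                /* v2 not offered: keep relying on the PIRQ_NEEDS_EOI flag. */
                free_page((unsigned long)pirq_eoi_map);
                pirq_eoi_map = NULL;
        }
        return rc;
}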