mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 00:48:50 +00:00
29c395c77a
Merge tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 irq entry updates from Thomas Gleixner:
 "The irq stack switching was moved out of the ASM entry code in the
  course of the entry code consolidation. It ended up being suboptimal
  in various ways.

  This reworks the X86 irq stack handling:

   - Make the stack switching inline so the stack pointer manipulation
     is no longer at an easy-to-find place.

   - Get rid of the unnecessary indirect call.

   - Avoid the double stack switching on interrupt return and reuse the
     interrupt stack for softirq handling.

   - An objtool fix for CONFIG_FRAME_POINTER=y builds where it got
     confused about the stack pointer manipulation"

* tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Fix stack-swizzle for FRAME_POINTER=y
  um: Enforce the usage of asm-generic/softirq_stack.h
  x86/softirq/64: Inline do_softirq_own_stack()
  softirq: Move do_softirq_own_stack() to generic asm header
  softirq: Move __ARCH_HAS_DO_SOFTIRQ to Kconfig
  x86: Select CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
  x86/softirq: Remove indirection in do_softirq_own_stack()
  x86/entry: Use run_sysvec_on_irqstack_cond() for XEN upcall
  x86/entry: Convert device interrupts to inline stack switching
  x86/entry: Convert system vectors to irq stack macro
  x86/irq: Provide macro for inlining irq stack switching
  x86/apic: Split out spurious handling code
  x86/irq/64: Adjust the per CPU irq stack pointer by 8
  x86/irq: Sanitize irq stack tracking
  x86/entry: Fix instrumentation annotation
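As a rough illustration of the "inline stack switching" items above, here is a minimal userspace sketch (x86-64, GCC/Clang extended asm) of the idea: save the current stack pointer on the alternate stack, point %rsp at it, call the handler, then pop the old %rsp back. The names alt_stack, call_on_alt_stack() and demo_handler() are invented for this example; the real kernel macros (arch/x86/include/asm/irq_stack.h, e.g. run_sysvec_on_irqstack_cond() from the commit list) differ in detail, notably by emitting a direct call to the handler symbol and by tracking per-CPU irq stack usage.

/*
 * Hypothetical userspace demo, not kernel code: run demo_handler() on a
 * separate, statically allocated stack by switching %rsp inline.
 */
#include <stdio.h>

#define ALT_STACK_SIZE	(16 * 1024)

/* 16-byte alignment keeps the switched stack ABI-aligned at the call. */
static _Alignas(16) char alt_stack[ALT_STACK_SIZE];

static void demo_handler(void)
{
	int marker;	/* lives on the alternate stack */

	printf("handler running near %p\n", (void *)&marker);
}

static void call_on_alt_stack(void (*fn)(void), void *top)
{
	/* Caller-saved FP state is ignored here for brevity. */
	asm volatile("movq	%%rsp, -16(%[top])\n\t"	/* save old RSP on the new stack */
		     "leaq	-16(%[top]), %%rsp\n\t"	/* switch RSP, still 16-byte aligned */
		     "call	*%[fn]\n\t"		/* run the handler on that stack */
		     "popq	%%rsp"			/* restore the original RSP */
		     :
		     : [fn] "r" (fn), [top] "r" (top)
		     : "rax", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "cc", "memory");
}

int main(void)
{
	int marker;

	printf("main    running near %p\n", (void *)&marker);
	call_on_alt_stack(demo_handler, alt_stack + ALT_STACK_SIZE);
	return 0;
}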
397 lines
11 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

atomic_t irq_err_count;
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, " Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, " Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, " Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, " IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, " APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, " Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, " Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, " Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, " Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, " Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, " Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, " Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, " Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, " Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, " Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, " Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, " Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, " Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, " Posted-interrupt wakeup event\n");
#endif
	return 0;
}
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}
/*
 * On 64-bit the irq stack switch already happened inline in the entry
 * code, so the handler can be invoked directly; 32-bit switches stacks
 * in __handle_irq().
 */
static __always_inline void handle_irq(struct irq_desc *desc,
				       struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_64))
		generic_handle_irq_desc(desc);
	else
		__handle_irq(desc, regs);
}
/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* entry code tells RCU that we're not quiescent. Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		handle_irq(desc, regs);
	} else {
		ack_APIC_irq();

		if (desc == VECTOR_UNUSED) {
			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	set_irq_regs(old_regs);
}
#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	set_irq_regs(old_regs);
}
#endif
#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		/*
		 * The local APIC IRR is 256 bits wide, exposed as eight
		 * 32-bit registers spaced 0x10 apart; check whether this
		 * vector is still pending.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{
	if (x86_thermal_enabled())
		intel_thermal_interrupt();
	else
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	ack_APIC_irq();
}
#endif