Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-30 08:02:30 +00:00)

Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:

 - Łukasz Stelmach spotted a couple of issues with the decompressor

 - a couple of kdump fixes found while testing kdump

 - replace some perl with shell code

 - resolve SIGFPE breakage

 - kprobes fixes

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: fix kill( ,SIGFPE) breakage
  ARM: 8772/1: kprobes: Prohibit kprobes on get_user functions
  ARM: 8771/1: kprobes: Prohibit kprobes on do_undefinstr
  ARM: 8770/1: kprobes: Prohibit probing on optimized_callback
  ARM: 8769/1: kprobes: Fix to use get_kprobe_ctlblk after irq-disabed
  ARM: replace unnecessary perl with sed and the shell $(( )) operator
  ARM: kexec: record parent context registers for non-crash CPUs
  ARM: kexec: fix kdump register saving on panic()
  ARM: 8758/1: decompressor: restore r1 and r2 just before jumping to the kernel
  ARM: 8753/1: decompressor: add a missing parameter to the addruart macro
commit 132ce5d43a

9 changed files with 64 additions and 44 deletions

@@ -117,11 +117,9 @@ ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE

 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
-		perl -e 'while (<>) { \
-			$$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \
-			$$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \
-		}; printf "%d\n", $$bss_end - $$bss_start;')
+KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+		sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+		       -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
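
Note (not part of the patch): the new KBSS_SZ rule above uses the shell's $(( )) arithmetic instead of perl. A minimal standalone sketch of the same idea, with "vmlinux" standing in for the $(obj)/../../../../vmlinux path used by the Makefile:

    # nm prints "<hex address> <type> <symbol>"; sed rewrites the BSS start
    # symbol as "-0x<addr>" and the BSS stop symbol as "+0x<addr>", and the
    # shell's arithmetic expansion adds the two, printing the size in decimal.
    bss_sz=$(( $(nm vmlinux | \
        sed -n -e 's/^\([^ ]*\) [AB] __bss_start$/-0x\1/p' \
               -e 's/^\([^ ]*\) [AB] __bss_stop$/+0x\1/p') ))
    echo "$bss_sz"

The doubled dollar signs in the Makefile version only escape make's own expansion; the shell sees exactly the expression above.
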
@@ -29,19 +29,19 @@
 #if defined(CONFIG_DEBUG_ICEDCC)

 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c0, c5, 0
 		.endm
 #elif defined(CONFIG_CPU_XSCALE)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c8, c0, 0
 		.endm
 #else
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c1, c0, 0
@@ -57,7 +57,7 @@
 		.endm

 #if defined(CONFIG_ARCH_SA1100)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		mov	\rb, #0x80000000	@ physical base address
 #ifdef CONFIG_DEBUG_LL_SER3
 		add	\rb, \rb, #0x00050000	@ Ser3
@@ -66,8 +66,8 @@
 #endif
 		.endm
 #else
-		.macro	loadsp, rb, tmp
-		addruart \rb, \tmp
+		.macro	loadsp, rb, tmp1, tmp2
+		addruart \rb, \tmp1, \tmp2
 		.endm
 #endif
 #endif
@@ -561,8 +561,6 @@ not_relocated:	mov	r0, #0
 		bl	decompress_kernel
 		bl	cache_clean_flush
 		bl	cache_off
-		mov	r1, r7		@ restore architecture number
-		mov	r2, r8		@ restore atags pointer

 #ifdef CONFIG_ARM_VIRT_EXT
 		mrs	r0, spsr	@ Get saved CPU boot mode
@@ -1297,7 +1295,7 @@ phex:		adr	r3, phexbuf
 		b	1b

 @ puts corrupts {r0, r1, r2, r3}
-puts:		loadsp	r3, r1
+puts:		loadsp	r3, r2, r1
 1:		ldrb	r2, [r0], #1
 		teq	r2, #0
 		moveq	pc, lr
@@ -1314,8 +1312,8 @@ puts:		loadsp	r3, r1
 @ putc corrupts {r0, r1, r2, r3}
 putc:
 		mov	r2, r0
+		loadsp	r3, r1, r0
 		mov	r0, #0
-		loadsp	r3, r1
 		b	2b

 @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -1365,6 +1363,8 @@ __hyp_reentry_vectors:

 __enter_kernel:
 		mov	r0, #0		@ must be 0
+		mov	r1, r7		@ restore architecture number
+		mov	r2, r8		@ restore atags pointer
 ARM(		mov	pc, r4	)	@ call kernel
 M_CLASS(	add	r4, r4, #1 )	@ enter in Thumb mode for M class
 THUMB(		bx	r4	)	@ entry point is always ARM for A/R classes
@@ -536,4 +536,14 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm

+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)				\
+	.pushsection "_kprobe_blacklist", "aw" ;	\
+	.balign 4 ;					\
+	.long entry;					\
+	.popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
@@ -1,13 +0,0 @@
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME	0	/* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-#endif
@@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused)
 {
 	struct pt_regs regs;

-	crash_setup_regs(&regs, NULL);
+	crash_setup_regs(&regs, get_irq_regs());
 	printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
 	       smp_processor_id());
 	crash_save_cpu(&regs, smp_processor_id());
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
 		cpu_relax();
 }

+void crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+	unsigned long msecs;
+
+	if (cpus_stopped)
+		return;
+
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	smp_call_function(machine_crash_nonpanic_core, NULL, false);
+	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+		mdelay(1);
+		msecs--;
+	}
+	if (atomic_read(&waiting_for_crash_ipi) > 0)
+		pr_warn("Non-crashing CPUs did not react to IPI\n");
+
+	cpus_stopped = 1;
+}
+
 static void machine_kexec_mask_interrupts(void)
 {
 	unsigned int i;
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)

 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	unsigned long msecs;
-
 	local_irq_disable();
-
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
-	msecs = 1000; /* Wait at most a second for the other cpus to stop */
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-		mdelay(1);
-		msecs--;
-	}
-	if (atomic_read(&waiting_for_crash_ipi) > 0)
-		pr_warn("Non-crashing CPUs did not react to IPI\n");
+	crash_smp_send_stop();

 	crash_save_cpu(regs, smp_processor_id());
 	machine_kexec_mask_interrupts();
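
Note (not part of the patch): the two kexec changes above matter when kdump is actually exercised — the non-crashing CPUs now record the interrupted (parent) context via get_irq_regs(), and the stop/IPI logic now lives in crash_smp_send_stop() so the generic panic() path can stop and save the other CPUs the same way. A rough smoke test, assuming a crashkernel= reservation at boot and a capture kernel/initrd at the placeholder paths below:

    # load the capture kernel into the crash-reserved region
    kexec -p /boot/vmlinuz-capture \
          --initrd=/boot/initrd-capture.img \
          --append="root=/dev/ram0 maxcpus=1 reset_devices"
    cat /sys/kernel/kexec_crash_loaded    # should print 1 once loaded
    # force a panic (requires sysrq); the secondary CPUs' register state
    # should then appear in the per-CPU crash notes of the resulting vmcore
    echo c > /proc/sysrq-trigger
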
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
 	raw_spin_unlock_irqrestore(&undef_lock, flags);
 }

-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
 	struct undef_hook *hook;
 	unsigned long flags;
@@ -490,6 +492,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)

 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)

 /*
  * Handle FIQ similarly to NMI on x86 systems.
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)

 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)

 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)

 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)

 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)

 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)

 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)

 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif

 __get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
 	ret	lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)

 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
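
Note (not part of the patch): a quick way to check that the _ASM_NOKPROBE() annotations above took effect on a running kernel, assuming CONFIG_KPROBES=y, the kprobe event tracer enabled, and debugfs mounted at /sys/kernel/debug:

    # blacklisted symbols (including the __get_user_* helpers) should be listed here
    grep get_user /sys/kernel/debug/kprobes/blacklist
    # trying to place a kprobe on one of them should now be rejected
    echo 'p:test_probe __get_user_4' > /sys/kernel/debug/tracing/kprobe_events
    # expected to fail with "Invalid argument"
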
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
 	unsigned long flags;
 	struct kprobe *p = &op->kp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;

 	/* Save skipped registers */
 	regs->ARM_pc = (unsigned long)op->kp.addr;
 	regs->ARM_ORIG_r0 = ~0UL;

 	local_irq_save(flags);
+	kcb = get_kprobe_ctlblk();

 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)

 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)

 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_

 	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce", inst);
-		vfp_raise_sigfpe(FPE_FIXME, regs);
+		vfp_raise_sigfpe(FPE_FLTINV, regs);
 		return;
 	}