diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..a2232d07be9d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
 }
 #endif
 
-#ifdef CONFIG_COMPAT
 int aarch32_break_handler(struct pt_regs *regs);
-#else
-static int aarch32_break_handler(struct pt_regs *regs)
-{
-	return -EFAULT;
-}
-#endif
 
 #endif	/* __ASSEMBLY */
 #endif	/* __KERNEL__ */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index a6e1750369ef..7a18fabbe0f6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
+#include <linux/reboot.h>
 
 struct pt_regs;
 
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
 void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..1788bf6b471f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
 
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
-		arm_pm_restart('h', cmd);
+		arm_pm_restart(reboot_mode, cmd);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4a053b3d1728..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -199,13 +199,6 @@ asmlinkage void secondary_start_kernel(void)
 	raw_spin_lock(&boot_lock);
 	raw_spin_unlock(&boot_lock);
 
-	/*
-	 * Enable local interrupts.
-	 */
-	notify_cpu_starting(cpu);
-	local_irq_enable();
-	local_fiq_enable();
-
 	/*
 	 * OK, now it's safe to let the boot CPU continue. Wait for
 	 * the CPU migration code to notice that the CPU is online
@@ -214,6 +207,14 @@ asmlinkage void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
+	/*
+	 * Enable GIC and timers.
+	 */
+	notify_cpu_starting(cpu);
+
+	local_irq_enable();
+	local_fiq_enable();
+
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
	 * it.
	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
 	 */
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
 			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 			 * starvation.
 			 */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			goto retry;
 		}
 	}
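
A note on the fault.c change for reviewers: it folds the old access_error() helper into a one-time, up-front computation of the required VMA permissions (vm_flags) and the handle_mm_fault() flags (mm_flags). Below is a minimal user-space sketch of that decode logic; it is illustrative only, not kernel code. The VM_*, FAULT_FLAG_* and ESR_* constants are redefined locally so the example builds standalone; their values are copied from v3.10-era kernel headers as an assumption about this tree.

/*
 * Illustrative sketch (not kernel code) of the ESR decode that the
 * fault.c hunks above move into do_page_fault(). Constant values are
 * assumed to mirror v3.10-era kernel headers and are redefined here
 * only so the example compiles in user space.
 */
#include <stdio.h>

#define VM_READ			0x00000001UL
#define VM_WRITE		0x00000002UL
#define VM_EXEC			0x00000004UL

#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_ALLOW_RETRY	0x08
#define FAULT_FLAG_KILLABLE	0x20

#define ESR_WRITE		(1 << 6)	/* WnR: write, not read */
#define ESR_CM			(1 << 8)	/* cache maintenance op */
#define ESR_LNX_EXEC		(1 << 24)	/* software-set: exec fault */

static void decode_esr(unsigned int esr, unsigned long *vm_flags,
		       unsigned int *mm_flags)
{
	/* Default: any of read/write/exec on the VMA satisfies the fault. */
	*vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	*mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (esr & ESR_LNX_EXEC) {
		/* Instruction fetch: the VMA must be executable. */
		*vm_flags = VM_EXEC;
	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
		/*
		 * Write fault: the VMA must be writable. Cache maintenance
		 * operations report WnR=1 but must not be treated as writes,
		 * hence the ESR_CM exclusion.
		 */
		*vm_flags = VM_WRITE;
		*mm_flags |= FAULT_FLAG_WRITE;
	}
}

int main(void)
{
	unsigned int cases[] = { 0, ESR_WRITE, ESR_WRITE | ESR_CM, ESR_LNX_EXEC };
	unsigned long vm_flags;
	unsigned int mm_flags, i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		decode_esr(cases[i], &vm_flags, &mm_flags);
		printf("esr=0x%08x -> vm_flags=0x%lx mm_flags=0x%x\n",
		       cases[i], vm_flags, mm_flags);
	}
	return 0;
}

Built with a plain cc, this prints the vm_flags/mm_flags pairs for a read, a write, a cache-maintenance and an exec fault, matching the branches added in do_page_fault(); __do_page_fault() then needs only the single test !(vma->vm_flags & vm_flags) where access_error() used to be called.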