powerpc: Use user_mode() macro when possible
There is a nice macro to check user mode. Use it instead of open-coding an AND with MSR_PR, to increase readability and avoid having to comment what that AND is for.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/fbf74887dcf1f1ba9e1680fc3247cbb581b00662.1708078228.git.christophe.leroy@csgroup.eu
commit d5835fb60b
parent 9e00743aba
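For readers who have not seen the macro, the whole change boils down to replacing an open-coded MSR_PR test with the user_mode() helper (declared for powerpc in arch/powerpc/include/asm/ptrace.h). The following is a minimal, self-contained sketch of the idea only; user_mode_sketch(), the toy struct pt_regs and the MSR_PR value used here are illustrative stand-ins rather than the kernel's actual definitions, which may differ between versions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel definitions (hypothetical values/types). */
#define MSR_PR	0x4000UL	/* "problem state" (user mode) bit in the MSR */

struct pt_regs {
	unsigned long msr;	/* saved Machine State Register */
};

/* Sketch of what user_mode() amounts to on powerpc: test the MSR_PR bit. */
static inline bool user_mode_sketch(const struct pt_regs *regs)
{
	return (regs->msr & MSR_PR) != 0;
}

int main(void)
{
	struct pt_regs kernel_ctx = { .msr = 0 };
	struct pt_regs user_ctx   = { .msr = MSR_PR };

	/* Before the patch the test was spelled out by hand: if (regs->msr & MSR_PR) ... */
	/* After the patch the intent reads directly:         if (user_mode(regs)) ...   */
	printf("kernel_ctx is user mode? %d\n", user_mode_sketch(&kernel_ctx));	/* prints 0 */
	printf("user_ctx   is user mode? %d\n", user_mode_sketch(&user_ctx));	/* prints 1 */
	return 0;
}

Every hunk below is the same mechanical substitution: (regs->msr & MSR_PR) tests become user_mode(regs), and !(regs->msr & MSR_PR) tests become !user_mode(regs), with no change in behaviour.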
@@ -97,7 +97,7 @@ DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
 
 static inline bool is_implicit_soft_masked(struct pt_regs *regs)
 {
-	if (regs->msr & MSR_PR)
+	if (user_mode(regs))
 		return false;
 
 	if (regs->nip >= (unsigned long)__end_soft_masked)
@@ -31,7 +31,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
 	user_exit_irqoff();
 
 	BUG_ON(regs_is_unrecoverable(regs));
-	BUG_ON(!(regs->msr & MSR_PR));
+	BUG_ON(!user_mode(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));
 
 #ifdef CONFIG_PPC_PKEY
@@ -404,7 +404,7 @@ noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
 		return;
 	if (!(regs->msr & MSR_HV))
 		return;
-	if (regs->msr & MSR_PR)
+	if (user_mode(regs))
 		return;
 
 	/*
@@ -1510,7 +1510,7 @@ static void do_program_check(struct pt_regs *regs)
 		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
 			bugaddr += PAGE_OFFSET;
 
-		if (!(regs->msr & MSR_PR) && /* not user-mode */
+		if (!user_mode(regs) &&
 		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
 			regs_add_return_ip(regs, 4);
 			return;
@@ -1429,7 +1429,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 			return 1;
 
 		case 18:	/* rfid, scary */
-			if (regs->msr & MSR_PR)
+			if (user_mode(regs))
 				goto priv;
 			op->type = RFI;
 			return 0;
@@ -1742,13 +1742,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 			return 1;
 #endif
 		case 83:	/* mfmsr */
-			if (regs->msr & MSR_PR)
+			if (user_mode(regs))
 				goto priv;
 			op->type = MFMSR;
 			op->reg = rd;
 			return 0;
 		case 146:	/* mtmsr */
-			if (regs->msr & MSR_PR)
+			if (user_mode(regs))
 				goto priv;
 			op->type = MTMSR;
 			op->reg = rd;
@@ -1756,7 +1756,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 			return 0;
#ifdef CONFIG_PPC64
 		case 178:	/* mtmsrd */
-			if (regs->msr & MSR_PR)
+			if (user_mode(regs))
 				goto priv;
 			op->type = MTMSR;
 			op->reg = rd;
@@ -3437,14 +3437,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 		 * stored in the thread_struct. If the instruction is in
 		 * the kernel, we must not touch the state in the thread_struct.
 		 */
-		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+		if (!user_mode(regs) && !(regs->msr & MSR_FP))
 			return 0;
 		err = do_fp_load(op, ea, regs, cross_endian);
 		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
-		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
 			return 0;
 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
 		break;
@@ -3459,7 +3459,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 		 */
 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
-		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+		if (!user_mode(regs) && !(regs->msr & msrbit))
 			return 0;
 		err = do_vsx_load(op, ea, regs, cross_endian);
 		break;
@@ -3495,8 +3495,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 		}
 #endif
 		if ((op->type & UPDATE) && size == sizeof(long) &&
-		    op->reg == 1 && op->update_reg == 1 &&
-		    !(regs->msr & MSR_PR) &&
+		    op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
 			err = handle_stack_update(ea, regs);
 			break;
@@ -3508,14 +3507,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
-		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+		if (!user_mode(regs) && !(regs->msr & MSR_FP))
 			return 0;
 		err = do_fp_store(op, ea, regs, cross_endian);
 		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
-		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+		if (!user_mode(regs) && !(regs->msr & MSR_VEC))
 			return 0;
 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
 		break;
@@ -3530,7 +3529,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 		 */
 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
-		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+		if (!user_mode(regs) && !(regs->msr & msrbit))
 			return 0;
 		err = do_vsx_store(op, ea, regs, cross_endian);
 		break;
@@ -256,7 +256,7 @@ static bool regs_sipr(struct pt_regs *regs)
 
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 {
-	if (regs->msr & MSR_PR)
+	if (user_mode(regs))
 		return PERF_RECORD_MISC_USER;
 	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
 		return PERF_RECORD_MISC_HYPERVISOR;
@@ -1818,8 +1818,8 @@ static void print_bug_trap(struct pt_regs *regs)
 	const struct bug_entry *bug;
 	unsigned long addr;
 
-	if (regs->msr & MSR_PR)
-		return;		/* not in kernel */
+	if (user_mode(regs))
+		return;
 	addr = regs->nip;	/* address of trap instruction */
 	if (!is_kernel_addr(addr))
 		return;