// SPDX-License-Identifier: GPL-2.0-only
/*
 * Low-level idle sequences
 */

#include <linux/cpu.h>
#include <linux/irqflags.h>

#include <asm/barrier.h>
#include <asm/cpuidle.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void __cpuidle cpu_do_idle(void)
{
	struct arm_cpuidle_irq_context context;

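	/*
	 * With GIC priority masking in use, the save/restore helpers switch
	 * interrupt masking from the PMR over to PSTATE around the WFI, so
	 * that the wake-up signal is not blocked at the interrupt controller.
	 */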
	arm_cpuidle_save_irq_context(&context);

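	/*
	 * Complete outstanding memory accesses (full-system DSB) before WFI
	 * drops the core into a low-power state; a pending interrupt is a
	 * wake-up event, even while masked at PSTATE.
	 */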
	dsb(sy);
	wfi();

	arm_cpuidle_restore_irq_context(&context);
}

/*
 * This is our default idle handler.
 */
void __cpuidle arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
}