linux-stable/arch/arm/mach-omap2/common.h
Linus Torvalds 1f2d9ffc7a

Merge tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:

 - Improve the scalability of the CFS bandwidth unthrottling logic with a
   large number of CPUs.

 - Fix & rework various cpuidle routines, simplify interaction with the
   generic scheduler code. Add __cpuidle methods as noinstr to objtool's
   noinstr detection and fix boatloads of cpuidle bugs & quirks.

 - Add new ABI: introduce MEMBARRIER_CMD_GET_REGISTRATIONS, to query
   previously issued registrations.

 - Limit scheduler slice duration to the sysctl_sched_latency period, to
   improve scheduling granularity with a large number of SCHED_IDLE
   tasks.

 - Debuggability enhancement on sys_exit(): warn about disabled IRQs,
   but also enable them to prevent a cascade of followup problems and
   repeat warnings.

 - Fix the rescheduling logic in prio_changed_dl().

 - Micro-optimize cpufreq and sched-util methods.

 - Micro-optimize ttwu_runnable()

 - Micro-optimize the idle-scanning in update_numa_stats(),
   select_idle_capacity() and steal_cookie_task().

 - Update the RSEQ code & self-tests

 - Constify various scheduler methods

 - Remove unused methods

 - Refine __init tags

 - Documentation updates

 - Misc other cleanups, fixes

* tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (110 commits)
  sched/rt: pick_next_rt_entity(): check list_entry
  sched/deadline: Add more reschedule cases to prio_changed_dl()
  sched/fair: sanitize vruntime of entity being placed
  sched/fair: Remove capacity inversion detection
  sched/fair: unlink misfit task from cpu overutilized
  objtool: mem*() are not uaccess safe
  cpuidle: Fix poll_idle() noinstr annotation
  sched/clock: Make local_clock() noinstr
  sched/clock/x86: Mark sched_clock() noinstr
  x86/pvclock: Improve atomic update of last_value in pvclock_clocksource_read()
  x86/atomics: Always inline arch_atomic64*()
  cpuidle: tracing, preempt: Squash _rcuidle tracing
  cpuidle: tracing: Warn about !rcu_is_watching()
  cpuidle: lib/bug: Disable rcu_is_watching() during WARN/BUG
  cpuidle: drivers: firmware: psci: Dont instrument suspend code
  KVM: selftests: Fix build of rseq test
  exit: Detect and fix irq disabled state in oops
  cpuidle, arm64: Fix the ARM64 cpuidle logic
  cpuidle: mvebu: Fix duplicate flags assignment
  sched/fair: Limit sched slice duration
  ...
2023-02-20 17:41:08 -08:00


/*
 * Header for code common to all OMAP2+ machines.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
#define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
#ifndef __ASSEMBLER__
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mfd/twl.h>
#include <linux/platform_data/i2c-omap.h>
#include <linux/reboot.h>
#include <linux/irqchip/irq-omap-intc.h>
#include <asm/proc-fns.h>
#include <asm/hardware/cache-l2x0.h>
#include "i2c.h"
#define OMAP_INTC_START NR_IRQS
extern int (*omap_pm_soc_init)(void);
int omap_pm_nop_init(void);
#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
int omap3_pm_init(void);
#else
static inline int omap3_pm_init(void)
{
	return 0;
}
#endif
#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX))
int omap4_pm_init(void);
int omap4_pm_init_early(void);
#else
static inline int omap4_pm_init(void)
{
	return 0;
}
static inline int omap4_pm_init_early(void)
{
	return 0;
}
#endif
#if defined(CONFIG_PM) && (defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX))
int amx3_common_pm_init(void);
#else
static inline int amx3_common_pm_init(void)
{
	return 0;
}
#endif
#ifdef CONFIG_CACHE_L2X0
int omap_l2_cache_init(void);
#define OMAP_L2C_AUX_CTRL (L2C_AUX_CTRL_SHARED_OVERRIDE | \
			   L310_AUX_CTRL_DATA_PREFETCH | \
			   L310_AUX_CTRL_INSTR_PREFETCH)
void omap4_l2c310_write_sec(unsigned long val, unsigned reg);
#else
static inline int omap_l2_cache_init(void)
{
	return 0;
}
#define OMAP_L2C_AUX_CTRL 0
#define omap4_l2c310_write_sec NULL
#endif
#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
extern void omap5_realtime_timer_init(void);
#else
static inline void omap5_realtime_timer_init(void)
{
}
#endif
void omap2420_init_early(void);
void omap2430_init_early(void);
void omap3430_init_early(void);
void omap3630_init_early(void);
void am33xx_init_early(void);
void am35xx_init_early(void);
void ti814x_init_early(void);
void ti816x_init_early(void);
void am43xx_init_early(void);
void am43xx_init_late(void);
void omap4430_init_early(void);
void omap5_init_early(void);
void omap3_init_late(void);
void omap4430_init_late(void);
void ti81xx_init_late(void);
void am33xx_init_late(void);
void omap5_init_late(void);
void dra7xx_init_early(void);
void dra7xx_init_late(void);
#ifdef CONFIG_SOC_BUS
void omap_soc_device_init(void);
#else
static inline void omap_soc_device_init(void)
{
}
#endif
#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
void omap2xxx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void omap2xxx_restart(enum reboot_mode mode, const char *cmd)
{
}
#endif
#ifdef CONFIG_SOC_AM33XX
void am33xx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void am33xx_restart(enum reboot_mode mode, const char *cmd)
{
}
#endif
#ifdef CONFIG_ARCH_OMAP3
void omap3xxx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
{
}
#endif
#ifdef CONFIG_SOC_TI81XX
void ti81xx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void ti81xx_restart(enum reboot_mode mode, const char *cmd)
{
}
#endif
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
void omap44xx_restart(enum reboot_mode mode, const char *cmd);
#else
static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
{
}
#endif
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
void omap_barrier_reserve_memblock(void);
void omap_barriers_init(void);
#else
static inline void omap_barrier_reserve_memblock(void)
{
}
#endif
/* This gets called from mach-omap2/io.c; do not call it directly */
void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
void __init omap242x_map_io(void);
void __init omap243x_map_io(void);
void __init omap3_map_io(void);
void __init am33xx_map_io(void);
void __init omap4_map_io(void);
void __init omap5_map_io(void);
void __init dra7xx_map_io(void);
void __init ti81xx_map_io(void);
/**
 * omap_test_timeout - busy-loop, testing a condition
 * @cond: condition to test until it evaluates to true
 * @timeout: maximum number of microseconds in the timeout
 * @index: loop index (integer)
 *
 * Loop waiting for @cond to become true or until at least @timeout
 * microseconds have passed. To use, define some integer @index in the
 * calling code. After running, if @index == @timeout, then the loop has
 * timed out.
 */
#define omap_test_timeout(cond, timeout, index) \
({ \
	for (index = 0; index < timeout; index++) { \
		if (cond) \
			break; \
		udelay(1); \
	} \
})
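/*
 * Illustrative usage sketch (not part of the original header): the base
 * pointer and the OMAP_EXAMPLE_READY bit below are hypothetical names,
 * only the omap_test_timeout() macro itself is real. After the loop, the
 * index equalling the timeout value indicates that the wait timed out.
 *
 *	int i;
 *
 *	omap_test_timeout(readl_relaxed(base) & OMAP_EXAMPLE_READY, 1000, i);
 *	if (i == 1000)
 *		pr_err("timed out waiting for example device\n");
 */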
void omap_gic_of_init(void);
#ifdef CONFIG_CACHE_L2X0
extern void __iomem *omap4_get_l2cache_base(void);
#endif
struct device_node;
#ifdef CONFIG_SMP
extern void __iomem *omap4_get_scu_base(void);
#else
static inline void __iomem *omap4_get_scu_base(void)
{
	return NULL;
}
#endif
extern void gic_dist_disable(void);
extern void gic_dist_enable(void);
extern bool gic_dist_disabled(void);
extern void gic_timer_retrigger(void);
extern void _omap_smc1(u32 fn, u32 arg);
extern void omap4_sar_ram_init(void);
extern void __iomem *omap4_get_sar_ram_base(void);
extern void omap4_mpuss_early_init(void);
extern void omap_do_wfi(void);
extern void omap_interconnect_sync(void);
#ifdef CONFIG_SMP
/* Needed for secondary core boot */
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
extern void omap_auxcoreboot_addr(u32 cpu_addr);
extern u32 omap_read_auxcoreboot0(void);
extern void omap4_cpu_die(unsigned int cpu);
extern int omap4_cpu_kill(unsigned int cpu);
extern const struct smp_operations omap4_smp_ops;
#endif
extern u32 omap4_get_cpu1_ns_pa_addr(void);
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
				bool rcuidle);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
					unsigned int power_state,
					bool rcuidle)
{
	cpu_do_idle();
	return 0;
}
static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
	cpu_do_idle();
	return 0;
}
static inline int omap4_mpuss_init(void)
{
	return 0;
}
#endif
#ifdef CONFIG_ARCH_OMAP4
void omap4_secondary_startup(void);
void omap4460_secondary_startup(void);
int omap4_finish_suspend(unsigned long cpu_state);
void omap4_cpu_resume(void);
#else
static inline void omap4_secondary_startup(void)
{
}
static inline void omap4460_secondary_startup(void)
{
}
static inline int omap4_finish_suspend(unsigned long cpu_state)
{
	return 0;
}
static inline void omap4_cpu_resume(void)
{
}
#endif
#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
void omap5_secondary_startup(void);
void omap5_secondary_hyp_startup(void);
#else
static inline void omap5_secondary_startup(void)
{
}
static inline void omap5_secondary_hyp_startup(void)
{
}
#endif
struct omap_system_dma_plat_info;
void pdata_quirks_init(const struct of_device_id *);
void omap_auxdata_legacy_init(struct device *dev);
void omap_pcs_legacy_init(int irq, void (*rearm)(void));
extern struct omap_system_dma_plat_info dma_plat_info;
struct omap_sdrc_params;
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
			   struct omap_sdrc_params *sdrc_cs1);
extern void omap_reserve(void);
struct omap_hwmod;
extern int omap_dss_reset(struct omap_hwmod *);
/* SoC specific clock initializer */
int omap_clk_init(void);
#if IS_ENABLED(CONFIG_OMAP_IOMMU)
int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
				    u8 *pwrst);
#else
static inline int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev,
						  bool request, u8 *pwrst)
{
	return 0;
}
#endif
#endif /* __ASSEMBLER__ */
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */