header cleanups for 6.8

The goal is to get sched.h down to a type-only header, so the main thing
 happening in this patchset is splitting out various _types.h headers and
 dependency fixups, as well as moving some things out of sched.h to
 better locations.
 
 This is prep work for the memory allocation profiling patchset, which
 adds new sched.h interdependencies.
 
 Testing - it's been in -next, and fixes from pretty much all
 architectures have percolated in - nothing major.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEKnAFLkS8Qha+jvQrE6szbY3KbnYFAmWfBwwACgkQE6szbY3K
 bnZPwBAAmuRojXaeWxi01IPIOehSGDe68vw44PR9glEMZvxdnZuPOdvE4/+245/L
 bRKU2WBCjBUokUbV9msIShwRkFTZAmEMPNfPAAsFMA+VXeDYHKB+ZRdwTggNAQ+I
 SG6fZgh5m0HsewCDxU8oqVHkjVq4fXn0cy+aL6xLEd9gu67GoBzX2pDieS2Kvy6j
 jnyoKTxFwb+LTQgph0P4EIpq5I2umAsdLwdSR8EJ+8e9NiNvMo1pI00Lx/ntAnFZ
 JftWUJcMy3TQ5u1GkyfQN9y/yThX1bZK5GvmHS9SJ2Dkacaus5d+xaKCHtRuFS1I
 7C6b8PsNgRczUMumBXus44HdlNfNs1yU3lvVxFvBIPE1qC9pYRHrkWIXXIocXLLC
 oxTEJ6B2G3BQZVQgLIA4fOaxMVhmvKffi/aEZLi9vN9VVosd1a6XNKI6KbyRnXFp
 GSs9qDqszhn5I3GYNlDNQTc/8UsRlhPFgS6nS0By6QnvxtGi9QkU2tBRBsXvqwCy
 cLoCYIhc2tvugHvld70dz26umiJ4rnmxGlobStNoigDvIKAIUt1UmIdr1so8P8eH
 xehnL9ZcOX6xnANDL0AqMFFHV6I58CJynhFdUoXfVQf/DWLGX48mpi9LVNsYBzsI
 CAwVOAQ0UjGrpdWmJ9ueY/ABYqg9vRjzaDEXQ+MhAYO55CLaVsg=
 =3tyT
 -----END PGP SIGNATURE-----

Merge tag 'header_cleanup-2024-01-10' of https://evilpiepirate.org/git/bcachefs

Pull header cleanups from Kent Overstreet:
 "The goal is to get sched.h down to a type only header, so the main
  thing happening in this patchset is splitting out various _types.h
  headers and dependency fixups, as well as moving some things out of
  sched.h to better locations.

   This is prep work for the memory allocation profiling patchset, which
   adds new sched.h interdependencies"
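
A minimal sketch of the _types.h split pattern the series applies, using a
hypothetical foo.h rather than any header from the series: the type
definitions move into foo_types.h, which pulls in only other types-only
headers, while foo.h keeps the API, the inline helpers, and the heavier
includes they need.

/* include/linux/foo_types.h -- hypothetical, types only */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FOO_TYPES_H
#define _LINUX_FOO_TYPES_H

#include <linux/spinlock_types.h>	/* types-only dependency, not spinlock.h */
#include <linux/types.h>

struct foo {
	spinlock_t	lock;
	int		state;
};

#endif /* _LINUX_FOO_TYPES_H */

/* include/linux/foo.h -- hypothetical, keeps the API */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FOO_H
#define _LINUX_FOO_H

#include <linux/foo_types.h>
#include <linux/spinlock.h>	/* full locking API for the helper below */

static inline void foo_lock(struct foo *f)
{
	spin_lock(&f->lock);
}

#endif /* _LINUX_FOO_H */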

* tag 'header_cleanup-2024-01-10' of https://evilpiepirate.org/git/bcachefs: (51 commits)
  Kill sched.h dependency on rcupdate.h
  kill unnecessary thread_info.h include
  Kill unnecessary kernel.h include
  preempt.h: Kill dependency on list.h
  rseq: Split out rseq.h from sched.h
  LoongArch: signal.c: add header file to fix build error
  restart_block: Trim includes
  lockdep: move held_lock to lockdep_types.h
  sem: Split out sem_types.h
  uidgid: Split out uidgid_types.h
  seccomp: Split out seccomp_types.h
  refcount: Split out refcount_types.h
  uapi/linux/resource.h: fix include
  x86/signal: kill dependency on time.h
  syscall_user_dispatch.h: split out *_types.h
  mm_types_task.h: Trim dependencies
  Split out irqflags_types.h
  ipc: Kill bogus dependency on spinlock.h
  shm: Slim down dependencies
  workqueue: Split out workqueue_types.h
  ...
Linus Torvalds 2024-01-10 16:43:55 -08:00
commit 78273df7f6
119 changed files with 1061 additions and 774 deletions
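
For a file that only embeds one of these objects, the win is that the struct
layout now comes from a small _types.h header, and only the code that calls
the API pulls in the full header. A hedged sketch of that usage split, with a
hypothetical driver and the mutex_types.h added by this series:

/* my_driver.h -- hypothetical: embedding a mutex needs only the type */
#include <linux/mutex_types.h>

struct my_driver_state {
	struct mutex	lock;	/* complete type comes from mutex_types.h */
	int		users;
};

/* my_driver.c -- only code that actually takes the lock needs the full API */
#include <linux/mutex.h>
#include "my_driver.h"

static void my_driver_get(struct my_driver_state *st)
{
	mutex_lock(&st->lock);
	st->users++;
	mutex_unlock(&st->lock);
}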

View File

@ -13,8 +13,8 @@
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/percpu.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>

View File

@ -28,6 +28,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

View File

@ -15,6 +15,7 @@
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/rseq.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>

View File

@ -8,6 +8,7 @@
#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>

View File

@ -336,6 +336,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
struct vm_area_struct;
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{

View File

@ -33,6 +33,7 @@
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
#include <asm/asm-offsets.h>
#include <asm/shmbuf.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */

View File

@ -3,6 +3,7 @@
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/rseq.h>
#include <linux/sched/debug.h> /* for show_regs */
#include <asm/kup.h>

View File

@ -20,6 +20,7 @@
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/rcupdate_wait.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

View File

@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/rseq.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>

View File

@ -2,6 +2,7 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#include <linux/build_bug.h>
#include <linux/compiler.h>
#ifndef __ASSEMBLY__

View File

@ -5,6 +5,7 @@
#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>
#include <asm/cpufeature.h>
DECLARE_PER_CPU(unsigned long, cpu_dr7);

View File

@ -5,6 +5,8 @@
#ifndef _ASM_X86_FPU_H
#define _ASM_X86_FPU_H
#include <asm/page_types.h>
/*
* The legacy x87 FPU state format, as saved by FSAVE and
* restored by the FRSTOR instructions:

View File

@ -6,6 +6,10 @@
#include <asm/paravirt_types.h>
#ifndef __ASSEMBLY__
struct mm_struct;
#endif
#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

View File

@ -5,6 +5,7 @@
#ifdef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>

View File

@ -24,8 +24,8 @@
#else /* ...!ASSEMBLY */
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#ifdef CONFIG_SMP
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"

View File

@ -6,7 +6,6 @@
#include <asm/percpu.h>
#include <asm/current.h>
#include <linux/thread_info.h>
#include <linux/static_call_types.h>
/* We use the MSB mostly because its available */

View File

@ -4,7 +4,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/time.h>
#include <linux/compiler.h>
/* Avoid too many header ordering problems. */

View File

@ -2,6 +2,7 @@
/*
* x86 FPU bug checks:
*/
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
/*

View File

@ -27,6 +27,7 @@
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <asm/processor.h>
#include <asm/ucontext.h>

View File

@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
#include <linux/smp.h>
#include <linux/export.h>

View File

@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/utsname.h>

View File

@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>
#include "../base.h"

View File

@ -23,6 +23,8 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/pid.h>
#include <linux/slab.h>
#include "lima_device.h"

View File

@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/irqchip/arm-gic-v4.h>

View File

@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
#include <linux/types.h>
#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

View File

@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>

View File

@ -66,6 +66,7 @@
#include <linux/coredump.h>
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>

View File

@ -36,6 +36,7 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
struct kern_ipc_perm;
struct audit_krule {
u32 pflags;

View File

@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;

View File

@ -13,13 +13,13 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
#include <linux/rbtree.h>
#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/percpu-defs.h>
#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
@ -59,14 +59,6 @@ enum hrtimer_mode {
HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
/*
* Return values for the callback function
*/
enum hrtimer_restart {
HRTIMER_NORESTART, /* Timer is not restarted */
HRTIMER_RESTART, /* Timer must be restarted */
};
/*
* Values to track state of the timer
*
@ -94,38 +86,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
/**
* struct hrtimer - the basic hrtimer structure
* @node: timerqueue node, which also manages node.expires,
* the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
* which the timer is based. Is setup by adding
* slack to the _softexpires value. For non range timers
* identical to _softexpires.
* @_softexpires: the absolute earliest expiry time of the hrtimer.
* The time which was given as expiry time when the timer
* was armed.
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
* @is_soft: Set if hrtimer will be expired in soft interrupt context.
* @is_hard: Set if hrtimer will be expired in hard interrupt context
* even on RT.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
struct hrtimer {
struct timerqueue_node node;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
u8 is_soft;
u8 is_hard;
};
/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HRTIMER_TYPES_H
#define _LINUX_HRTIMER_TYPES_H
#include <linux/types.h>
#include <linux/timerqueue_types.h>
struct hrtimer_clock_base;
/*
* Return values for the callback function
*/
enum hrtimer_restart {
HRTIMER_NORESTART, /* Timer is not restarted */
HRTIMER_RESTART, /* Timer must be restarted */
};
/**
* struct hrtimer - the basic hrtimer structure
* @node: timerqueue node, which also manages node.expires,
* the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
* which the timer is based. Is setup by adding
* slack to the _softexpires value. For non range timers
* identical to _softexpires.
* @_softexpires: the absolute earliest expiry time of the hrtimer.
* The time which was given as expiry time when the timer
* was armed.
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
* @is_soft: Set if hrtimer will be expired in soft interrupt context.
* @is_hard: Set if hrtimer will be expired in hard interrupt context
* even on RT.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
struct hrtimer {
struct timerqueue_node node;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
u8 is_soft;
u8 is_hard;
};
#endif /* _LINUX_HRTIMER_TYPES_H */

View File

@ -2,7 +2,7 @@
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>

View File

@ -12,6 +12,7 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
#include <linux/cleanup.h>
#include <asm/irqflags.h>
@ -34,19 +35,6 @@
#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-task IRQ trace events information. */
struct irqtrace_events {
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
};
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQFLAGS_TYPES_H
#define _LINUX_IRQFLAGS_TYPES_H
#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-task IRQ trace events information. */
struct irqtrace_events {
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
};
#endif
#endif /* _LINUX_IRQFLAGS_TYPES_H */

View File

@ -9,6 +9,8 @@
#ifndef _LINUX_KMSAN_TYPES_H
#define _LINUX_KMSAN_TYPES_H
#include <linux/types.h>
/* These constants are defined in the MSan LLVM instrumentation pass. */
#define KMSAN_RETVAL_SIZE 800
#define KMSAN_PARAM_SIZE 800

View File

@ -21,12 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
#include <linux/time.h>
#include <linux/jiffies.h>
#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value

View File

@ -82,63 +82,6 @@ struct lock_chain {
u64 chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS 13
#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY -1
struct held_lock {
/*
* One-way hash of the dependency chain up to this point. We
* hash the hashes step by step as the dependency chain grows.
*
* We use it for dependency-caching and we skip detection
* passes and dependency-updates if there is a cache-hit, so
* it is absolutely critical for 100% coverage of the validator
* to have a unique key value for every unique dependency path
* that can occur in the system, to make a unique hash value
* as likely as possible - hence the 64-bit width.
*
* The task struct holds the current hash value (initialized
* with zero), here we store the previous hash value:
*/
u64 prev_chain_key;
unsigned long acquire_ip;
struct lockdep_map *instance;
struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
/*
* class_idx is zero-indexed; it points to the element in
* lock_classes this held lock instance belongs to. class_idx is in
* the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
*/
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
* contexts nest ontop of process context chains, but we 'separate'
* the hashes by starting with 0 if we cross into an interrupt
* context, and we also keep do not add cross-context lock
* dependencies - the lock usage graph walking covers that area
* anyway, and we'd just unnecessarily increase the number of
* dependencies otherwise. [Note: hardirq and softirq contexts
* are separated from each other too.]
*
* The following field is used to detect when we cross into an
* interrupt context:
*/
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int sync:1;
unsigned int references:11; /* 32 bits */
unsigned int pin_count;
};
/*
* Initialization, self-test and debugging-output methods:
*/

View File

@ -198,6 +198,63 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
#define MAX_LOCKDEP_KEYS_BITS 13
#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY -1
struct held_lock {
/*
* One-way hash of the dependency chain up to this point. We
* hash the hashes step by step as the dependency chain grows.
*
* We use it for dependency-caching and we skip detection
* passes and dependency-updates if there is a cache-hit, so
* it is absolutely critical for 100% coverage of the validator
* to have a unique key value for every unique dependency path
* that can occur in the system, to make a unique hash value
* as likely as possible - hence the 64-bit width.
*
* The task struct holds the current hash value (initialized
* with zero), here we store the previous hash value:
*/
u64 prev_chain_key;
unsigned long acquire_ip;
struct lockdep_map *instance;
struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
/*
* class_idx is zero-indexed; it points to the element in
* lock_classes this held lock instance belongs to. class_idx is in
* the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
*/
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
* contexts nest ontop of process context chains, but we 'separate'
* the hashes by starting with 0 if we cross into an interrupt
* context, and we also keep do not add cross-context lock
* dependencies - the lock usage graph walking covers that area
* anyway, and we'd just unnecessarily increase the number of
* dependencies otherwise. [Note: hardirq and softirq contexts
* are separated from each other too.]
*
* The following field is used to detect when we cross into an
* interrupt context:
*/
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
unsigned int trylock:1; /* 16 bits */
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int sync:1;
unsigned int references:11; /* 32 bits */
unsigned int pin_count;
};
#else /* !CONFIG_LOCKDEP */
/*

View File

@ -9,9 +9,6 @@
*/
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <asm/page.h>
@ -36,6 +33,8 @@ enum {
NR_MM_COUNTERS
};
struct page;
struct page_frag {
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)

View File

@ -20,6 +20,7 @@
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
#include <linux/cleanup.h>
#include <linux/mutex_types.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@ -33,49 +34,6 @@
#ifndef CONFIG_PREEMPT_RT
/*
* Simple, straightforward mutexes with strict semantics:
*
* - only one task can hold the mutex at a time
* - only the owner can unlock the mutex
* - multiple unlocks are not permitted
* - recursive locking is not permitted
* - a mutex object must be initialized via the API
* - a mutex object must not be initialized via memset or copying
* - task may not exit with mutex held
* - memory areas where held locks reside must not be freed
* - held mutexes must not be reinitialized
* - mutexes may not be used in hardware or software interrupt
* contexts such as tasklets and timers
*
* These semantics are fully enforced when DEBUG_MUTEXES is
* enabled. Furthermore, besides enforcing the above rules, the mutex
* debugging code also implements a number of additional features
* that make lock debugging easier and faster:
*
* - uses symbolic names of mutexes, whenever they are printed in debug output
* - point-of-acquire tracking, symbolic lookup of function names
* - list of all locks held in the system, printout of them
* - owner tracking
* - detects self-recursing locks and prints out all relevant info
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
struct mutex {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#ifdef CONFIG_DEBUG_MUTEXES
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
@ -131,14 +89,6 @@ extern bool mutex_is_locked(struct mutex *lock);
/*
* Preempt-RT variant based on rtmutexes.
*/
#include <linux/rtmutex.h>
struct mutex {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#define __MUTEX_INITIALIZER(mutexname) \
{ \

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MUTEX_TYPES_H
#define __LINUX_MUTEX_TYPES_H
#include <linux/atomic.h>
#include <linux/lockdep_types.h>
#include <linux/osq_lock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#ifndef CONFIG_PREEMPT_RT
/*
* Simple, straightforward mutexes with strict semantics:
*
* - only one task can hold the mutex at a time
* - only the owner can unlock the mutex
* - multiple unlocks are not permitted
* - recursive locking is not permitted
* - a mutex object must be initialized via the API
* - a mutex object must not be initialized via memset or copying
* - task may not exit with mutex held
* - memory areas where held locks reside must not be freed
* - held mutexes must not be reinitialized
* - mutexes may not be used in hardware or software interrupt
* contexts such as tasklets and timers
*
* These semantics are fully enforced when DEBUG_MUTEXES is
* enabled. Furthermore, besides enforcing the above rules, the mutex
* debugging code also implements a number of additional features
* that make lock debugging easier and faster:
*
* - uses symbolic names of mutexes, whenever they are printed in debug output
* - point-of-acquire tracking, symbolic lookup of function names
* - list of all locks held in the system, printout of them
* - owner tracking
* - detects self-recursing locks and prints out all relevant info
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
struct mutex {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#else /* !CONFIG_PREEMPT_RT */
/*
* Preempt-RT variant based on rtmutexes.
*/
#include <linux/rtmutex.h>
struct mutex {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#endif /* CONFIG_PREEMPT_RT */
#endif /* __LINUX_MUTEX_TYPES_H */

View File

@ -93,10 +93,10 @@
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
#include <linux/numa.h>
#include <linux/random.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/**

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_TYPES_H
#define __LINUX_NODEMASK_TYPES_H
#include <linux/bitops.h>
#include <linux/numa.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
#endif /* __LINUX_NODEMASK_TYPES_H */

View File

@ -2,6 +2,7 @@
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
#include <linux/init.h>
#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
@ -22,34 +23,26 @@
#endif
#ifdef CONFIG_NUMA
#include <linux/printk.h>
#include <asm/sparsemem.h>
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
#ifndef memory_add_physaddr_to_nid
static inline int memory_add_physaddr_to_nid(u64 start)
{
pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
start);
return 0;
}
int memory_add_physaddr_to_nid(u64 start);
#endif
#ifndef phys_to_target_node
static inline int phys_to_target_node(u64 start)
{
pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
start);
return 0;
}
int phys_to_target_node(u64 start);
#endif
#ifndef numa_fill_memblks
static inline int __init numa_fill_memblks(u64 start, u64 end)
{
return NUMA_NO_MEMBLK;
}
#endif
#else /* !CONFIG_NUMA */
static inline int numa_nearest_node(int node, unsigned int state)
{

View File

@ -2,18 +2,12 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
#include <linux/pid_types.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
enum pid_type
{
PIDTYPE_PID,
PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
};
#include <linux/sched.h>
#include <linux/wait.h>
/*
* What is struct pid?
@ -110,9 +104,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
extern int pid_max;
extern int pid_max_min, pid_max_max;
@ -215,4 +206,127 @@ pid_t pid_vnr(struct pid *pid);
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
static inline struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
}
/*
* the helpers to get the task's different pids as they are seen
* from various namespaces
*
* task_xid_nr() : global id, i.e. the id seen from the init namespace;
* task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
* current.
* task_xid_nr_ns() : id seen from the ns specified;
*
* see also pid_nr() etc in include/linux/pid.h
*/
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
*
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*
* Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(const struct task_struct *p)
{
return p->thread_pid != NULL;
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
/**
* is_global_init - check if a task structure is init. Since init
* is free to have sub-threads we need to check tgid.
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
*
* Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
return task_tgid_nr(tsk) == 1;
}
#endif /* _LINUX_PID_H */

include/linux/pid_types.h (new file, 16 lines)
View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PID_TYPES_H
#define _LINUX_PID_TYPES_H
enum pid_type {
PIDTYPE_PID,
PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
};
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
#endif /* _LINUX_PID_TYPES_H */

View File

@ -75,20 +75,10 @@
#include <linux/container_of.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/plist_types.h>
#include <asm/bug.h>
struct plist_head {
struct list_head node_list;
};
struct plist_node {
int prio;
struct list_head prio_list;
struct list_head node_list;
};
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_PLIST_TYPES_H
#define _LINUX_PLIST_TYPES_H
#include <linux/types.h>
struct plist_head {
struct list_head node_list;
};
struct plist_node {
int prio;
struct list_head prio_list;
struct list_head node_list;
};
#endif /* _LINUX_PLIST_TYPES_H */

View File

@ -2,40 +2,16 @@
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
#include <linux/spinlock.h>
#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/alarmtimer.h>
#include <linux/posix-timers_types.h>
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
struct kernel_siginfo;
struct task_struct;
/*
* Bit fields within a clockid:
*
* The most significant 29 bits hold either a pid or a file descriptor.
*
* Bit 2 indicates whether a cpu clock refers to a thread or a process.
*
* Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
*
* A clockid is invalid if bits 2, 1, and 0 are all set.
*/
#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
#define CPUCLOCK_PERTHREAD(clock) \
(((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
#define CPUCLOCK_PERTHREAD_MASK 4
#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_CLOCK_MASK 3
#define CPUCLOCK_PROF 0
#define CPUCLOCK_VIRT 1
#define CPUCLOCK_SCHED 2
#define CPUCLOCK_MAX 3
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
@ -109,44 +85,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
ctmr->node.expires = exp;
}
/**
* posix_cputimer_base - Container per posix CPU clock
* @nextevt: Earliest-expiration cache
* @tqhead: timerqueue head for cpu_timers
*/
struct posix_cputimer_base {
u64 nextevt;
struct timerqueue_head tqhead;
};
/**
* posix_cputimers - Container for posix CPU timer related data
* @bases: Base container for posix CPU clocks
* @timers_active: Timers are queued.
* @expiry_active: Timer expiry is active. Used for
* process wide timers to avoid multiple
* task trying to handle expiry concurrently
*
* Used in task_struct and signal_struct
*/
struct posix_cputimers {
struct posix_cputimer_base bases[CPUCLOCK_MAX];
unsigned int timers_active;
unsigned int expiry_active;
};
/**
* posix_cputimers_work - Container for task work based posix CPU timer expiry
* @work: The task work to be scheduled
* @mutex: Mutex held around expiry in context of this task work
* @scheduled: @work has been scheduled already, no further processing
*/
struct posix_cputimers_work {
struct callback_head work;
struct mutex mutex;
unsigned int scheduled;
};
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@ -179,7 +117,6 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
},
#else
struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }

View File

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _linux_POSIX_TIMERS_TYPES_H
#define _linux_POSIX_TIMERS_TYPES_H
#include <linux/mutex_types.h>
#include <linux/timerqueue_types.h>
#include <linux/types.h>
/*
* Bit fields within a clockid:
*
* The most significant 29 bits hold either a pid or a file descriptor.
*
* Bit 2 indicates whether a cpu clock refers to a thread or a process.
*
* Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
*
* A clockid is invalid if bits 2, 1, and 0 are all set.
*/
#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
#define CPUCLOCK_PERTHREAD(clock) \
(((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
#define CPUCLOCK_PERTHREAD_MASK 4
#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
#define CPUCLOCK_CLOCK_MASK 3
#define CPUCLOCK_PROF 0
#define CPUCLOCK_VIRT 1
#define CPUCLOCK_SCHED 2
#define CPUCLOCK_MAX 3
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
#ifdef CONFIG_POSIX_TIMERS
/**
* posix_cputimer_base - Container per posix CPU clock
* @nextevt: Earliest-expiration cache
* @tqhead: timerqueue head for cpu_timers
*/
struct posix_cputimer_base {
u64 nextevt;
struct timerqueue_head tqhead;
};
/**
* posix_cputimers - Container for posix CPU timer related data
* @bases: Base container for posix CPU clocks
* @timers_active: Timers are queued.
* @expiry_active: Timer expiry is active. Used for
* process wide timers to avoid multiple
* task trying to handle expiry concurrently
*
* Used in task_struct and signal_struct
*/
struct posix_cputimers {
struct posix_cputimer_base bases[CPUCLOCK_MAX];
unsigned int timers_active;
unsigned int expiry_active;
};
/**
* posix_cputimers_work - Container for task work based posix CPU timer expiry
* @work: The task work to be scheduled
* @mutex: Mutex held around expiry in context of this task work
* @scheduled: @work has been scheduled already, no further processing
*/
struct posix_cputimers_work {
struct callback_head work;
struct mutex mutex;
unsigned int scheduled;
};
#else /* CONFIG_POSIX_TIMERS */
struct posix_cputimers { };
#endif /* CONFIG_POSIX_TIMERS */
#endif /* _linux_POSIX_TIMERS_TYPES_H */

View File

@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/once.h>
#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {

View File

@ -9,7 +9,7 @@
#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
@ -360,7 +360,9 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
INIT_HLIST_NODE(&notifier->link);
/* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
notifier->link.next = NULL;
notifier->link.pprev = NULL;
notifier->ops = ops;
}

View File

@ -8,6 +8,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/sched.h>
/*
* Structure allowing asynchronous waiting on RCU.
@ -55,4 +56,13 @@ do { \
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
rcu_read_unlock();
cond_resched();
rcu_read_lock();
#endif
}
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */

View File

@ -96,22 +96,11 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
/**
* typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REFCOUNT_TYPES_H
#define _LINUX_REFCOUNT_TYPES_H
#include <linux/types.h>
/**
* typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
#endif /* _LINUX_REFCOUNT_TYPES_H */

View File

@ -7,8 +7,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/time64.h>
struct __kernel_timespec;
struct timespec;
struct old_timespec32;
struct pollfd;

View File

@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
#include <linux/rseq.h>
#include <linux/blk-cgroup.h>
/**

View File

@ -12,7 +12,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/workqueue_types.h>
struct rhash_head {
struct rhash_head __rcu *next;

include/linux/rseq.h (new file, 131 lines)
View File

@ -0,0 +1,131 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _LINUX_RSEQ_H
#define _LINUX_RSEQ_H
#ifdef CONFIG_RSEQ
#include <linux/preempt.h>
#include <linux/sched.h>
/*
* Map the event mask on the user-space ABI enum rseq_cs_flags
* for direct mask checks.
*/
enum rseq_event_mask_bits {
RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};
enum rseq_event_mask {
RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};
static inline void rseq_set_notify_resume(struct task_struct *t)
{
if (t->rseq)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
if (current->rseq)
__rseq_handle_notify_resume(ksig, regs);
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
/*
* If parent process has a registered restartable sequences area, the
* child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
}
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
#else
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}
#endif
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
#else
static inline void rseq_syscall(struct pt_regs *regs)
{
}
#endif
#endif /* _LINUX_RSEQ_H */

View File

@ -10,7 +10,6 @@
#ifndef _RSLIB_H_
#define _RSLIB_H_
#include <linux/list.h>
#include <linux/types.h> /* for gfp_t */
#include <linux/gfp.h> /* for GFP_KERNEL */

View File

@ -10,33 +10,41 @@
#include <uapi/linux/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/cache.h>
#include <linux/irqflags_types.h>
#include <linux/smp_types.h>
#include <linux/pid_types.h>
#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/mutex_types.h>
#include <linux/plist_types.h>
#include <linux/hrtimer_types.h>
#include <linux/timer_types.h>
#include <linux/seccomp_types.h>
#include <linux/nodemask_types.h>
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
#include <uapi/linux/rseq.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
@ -1556,114 +1564,6 @@ struct task_struct {
*/
};
static inline struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
}
/*
* the helpers to get the task's different pids as they are seen
* from various namespaces
*
* task_xid_nr() : global id, i.e. the id seen from the init namespace;
* task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
* current.
* task_xid_nr_ns() : id seen from the ns specified;
*
* see also pid_nr() etc in include/linux/pid.h
*/
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
*
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*
* Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(const struct task_struct *p)
{
return p->thread_pid != NULL;
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@ -1707,20 +1607,6 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
/**
* is_global_init - check if a task structure is init. Since init
* is free to have sub-threads we need to check tgid.
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
*
* Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;
/*
@ -2170,15 +2056,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
rcu_read_unlock();
cond_resched();
rcu_read_lock();
#endif
}
#ifdef CONFIG_PREEMPT_DYNAMIC
extern bool preempt_model_none(void);
@ -2220,37 +2097,6 @@ static inline bool preempt_model_preemptible(void)
return preempt_model_full() || preempt_model_rt();
}
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
return spin_is_contended(lock);
#else
return 0;
#endif
}
/*
* Check if a rwlock is contended.
* Returns non-zero if there is another task waiting on the rwlock.
* Returns zero if the lock is not contended or the system / underlying
* rwlock implementation does not support contention detection.
* Technically does not depend on CONFIG_PREEMPTION, but a general need
* for low latency.
*/
static inline int rwlock_needbreak(rwlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
return rwlock_is_contended(lock);
#else
return 0;
#endif
}
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@ -2285,6 +2131,8 @@ extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
#include <linux/spinlock.h>
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@ -2321,129 +2169,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
unsigned long sched_cpu_util(int cpu);
#endif /* CONFIG_SMP */
#ifdef CONFIG_RSEQ
/*
* Map the event mask on the user-space ABI enum rseq_cs_flags
* for direct mask checks.
*/
enum rseq_event_mask_bits {
RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};
enum rseq_event_mask {
RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};
static inline void rseq_set_notify_resume(struct task_struct *t)
{
if (t->rseq)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
if (current->rseq)
__rseq_handle_notify_resume(ksig, regs);
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}
/*
* If parent process has a registered restartable sequences area, the
* child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
}
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
#else
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}
#endif
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
#else
static inline void rseq_syscall(struct pt_regs *regs)
{
}
#endif
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);

View File

@ -9,6 +9,7 @@
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

View File

@ -7,6 +7,8 @@
* functionality:
*/
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

View File

@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/refcount.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK

View File

@ -3,6 +3,7 @@
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
#include <linux/seccomp_types.h>
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
@ -21,25 +22,6 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
struct seccomp_filter;
/**
* struct seccomp - the state of a seccomp'ed process
*
* @mode: indicates one of the valid values above for controlled
* system calls available to a process.
* @filter_count: number of seccomp filters
* @filter: must always point to a valid seccomp-filter or NULL as it is
* accessed without locking during system call entry.
*
* @filter must only be accessed from the context of current as there
* is no read locking.
*/
struct seccomp {
int mode;
atomic_t filter_count;
struct seccomp_filter *filter;
};
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
@ -64,8 +46,6 @@ static inline int seccomp_mode(struct seccomp *s)
#include <linux/errno.h>
struct seccomp { };
struct seccomp_filter { };
struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
@ -126,6 +106,8 @@ static inline long seccomp_get_metadata(struct task_struct *task,
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
struct seq_file;
struct pid_namespace;
struct pid;
int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);

View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SECCOMP_TYPES_H
#define _LINUX_SECCOMP_TYPES_H
#include <linux/types.h>
#ifdef CONFIG_SECCOMP
struct seccomp_filter;
/**
* struct seccomp - the state of a seccomp'ed process
*
* @mode: indicates one of the valid values above for controlled
* system calls available to a process.
* @filter_count: number of seccomp filters
* @filter: must always point to a valid seccomp-filter or NULL as it is
* accessed without locking during system call entry.
*
* @filter must only be accessed from the context of current as there
* is no read locking.
*/
struct seccomp {
int mode;
atomic_t filter_count;
struct seccomp_filter *filter;
};
#else
struct seccomp { };
struct seccomp_filter { };
#endif
#endif /* _LINUX_SECCOMP_TYPES_H */

View File

@ -3,25 +3,17 @@
#define _LINUX_SEM_H
#include <uapi/linux/sem.h>
#include <linux/sem_types.h>
struct task_struct;
struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
struct sysv_sem {
struct sem_undo_list *undo_list;
};
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
struct sysv_sem {
/* empty */
};
static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
return 0;

include/linux/sem_types.h (new file, 13 lines)
View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEM_TYPES_H
#define _LINUX_SEM_TYPES_H
struct sem_undo_list;
struct sysv_sem {
#ifdef CONFIG_SYSVIPC
struct sem_undo_list *undo_list;
#endif
};
#endif /* _LINUX_SEM_TYPES_H */

View File

@ -18,6 +18,7 @@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
@ -37,37 +38,6 @@
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
/*
* Sequence counters (seqcount_t)
*
* This is the raw counting mechanism, without any writer protection.
*
* Write side critical sections must be serialized and non-preemptible.
*
* If readers can be invoked from hardirq or softirq contexts,
* interrupts or bottom halves must also be respectively disabled before
* entering the write section.
*
* This mechanism can't be used if the protected data contains pointers,
* as the writer can invalidate a pointer that a reader is following.
*
* If the write serialization mechanism is one of the common kernel
* locking primitives, use a sequence counter with associated lock
* (seqcount_LOCKNAME_t) instead.
*
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
* lock (seqlock_t) instead.
*
* See Documentation/locking/seqlock.rst
*/
typedef struct seqcount {
unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} seqcount_t;
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
@ -131,28 +101,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* See Documentation/locking/seqlock.rst
*/
/*
* For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
* disable preemption. It can lead to higher latencies, and the write side
* sections will not be able to acquire locks which become sleeping locks
* (e.g. spinlock_t).
*
* To remain preemptible while avoiding a possible livelock caused by the
* reader preempting the writer, use a different technique: let the reader
* detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
* case, acquire then release the associated LOCKNAME writer serialization
* lock. This will allow any possibly-preempted writer to make progress
* until the end of its writer serialization lock critical section.
*
* This lock-unlock technique must be implemented for all of PREEMPT_RT
* sleeping locks. See Documentation/locking/locktypes.rst
*/
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr) expr
#else
#define __SEQ_LOCK(expr)
#endif
/*
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
@ -194,11 +142,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockbase: prefix for associated lock/unlock
*/
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
@ -284,6 +227,7 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#undef SEQCOUNT_LOCKNAME
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@ -794,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
smp_wmb(); /* increment "sequence" before following stores */
}
/*
* Sequential locks (seqlock_t)
*
* Sequence counters with an embedded spinlock for writer serialization
* and non-preemptibility.
*
* For more info, see:
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
typedef struct {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \

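For context, the seqcount_t read/retry pattern that the comment block removed above describes is used roughly like this (an illustrative sketch only; the foo_* names are invented and not part of this series):

#include <linux/seqlock.h>

static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
static u64 foo_a, foo_b;

/* writer: must be externally serialized and non-preemptible */
static void foo_update(u64 a, u64 b)
{
        write_seqcount_begin(&foo_seq);
        foo_a = a;
        foo_b = b;
        write_seqcount_end(&foo_seq);
}

/* lockless reader: retries if it raced with a writer */
static void foo_read(u64 *a, u64 *b)
{
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&foo_seq);
                *a = foo_a;
                *b = foo_b;
        } while (read_seqcount_retry(&foo_seq, seq));
}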
View File

@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_TYPES_H
#define __LINUX_SEQLOCK_TYPES_H
#include <linux/lockdep_types.h>
#include <linux/mutex_types.h>
#include <linux/spinlock_types.h>
/*
* Sequence counters (seqcount_t)
*
* This is the raw counting mechanism, without any writer protection.
*
* Write side critical sections must be serialized and non-preemptible.
*
* If readers can be invoked from hardirq or softirq contexts,
* interrupts or bottom halves must also be respectively disabled before
* entering the write section.
*
* This mechanism can't be used if the protected data contains pointers,
* as the writer can invalidate a pointer that a reader is following.
*
* If the write serialization mechanism is one of the common kernel
* locking primitives, use a sequence counter with associated lock
* (seqcount_LOCKNAME_t) instead.
*
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
* lock (seqlock_t) instead.
*
* See Documentation/locking/seqlock.rst
*/
typedef struct seqcount {
unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} seqcount_t;
/*
* For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
* disable preemption. It can lead to higher latencies, and the write side
* sections will not be able to acquire locks which become sleeping locks
* (e.g. spinlock_t).
*
* To remain preemptible while avoiding a possible livelock caused by the
* reader preempting the writer, use a different technique: let the reader
* detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
* case, acquire then release the associated LOCKNAME writer serialization
* lock. This will allow any possibly-preempted writer to make progress
* until the end of its writer serialization lock critical section.
*
* This lock-unlock technique must be implemented for all of PREEMPT_RT
* sleeping locks. See Documentation/locking/locktypes.rst
*/
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr) expr
#else
#define __SEQ_LOCK(expr)
#endif
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
} seqcount_##lockname##_t;
SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#undef SEQCOUNT_LOCKNAME
/*
* Sequential locks (seqlock_t)
*
* Sequence counters with an embedded spinlock for writer serialization
* and non-preemptibility.
*
* For more info, see:
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
typedef struct {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;
#endif /* __LINUX_SEQLOCK_TYPES_H */
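As a usage sketch for the seqlock_t type moved into this new header (state_* names are invented for illustration; the embedded spinlock serializes writers, readers stay lockless):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_lock);
static u64 state_x, state_y;

/* writer: takes the embedded spinlock */
static void state_set(u64 x, u64 y)
{
        write_seqlock(&state_lock);
        state_x = x;
        state_y = y;
        write_sequnlock(&state_lock);
}

/* lockless reader */
static u64 state_sum(void)
{
        unsigned int seq;
        u64 sum;

        do {
                seq = read_seqbegin(&state_lock);
                sum = state_x + state_y;
        } while (read_seqretry(&state_lock, seq));

        return sum;
}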

View File

@ -2,12 +2,12 @@
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
#include <linux/list.h>
#include <linux/types.h>
#include <asm/page.h>
#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
struct file;
struct task_struct;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {

View File

@ -3,6 +3,7 @@
#define _LINUX_SIGNAL_H
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/signal_types.h>
#include <linux/string.h>

View File

@ -6,7 +6,7 @@
* Basic signal handling related data type definitions:
*/
#include <linux/list.h>
#include <linux/types.h>
#include <uapi/linux/signal.h>
typedef struct kernel_siginfo {

View File

@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
return raw_spin_is_contended(&lock->rlock);
}
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
return spin_is_contended(lock);
#else
return 0;
#endif
}
/*
* Check if a rwlock is contended.
* Returns non-zero if there is another task waiting on the rwlock.
* Returns zero if the lock is not contended or the system / underlying
* rwlock implementation does not support contention detection.
* Technically does not depend on CONFIG_PREEMPTION, but a general need
* for low latency.
*/
static inline int rwlock_needbreak(rwlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
return rwlock_is_contended(lock);
#else
return 0;
#endif
}
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
#else /* !CONFIG_PREEMPT_RT */

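A hedged sketch of the lock-break pattern spin_needbreak() enables (the drain loop and names are invented for illustration; cond_resched_lock() in the kernel wraps essentially this drop-and-retake logic):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void drain_queue(spinlock_t *lock, struct list_head *queue)
{
        spin_lock(lock);
        while (!list_empty(queue)) {
                struct list_head *item = queue->next;

                list_del(item);
                /* ... process item, still under the lock ... */

                /* give waiters a chance: drop and re-take the lock */
                if (spin_needbreak(lock) || need_resched()) {
                        spin_unlock(lock);
                        cond_resched();
                        spin_lock(lock);
                }
        }
        spin_unlock(lock);
}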
View File

@ -6,16 +6,10 @@
#define _SYSCALL_USER_DISPATCH_H
#include <linux/thread_info.h>
#include <linux/syscall_user_dispatch_types.h>
#ifdef CONFIG_GENERIC_ENTRY
struct syscall_user_dispatch {
char __user *selector;
unsigned long offset;
unsigned long len;
bool on_dispatch;
};
int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector);
@ -29,7 +23,6 @@ int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long siz
void __user *data);
#else
struct syscall_user_dispatch {};
static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector)

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SYSCALL_USER_DISPATCH_TYPES_H
#define _SYSCALL_USER_DISPATCH_TYPES_H
#include <linux/types.h>
#ifdef CONFIG_GENERIC_ENTRY
struct syscall_user_dispatch {
char __user *selector;
unsigned long offset;
unsigned long len;
bool on_dispatch;
};
#else
struct syscall_user_dispatch {};
#endif
#endif /* _SYSCALL_USER_DISPATCH_TYPES_H */
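For context, the feature this struct backs is configured from userspace via prctl(); a hedged userspace sketch (error handling omitted, the names below are illustrative): syscalls issued outside [start, start + len) raise SIGSYS while the selector byte is SYSCALL_DISPATCH_FILTER_BLOCK.

#include <sys/prctl.h>
#include <linux/prctl.h>

static volatile char dispatch_selector = SYSCALL_DISPATCH_FILTER_ALLOW;

/* [start, start + len) is the always-allowed region, typically libc/vdso */
int enable_dispatch(unsigned long start, unsigned long len)
{
        return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
                     start, len, &dispatch_selector);
}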

View File

@ -7,10 +7,13 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
#include <linux/time64.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
struct vm_area_struct;
struct timens_offsets {
struct timespec64 monotonic;
struct timespec64 boottime;

View File

@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/clocksource_ids.h>
#include <linux/ktime.h>
/* Included from linux/ktime.h */

View File

@ -7,21 +7,7 @@
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>
struct timer_list {
/*
* All fields that change during normal runtime grouped to the
* same cacheline
*/
struct hlist_node entry;
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
#include <linux/timer_types.h>
#ifdef CONFIG_LOCKDEP
/*

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMER_TYPES_H
#define _LINUX_TIMER_TYPES_H
#include <linux/lockdep_types.h>
#include <linux/types.h>
struct timer_list {
/*
* All fields that change during normal runtime grouped to the
* same cacheline
*/
struct hlist_node entry;
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
#endif /* _LINUX_TIMER_TYPES_H */
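A minimal sketch of how struct timer_list is normally driven (demo_* names are invented; the timer_delete_sync() name assumes a recent kernel, older trees use del_timer_sync()):

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
        /* runs in softirq context when the timer expires */
        pr_info("timer fired\n");
}

static void demo_start(void)
{
        timer_setup(&demo_timer, demo_timer_fn, 0);
        mod_timer(&demo_timer, jiffies + msecs_to_jiffies(100));
}

static void demo_stop(void)
{
        timer_delete_sync(&demo_timer);
}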

View File

@ -3,18 +3,7 @@
#define _LINUX_TIMERQUEUE_H
#include <linux/rbtree.h>
#include <linux/ktime.h>
struct timerqueue_node {
struct rb_node node;
ktime_t expires;
};
struct timerqueue_head {
struct rb_root_cached rb_root;
};
#include <linux/timerqueue_types.h>
extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMERQUEUE_TYPES_H
#define _LINUX_TIMERQUEUE_TYPES_H
#include <linux/rbtree_types.h>
#include <linux/types.h>
struct timerqueue_node {
struct rb_node node;
ktime_t expires;
};
struct timerqueue_head {
struct rb_root_cached rb_root;
};
#endif /* _LINUX_TIMERQUEUE_TYPES_H */
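For reference, a small sketch of the timerqueue API these two types serve (illustrative only; hrtimers are the main user of this rbtree-ordered queue):

#include <linux/ktime.h>
#include <linux/timerqueue.h>

static struct timerqueue_head queue;
static struct timerqueue_node event;

static void demo(void)
{
        struct timerqueue_node *next;

        timerqueue_init_head(&queue);
        timerqueue_init(&event);

        event.expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
        timerqueue_add(&queue, &event);    /* true if it is the new earliest */

        next = timerqueue_getnext(&queue); /* earliest-expiring node, or NULL */
        if (next)
                timerqueue_del(&queue, next);
}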

View File

@ -21,6 +21,7 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/hrtimer.h>
/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \

View File

@ -120,6 +120,9 @@ typedef s64 int64_t;
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
/**
* The type used for indexing onto a disc or disc partition.
*

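Since ktime_t is just a signed 64-bit nanosecond count, it composes with the usual helpers; a minimal sketch (measure() is an invented name):

#include <linux/ktime.h>

/* time a callback in nanoseconds using the s64-based ktime_t */
static s64 measure(void (*fn)(void))
{
        ktime_t start, delta;

        start = ktime_get();
        fn();
        delta = ktime_sub(ktime_get(), start);

        return ktime_to_ns(delta);
}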
View File

@ -12,22 +12,13 @@
* to detect when we overlook these differences.
*
*/
#include <linux/types.h>
#include <linux/uidgid_types.h>
#include <linux/highuid.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
struct uid_gid_map;
typedef struct {
uid_t val;
} kuid_t;
typedef struct {
gid_t val;
} kgid_t;
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UIDGID_TYPES_H
#define _LINUX_UIDGID_TYPES_H
#include <linux/types.h>
typedef struct {
uid_t val;
} kuid_t;
typedef struct {
gid_t val;
} kgid_t;
#endif /* _LINUX_UIDGID_TYPES_H */
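A usage sketch for the kuid_t/kgid_t wrappers moved here (demo_uid is an invented helper): raw uid_t values from userspace are mapped through a user namespace before the kernel compares them.

#include <linux/uidgid.h>

static bool demo_uid(struct user_namespace *ns, uid_t raw)
{
        kuid_t kuid = make_kuid(ns, raw);

        if (!uid_valid(kuid))
                return false;

        /* round-trip back to the namespace-relative value */
        return from_kuid(ns, kuid) == raw;
}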

View File

@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;

View File

@ -14,12 +14,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);
#include <linux/workqueue_types.h>
/*
* The first word is the work queue pointer and the flags rolled into
@ -95,15 +90,6 @@ enum {
#define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WORKQUEUE_TYPES_H
#define _LINUX_WORKQUEUE_TYPES_H
#include <linux/atomic.h>
#include <linux/lockdep_types.h>
#include <linux/timer_types.h>
#include <linux/types.h>
struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);
struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
#endif /* _LINUX_WORKQUEUE_TYPES_H */
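For context, a minimal sketch of how a work_struct is typically used (demo_* names are invented, not part of this series):

#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
        /* runs later in process context on a kworker thread */
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_kick(void)
{
        schedule_work(&demo_work);   /* queue on the system workqueue */
}

static void demo_teardown(void)
{
        flush_work(&demo_work);      /* wait for a pending/running instance */
}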

View File

@ -2,7 +2,7 @@
#ifndef _UAPI_LINUX_RESOURCE_H
#define _UAPI_LINUX_RESOURCE_H
#include <linux/time.h>
#include <linux/time_types.h>
#include <linux/types.h>
/*

View File

@ -12,6 +12,7 @@
#include <linux/audit.h>
#include <linux/numa.h>
#include <linux/scs.h>
#include <linux/plist.h>
#include <linux/uaccess.h>

View File

@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <uapi/linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>

View File

@ -14,6 +14,7 @@
#include <linux/unistd.h>
#include <linux/err.h>
#include <linux/ipc_namespace.h>
#include <linux/pid.h>
/*
* The IPC ID contains 2 separate numbers - index and sequence number.

View File

@ -114,6 +114,7 @@ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
obj-$(CONFIG_CFI_CLANG) += cfi.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERF_EVENTS) += events/

View File

@ -46,11 +46,12 @@ asynchronous and synchronous parts of the kernel.
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "workqueue_internal.h"

View File

@ -7,6 +7,7 @@
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate_wait.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

View File

@ -69,8 +69,10 @@
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>
#include <uapi/linux/wait.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

View File

@ -53,6 +53,7 @@
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
@ -99,6 +100,7 @@
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/rseq.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>

Some files were not shown because too many files have changed in this diff