/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H

#include <linux/kcsan-checks.h>
#include <linux/types.h>

#ifdef CONFIG_KCSAN

/*
 * Context for each thread of execution: for tasks, this is stored in
 * task_struct, and interrupts access internal per-CPU storage.
 */
struct kcsan_ctx {
	int disable_count; /* disable counter */
	int atomic_next; /* number of following atomic ops */

	/*
	 * We distinguish between: (a) nestable atomic regions that may contain
	 * other nestable regions; and (b) flat atomic regions that do not keep
	 * track of nesting. Both (a) and (b) are entirely independent of each
	 * other, and a flat region may be started in a nestable region or
	 * vice-versa.
	 *
	 * This is required because, for example, in the annotations for
	 * seqlocks, we declare seqlock writer critical sections as (a) nestable
	 * atomic regions and reader critical sections as (b) flat atomic
	 * regions, but have encountered cases where seqlock reader critical
	 * sections are contained within writer critical sections (the opposite
	 * may be possible, too).
	 *
	 * To support these cases, we independently track the depth of nesting
	 * for (a), and whether the leaf level is flat for (b). (See the
	 * illustrative sketch after this struct definition.)
	 */
	int atomic_nest_count;
	bool in_flat_atomic;

	/*
	 * Access mask for all accesses if non-zero.
	 */
	unsigned long access_mask;

	/*
	 * List of scoped accesses; these are checked, on every call into the
	 * KCSAN runtime, for the duration of the scope (see the sketch below).
	 */
	struct list_head scoped_accesses;
};
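
/*
 * Illustrative sketch only (the helper name below is hypothetical, and it
 * assumes the kcsan_{nestable,flat}_atomic_{begin,end}() annotations from
 * <linux/kcsan-checks.h>): nested writer sections and a reader section inside
 * a writer section would drive the counters above roughly as follows.
 */
static inline void __kcsan_ctx_nesting_sketch(void)
{
	kcsan_nestable_atomic_begin();	/* (a) nestable: atomic_nest_count == 1 */
	kcsan_nestable_atomic_begin();	/* nested (a): atomic_nest_count == 2 */
	kcsan_nestable_atomic_end();	/* back to atomic_nest_count == 1 */

	kcsan_flat_atomic_begin();	/* (b) flat: in_flat_atomic == true */
	kcsan_flat_atomic_end();	/* in_flat_atomic == false */

	kcsan_nestable_atomic_end();	/* atomic_nest_count == 0 */
}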

/**
 * kcsan_init - initialize KCSAN runtime
 */
void kcsan_init(void);
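
/*
 * Illustrative sketch only (the helper name below is hypothetical, and it
 * assumes the scoped-access API declared in <linux/kcsan-checks.h>): an entry
 * added to the scoped_accesses list remains checked on every call into the
 * KCSAN runtime until the scope is explicitly ended.
 */
static inline void __kcsan_scoped_access_sketch(int *shared)
{
	struct kcsan_scoped_access sa;

	/* From here on, races with *shared are checked for the whole scope. */
	kcsan_begin_scoped_access(shared, sizeof(*shared),
				  KCSAN_ACCESS_WRITE, &sa);

	*shared = 42;	/* plain write covered by the scoped check */

	kcsan_end_scoped_access(&sa);	/* removes the entry from scoped_accesses */
}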

#else /* CONFIG_KCSAN */

static inline void kcsan_init(void) { }

#endif /* CONFIG_KCSAN */

#endif /* _LINUX_KCSAN_H */