LKMM updates for v6.4

This update improves LKMM diagnostic messages, unifies handling
 of the ordering produced by unlock/lock pairs, adds support for the
 smp_mb__after_srcu_read_unlock() macro, removes redundant members from
 the to-r relation, brings SRCU read-side semantics into alignment
 with Linux-kernel SRCU, makes ppo a subrelation of po, and improves
 documentation.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEbK7UrM+RBIrCoViJnr8S83LZ+4wFAmQws9wTHHBhdWxtY2tA
 a2VybmVsLm9yZwAKCRCevxLzctn7jKNGD/9oBTUIlsscsZ0GCtx5CYkA+AsIOAzV
 ejvH3vB935jKHVXuDYtnvjDrk9km2aK98CzZaYyJT44bvMZ7gMIvMG8b/k/ZokgF
 GNkde2Vv2S2sMpgQZUxSCjCBPyqd7uRzPtixyAb4zIu7VAEHdlptHRFc+0DQ2l1z
 F4jCRI01ZXD/pk+7fcztxi+obBYXfbVwSFSieN7Kym5Vt6sQXbG/SG3nIS/ZgExA
 i52uiKScAVtfdGOQCnC4YR8hrcahFZ7ohmEx66B1mcgLknzjS/Pzy7tDNiRr6jx9
 Ong7c/ImLk7JLKWMC/52lzeXlzrwMAvdoJ9EviJO5zfAjP5ycD8Te2ErCDkg8f4P
 yoNoLinYweG5VTGrMvYFpUnRUL6iABbtZe4NrwlP3eWsfRQ3lZg2NK/QypirYW36
 aHYQPhKjj2zSpxx2+DDNQZUpd+kXGMNLngix+VT/E9hJ1k95lt1I44ID68UJdIaU
 ctIB1c1lEPJbgFnNXsBBorhcfBU1JduYrNg212Uh2C2XuMFzMiiVGCuZvckFc9nZ
 HllllQzE4WH5BhBx5o1qNmRAsbH4v1+B/UAoYpH6Er7fDtHhdXE0YtgopH4YiU11
 9UxO35qFQYGjU1+iR3063OO9dVhbeSlwRyAQ2cbIJ/GZG+ciniOtI/lg7Px8MO/y
 nZyuOytkf6x6JQ==
 =Vilg
 -----END PGP SIGNATURE-----

Merge tag 'lkmm.2023.04.07a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull Linux Kernel Memory Model updates from Paul McKenney:
 "This improves LKMM diagnostic messages, unifies handling of the
  ordering produced by unlock/lock pairs, adds support for the
  smp_mb__after_srcu_read_unlock() macro, removes redundant members from
  the to-r relation, brings SRCU read-side semantics into alignment with
  Linux-kernel SRCU, makes ppo a subrelation of po, and improves
  documentation"

* tag 'lkmm.2023.04.07a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  Documentation: litmus-tests: Correct spelling
  tools/memory-model: Add documentation about SRCU read-side critical sections
  tools/memory-model: Make ppo a subrelation of po
  tools/memory-model: Provide exact SRCU semantics
  tools/memory-model: Restrict to-r to read-read address dependency
  tools/memory-model: Add smp_mb__after_srcu_read_unlock()
  tools/memory-model: Unify UNLOCK+LOCK pairings to po-unlock-lock-po
  tools/memory-model: Update some warning labels
commit 406037351e (Linus Torvalds, 2023-04-24 12:00:51 -07:00)
6 changed files with 204 additions and 39 deletions

Documentation/litmus-tests/README

@@ -9,7 +9,7 @@ a kernel test module based on a litmus test, please see
tools/memory-model/README.
-atomic (/atomic derectory)
+atomic (/atomic directory)
--------------------------
Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus

tools/memory-model/Documentation/explanation.txt

@@ -28,9 +28,10 @@ Explanation of the Linux-Kernel Memory Consistency Model
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
-23. LOCKING
-24. PLAIN ACCESSES AND DATA RACES
-25. ODDS AND ENDS
+23. SRCU READ-SIDE CRITICAL SECTIONS
+24. LOCKING
+25. PLAIN ACCESSES AND DATA RACES
+26. ODDS AND ENDS
@@ -1848,14 +1849,169 @@ section in P0 both starts before P1's grace period does and ends
before it does, and the critical section in P2 both starts after P1's
grace period does and ends after it does.
-Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
-addition to normal RCU.  The ideas involved are much the same as
-above, with new relations srcu-gp and srcu-rscsi added to represent
-SRCU grace periods and read-side critical sections.  There is a
-restriction on the srcu-gp and srcu-rscsi links that can appear in an
-rcu-order sequence (the srcu-rscsi links must be paired with srcu-gp
-links having the same SRCU domain with proper nesting); the details
-are relatively unimportant.
+The LKMM supports SRCU (Sleepable Read-Copy-Update) in addition to
+normal RCU.  The ideas involved are much the same as above, with new
+relations srcu-gp and srcu-rscsi added to represent SRCU grace periods
+and read-side critical sections.  However, there are some significant
+differences between RCU read-side critical sections and their SRCU
+counterparts, as described in the next section.
+
+
+SRCU READ-SIDE CRITICAL SECTIONS
+--------------------------------
+
+The LKMM uses the srcu-rscsi relation to model SRCU read-side critical
+sections.  They differ from RCU read-side critical sections in the
+following respects:
+
+1.	Unlike the analogous RCU primitives, synchronize_srcu(),
+	srcu_read_lock(), and srcu_read_unlock() take a pointer to a
+	struct srcu_struct as an argument.  This structure is called
+	an SRCU domain, and calls linked by srcu-rscsi must have the
+	same domain.  Read-side critical sections and grace periods
+	associated with different domains are independent of one
+	another; the SRCU version of the RCU Guarantee applies only
+	to pairs of critical sections and grace periods having the
+	same domain.
+
+2.	srcu_read_lock() returns a value, called the index, which must
+	be passed to the matching srcu_read_unlock() call.  Unlike
+	rcu_read_lock() and rcu_read_unlock(), an srcu_read_lock()
+	call does not always have to match the next unpaired
+	srcu_read_unlock().  In fact, it is possible for two SRCU
+	read-side critical sections to overlap partially, as in the
+	following example (where s is an srcu_struct and idx1 and idx2
+	are integer variables):
+
+		idx1 = srcu_read_lock(&s);	// Start of first RSCS
+		idx2 = srcu_read_lock(&s);	// Start of second RSCS
+		srcu_read_unlock(&s, idx1);	// End of first RSCS
+		srcu_read_unlock(&s, idx2);	// End of second RSCS
+
+	The matching is determined entirely by the domain pointer and
+	index value.  By contrast, if the calls had been
+	rcu_read_lock() and rcu_read_unlock() then they would have
+	created two nested (fully overlapping) read-side critical
+	sections: an inner one and an outer one.
+
+3.	The srcu_down_read() and srcu_up_read() primitives work
+	exactly like srcu_read_lock() and srcu_read_unlock(), except
+	that matching calls don't have to execute on the same CPU.
+	(The names are meant to be suggestive of operations on
+	semaphores.)  Since the matching is determined by the domain
+	pointer and index value, these primitives make it possible for
+	an SRCU read-side critical section to start on one CPU and end
+	on another, so to speak.
+
+In order to account for these properties of SRCU, the LKMM models
+srcu_read_lock() as a special type of load event (which is
+appropriate, since it takes a memory location as argument and returns
+a value, just as a load does) and srcu_read_unlock() as a special type
+of store event (again appropriate, since it takes as arguments a
+memory location and a value).  These loads and stores are annotated as
+belonging to the "srcu-lock" and "srcu-unlock" event classes
+respectively.
+
+This approach allows the LKMM to tell whether two events are
+associated with the same SRCU domain, simply by checking whether they
+access the same memory location (i.e., they are linked by the loc
+relation).  It also gives a way to tell which unlock matches a
+particular lock, by checking for the presence of a data dependency
+from the load (srcu-lock) to the store (srcu-unlock).  For example,
+given the situation outlined earlier (with statement labels added):
+
+	A: idx1 = srcu_read_lock(&s);
+	B: idx2 = srcu_read_lock(&s);
+	C: srcu_read_unlock(&s, idx1);
+	D: srcu_read_unlock(&s, idx2);
+
+the LKMM will treat A and B as loads from s yielding values saved in
+idx1 and idx2 respectively.  Similarly, it will treat C and D as
+though they stored the values from idx1 and idx2 in s.  The end result
+is much as if we had written:
+
+	A: idx1 = READ_ONCE(s);
+	B: idx2 = READ_ONCE(s);
+	C: WRITE_ONCE(s, idx1);
+	D: WRITE_ONCE(s, idx2);
+
+except for the presence of the special srcu-lock and srcu-unlock
+annotations.  You can see at once that we have A ->data C and
+B ->data D.  These dependencies tell the LKMM that C is the
+srcu-unlock event matching srcu-lock event A, and D is the
+srcu-unlock event matching srcu-lock event B.
+
+This approach is admittedly a hack, and it has the potential to lead
+to problems.  For example, in:
+
+	idx1 = srcu_read_lock(&s);
+	srcu_read_unlock(&s, idx1);
+	idx2 = srcu_read_lock(&s);
+	srcu_read_unlock(&s, idx2);
+
+the LKMM will believe that idx2 must have the same value as idx1,
+since it reads from the immediately preceding store of idx1 in s.
+Fortunately this won't matter, assuming that litmus tests never do
+anything with SRCU index values other than pass them to
+srcu_read_unlock() or srcu_up_read() calls.
+
+However, sometimes it is necessary to store an index value in a
+shared variable temporarily.  In fact, this is the only way for
+srcu_down_read() to pass the index it gets to an srcu_up_read() call
+on a different CPU.  In more detail, we might have something like:
+
+	struct srcu_struct s;
+	int x;
+
+	P0()
+	{
+		int r0;
+
+		A: r0 = srcu_down_read(&s);
+		B: WRITE_ONCE(x, r0);
+	}
+
+	P1()
+	{
+		int r1;
+
+		C: r1 = READ_ONCE(x);
+		D: srcu_up_read(&s, r1);
+	}
+
+Assuming that P1 executes after P0 and does read the index value
+stored in x, we can write this (using brackets to represent event
+annotations) as:
+
+	A[srcu-lock] ->data B[once] ->rf C[once] ->data D[srcu-unlock].
+
+The LKMM defines a carry-srcu-data relation to express this pattern;
+it permits an arbitrarily long sequence of
+
+	data ; rf
+
+pairs (that is, a data link followed by an rf link) to occur between
+an srcu-lock event and the final data dependency leading to the
+matching srcu-unlock event.  carry-srcu-data is complicated by the
+need to ensure that none of the intermediate store events in this
+sequence are instances of srcu-unlock.  This is necessary because in a
+pattern like the one above:
+
+	A: idx1 = srcu_read_lock(&s);
+	B: srcu_read_unlock(&s, idx1);
+	C: idx2 = srcu_read_lock(&s);
+	D: srcu_read_unlock(&s, idx2);
+
+the LKMM treats B as a store to the variable s and C as a load from
+that variable, creating an undesirable rf link from B to C:
+
+	A ->data B ->rf C ->data D.
+
+This would cause carry-srcu-data to mistakenly extend a data
+dependency from A to D, giving the impression that D was the
+srcu-unlock event matching A's srcu-lock.  To avoid such problems,
+carry-srcu-data does not accept sequences in which the end of any of
+the intermediate ->data links (B above) is an srcu-unlock event.
LOCKING

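Editorial note: to make the domain independence described in item 1 of
the new section concrete, here is a minimal litmus-test sketch.  It is
not part of this merge, and the test name and outcome are the editor's
assumptions: this is the classic RCU-guarantee pattern, except that
P0's critical section uses domain s1 while P1's grace period uses
domain s2, so the LKMM should allow the "exists" outcome that a single
shared domain would forbid.

C SRCU-two-domains

(*
 * Editorial sketch, not from this merge: with a single shared domain
 * the outcome below would be forbidden; with distinct domains s1 and
 * s2, P1's grace period need not wait for P0's critical section.
 *)

{}

P0(struct srcu_struct *s1, int *x, int *y)
{
	int r0;

	r0 = srcu_read_lock(s1);
	WRITE_ONCE(*x, 1);
	WRITE_ONCE(*y, 1);
	srcu_read_unlock(s1, r0);
}

P1(struct srcu_struct *s2, int *x, int *y)
{
	int r1;
	int r2;

	r1 = READ_ONCE(*y);
	synchronize_srcu(s2);
	r2 = READ_ONCE(*x);
}

exists (1:r1=1 /\ 1:r2=0)
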
tools/memory-model/linux-kernel.bell

@@ -31,7 +31,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
'after-spinlock (*smp_mb__after_spinlock*) ||
-'after-unlock-lock (*smp_mb__after_unlock_lock*)
+'after-unlock-lock (*smp_mb__after_unlock_lock*) ||
+'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
(* SRCU *)
@@ -53,38 +54,31 @@ let rcu-rscs = let rec
in matched
(* Validate nesting *)
-flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
-flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unmatched-rcu-lock
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unmatched-rcu-unlock
(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
-let srcu-rscs = let rec
-	    unmatched-locks = Srcu-lock \ domain(matched)
-	and unmatched-unlocks = Srcu-unlock \ range(matched)
-	and unmatched = unmatched-locks | unmatched-unlocks
-	and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
-	and unmatched-locks-to-unlocks =
-		([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
-	and matched = matched | (unmatched-locks-to-unlocks \
-		(unmatched-po ; unmatched-po))
-	in matched
+let carry-srcu-data = (data ; [~ Srcu-unlock] ; rf)*
+let srcu-rscs = ([Srcu-lock] ; carry-srcu-data ; data ; [Srcu-unlock]) & loc
(* Validate nesting *)
-flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
-flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unmatched-srcu-lock
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unmatched-srcu-unlock
+flag ~empty (srcu-rscs^-1 ; srcu-rscs) \ id as multiple-srcu-matches
(* Check for use of synchronize_srcu() inside an RCU critical section *)
flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
(* Validate SRCU dynamic match *)
-flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
+flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
-	LKR | LKW | UL | LF | RL | RU
+	LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
(* Redefine dependencies to include those carried through plain accesses *)
-let carry-dep = (data ; rfi)*
+let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data

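Editorial aside: the renamed srcu-bad-value-match flag (formerly
srcu-bad-nesting) describes what different-values(srcu-rscs) actually
checks: it fires when a matched srcu_read_unlock() stores a value
other than the index returned by its srcu_read_lock().  A hypothetical
fragment (the editor's illustration, not from this merge) that should
trigger the flag:

	int r0;

	r0 = srcu_read_lock(&s);
	srcu_read_unlock(&s, r0 + 1);	// stores r0 + 1, not r0: value mismatch
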
tools/memory-model/linux-kernel.cat

@@ -37,8 +37,20 @@ let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
-([M] ; po ; [UL] ; (co | po) ; [LKW] ;
-	fencerel(After-unlock-lock) ; [M])
+(*
+ * Note: The po-unlock-lock-po relation only passes the lock to the direct
+ * successor, perhaps giving the impression that the ordering of the
+ * smp_mb__after_unlock_lock() fence only affects a single lock handover.
+ * However, in a longer sequence of lock handovers, the implicit
+ * A-cumulative release fences of lock-release ensure that any stores that
+ * propagate to one of the involved CPUs before it hands over the lock to
+ * the next CPU will also propagate to the final CPU handing over the lock
+ * to the CPU that executes the fence.  Therefore, all those stores are
+ * also affected by the fence.
+ *)
+([M] ; po-unlock-lock-po ;
+	[After-unlock-lock] ; po ; [M]) |
+([M] ; po? ; [Srcu-unlock] ; fencerel(After-srcu-read-unlock) ; [M])
let gp = po ; [Sync-rcu | Sync-srcu] ; po?
let strong-fence = mb | gp
@@ -69,8 +81,8 @@ let dep = addr | data
let rwdep = (dep | ctrl) ; [W]
let overwrite = co | fr
let to-w = rwdep | (overwrite & int) | (addr ; [Plain] ; wmb)
-let to-r = addr | (dep ; [Marked] ; rfi)
-let ppo = to-r | to-w | fence | (po-unlock-lock-po & int)
+let to-r = (addr ; [R]) | (dep ; [Marked] ; rfi)
+let ppo = to-r | to-w | (fence & int) | (po-unlock-lock-po & int)
(* Propagation: Ordering from release operations and strong fences. *)
let A-cumul(r) = (rfe ; [Marked])? ; r

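Editorial note on the new After-srcu-read-unlock clause in mb above:
accesses po-before the matching srcu_read_unlock() become fully
ordered against accesses after the fence.  A sketch of the classic
store-buffering pattern this is meant to forbid (the test name and
outcome are the editor's assumptions, not part of this merge):

C SB+srcu-unlock-mb+mb

{}

P0(struct srcu_struct *s, int *x, int *y)
{
	int r0;
	int r1;

	r0 = srcu_read_lock(s);
	WRITE_ONCE(*x, 1);
	srcu_read_unlock(s, r0);
	smp_mb__after_srcu_read_unlock();
	r1 = READ_ONCE(*y);
}

P1(int *x, int *y)
{
	int r1;

	WRITE_ONCE(*y, 1);
	smp_mb();
	r1 = READ_ONCE(*x);
}

exists (0:r1=0 /\ 1:r1=0)

Because WRITE_ONCE(*x, 1) is po-before the srcu-unlock store and the
fence follows that store, the [M] ; po? ; [Srcu-unlock] ;
fencerel(After-srcu-read-unlock) ; [M] clause gives P0 full-barrier
ordering, so the "exists" outcome should be forbidden.
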
tools/memory-model/linux-kernel.def

@@ -24,6 +24,7 @@ smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
+smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
@@ -49,8 +50,10 @@ synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
// SRCU
-srcu_read_lock(X) __srcu{srcu-lock}(X)
-srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+srcu_read_lock(X) __load{srcu-lock}(*X)
+srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
+srcu_down_read(X) __load{srcu-lock}(*X)
+srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
synchronize_srcu(X) { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }

tools/memory-model/lock.cat

@@ -36,9 +36,9 @@ let RU = try RU with emptyset
(* Treat RL as a kind of LF: a read with no ordering properties *)
let LF = LF | RL
-(* There should be no ordinary R or W accesses to spinlocks *)
-let ALL-LOCKS = LKR | LKW | UL | LF | RU
-flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
+(* There should be no ordinary R or W accesses to spinlocks or SRCU structs *)
+let ALL-LOCKS = LKR | LKW | UL | LF | RU | Srcu-lock | Srcu-unlock | Sync-srcu
+flag ~empty [M \ IW \ ALL-LOCKS] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
(* Link Lock-Reads to their RMW-partner Lock-Writes *)
let lk-rmw = ([LKR] ; po-loc ; [LKW]) \ (po ; po)