mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-31 16:38:12 +00:00
vmscan: use atomic-long for shrinker batching
Use atomic-long operations instead of looping around cmpxchg().

[akpm@linux-foundation.org: massage atomic.h inclusions]

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
635697c663
commit
83aeeada7c
4 changed files with 10 additions and 12 deletions
|
@@ -393,8 +393,8 @@ struct inodes_stat_t {
|
|||
#include <linux/semaphore.h>
|
||||
#include <linux/fiemap.h>
|
||||
#include <linux/rculist_bl.h>
|
||||
#include <linux/shrinker.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/shrinker.h>
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
|
|
|
@@ -10,6 +10,7 @@
|
|||
#include <linux/mmzone.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/prio_tree.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/debug_locks.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/range.h>
|
||||
|
|
|
@@ -35,7 +35,7 @@ struct shrinker {
|
|||
|
||||
/* These are for internal use */
|
||||
struct list_head list;
|
||||
long nr; /* objs pending delete */
|
||||
atomic_long_t nr_in_batch; /* objs pending delete */
|
||||
};
|
||||
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
|
||||
extern void register_shrinker(struct shrinker *);
|
||||
|
|
17
mm/vmscan.c
17
mm/vmscan.c
|
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
|
|||
*/
|
||||
void register_shrinker(struct shrinker *shrinker)
|
||||
{
|
||||
shrinker->nr = 0;
|
||||
atomic_long_set(&shrinker->nr_in_batch, 0);
|
||||
down_write(&shrinker_rwsem);
|
||||
list_add_tail(&shrinker->list, &shrinker_list);
|
||||
up_write(&shrinker_rwsem);
|
||||
|
@@ -264,9 +264,7 @@ unsigned long shrink_slab(struct shrink_control *shrink,
|
|||
* and zero it so that other concurrent shrinker invocations
|
||||
* don't also do this scanning work.
|
||||
*/
|
||||
do {
|
||||
nr = shrinker->nr;
|
||||
} while (cmpxchg(&shrinker->nr, nr, 0) != nr);
|
||||
nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
|
||||
|
||||
total_scan = nr;
|
||||
delta = (4 * nr_pages_scanned) / shrinker->seeks;
|
||||
|
@@ -328,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
|
|||
* manner that handles concurrent updates. If we exhausted the
|
||||
* scan, there is no need to do an update.
|
||||
*/
|
||||
do {
|
||||
nr = shrinker->nr;
|
||||
new_nr = total_scan + nr;
|
||||
if (total_scan <= 0)
|
||||
break;
|
||||
} while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
|
||||
if (total_scan > 0)
|
||||
new_nr = atomic_long_add_return(total_scan,
|
||||
&shrinker->nr_in_batch);
|
||||
else
|
||||
new_nr = atomic_long_read(&shrinker->nr_in_batch);
|
||||
|
||||
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue