mm: add selftests for migration entries

Add some basic migration tests and in particular tests that will
stress both the pte and pmd migration entry wait paths.

Link: https://lkml.kernel.org/r/20220324014349.229253-1-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
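The tests below revolve around move_pages(2): helper threads or child processes keep touching a mapping while the main thread migrates its pages back and forth between two NUMA nodes, so the helpers repeatedly fault on migration entries and exercise the pte/pmd wait paths. As a rough standalone sketch of that system call (not part of this patch; it assumes libnuma is installed and at least two NUMA nodes exist, and it uses node 0 plus the unprivileged MPOL_MF_MOVE flag rather than the MPOL_MF_MOVE_ALL the test uses, which needs CAP_SYS_NICE):

/*
 * Illustrative only: migrate one page of an anonymous mapping to a given
 * NUMA node with move_pages(2). Build with: cc demo.c -lnuma
 */
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	int node = 0;		/* assumed target node; adjust for your system */
	int status = -1;
	long ret;
	void *page;

	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;
	memset(page, 0xde, 4096);	/* fault the page in first */

	/* pid 0 means the calling process; status receives the node or -errno */
	ret = move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE);
	printf("move_pages returned %ld, status %d\n", ret, status);
	return ret ? 1 : 0;
}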

tools/testing/selftests/vm/Makefile

@@ -41,6 +41,7 @@ TEST_GEN_FILES += map_fixed_noreplace
TEST_GEN_FILES += map_hugetlb
TEST_GEN_FILES += map_populate
TEST_GEN_FILES += memfd_secret
TEST_GEN_FILES += migration
TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
TEST_GEN_FILES += mremap_dontunmap
@@ -149,6 +150,8 @@ $(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)
$(OUTPUT)/ksm_tests: LDLIBS += -lnuma

$(OUTPUT)/migration: LDLIBS += -lnuma

local_config.mk local_config.h: check_config.sh
	/bin/sh ./check_config.sh $(CC)

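The -lnuma above is needed for the libnuma calls the new test's fixture uses to discover nodes (numa_available(), numa_num_task_cpus(), numa_bitmask_isbitset() on numa_all_nodes_ptr). Purely as an illustration of that dependency, a standalone probe along the same lines might look like this (hypothetical file name probe.c, built with cc probe.c -lnuma):

/* Illustrative probe (not part of the patch): report whether the machine
 * has the two NUMA nodes the migration tests need.
 */
#include <numa.h>
#include <stdio.h>

int main(void)
{
	int n, found = 0;

	if (numa_available() < 0) {
		printf("NUMA not available\n");
		return 1;
	}

	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n) && ++found == 2)
			break;

	printf("task CPUs: %d, usable nodes: %s\n", numa_num_task_cpus(),
	       found >= 2 ? ">= 2 (tests can run)" : "< 2 (tests will skip)");
	return 0;
}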
tools/testing/selftests/vm/migration.c

@@ -0,0 +1,193 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>

#define TWOMEG (2<<20)
#define RUNTIME (60)

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;
	int n1;
	int n2;
};
FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

	/* Pick the first two NUMA nodes available to this task. */
	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0)
				printf("Didn't migrate %d pages\n", ret);
			else
				perror("Couldn't migrate pages");
			return -2;
		}

		/* Swap the target node so the page keeps bouncing back and forth. */
		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}

	return 0;
}
void *access_mem(void *ptr)
{
	uint64_t y = 0;
	volatile uint64_t *x = ptr;

	while (1) {
		pthread_testcancel();
		y += *x;
	}

	return NULL;
}
/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try to access them, triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}
/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid)
			access_mem(ptr);
		else
			self->pids[i] = pid;
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Over-allocate so a naturally aligned 2MB region is available for THP. */
	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

TEST_HARNESS_MAIN
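For reference, the FIXTURE()/FIXTURE_SETUP()/FIXTURE_TEARDOWN(), TEST_F_TIMEOUT() and TEST_HARNESS_MAIN macros used above come from kselftest_harness.h; each test runs in its own forked child with the fixture set up before it and torn down after it. A stripped-down sketch of the same pattern, with made-up fixture and test names, looks like this:

/* Minimal kselftest_harness.h usage sketch; names are illustrative only. */
#include "../kselftest_harness.h"
#include <stdlib.h>

FIXTURE(demo)
{
	int *buf;
};

FIXTURE_SETUP(demo)
{
	self->buf = malloc(sizeof(*self->buf));
	ASSERT_NE(self->buf, NULL);
	*self->buf = 42;
}

FIXTURE_TEARDOWN(demo)
{
	free(self->buf);
}

/* Like the migration tests, a per-test timeout (in seconds) can be given. */
TEST_F_TIMEOUT(demo, reads_back, 10)
{
	ASSERT_EQ(*self->buf, 42);
}

TEST_HARNESS_MAIN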