diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 49f6018e57bc..fac248a43666 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -41,6 +41,18 @@ static void guest_code(void)
 	GUEST_SYNC(0);
 }
 
+/*
+ * We have to perform a direct system call for getcpu() because it's
+ * not available until glibc 2.29.
+ */
+static void sys_getcpu(unsigned *cpu)
+{
+	int r;
+
+	r = syscall(__NR_getcpu, cpu, NULL, NULL);
+	TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)", errno, strerror(errno));
+}
+
 static int next_cpu(int cpu)
 {
 	/*
@@ -85,7 +97,7 @@ static void *migration_worker(void *__rseq_tid)
 		atomic_inc(&seq_cnt);
 
 		/*
-		 * Ensure the odd count is visible while sched_getcpu() isn't
+		 * Ensure the odd count is visible while getcpu() isn't
 		 * stable, i.e. while changing affinity is in-progress.
 		 */
 		smp_wmb();
@@ -126,10 +138,10 @@
 		 *         check completes.
 		 *
 		 *     3. To ensure the read-side makes efficient forward progress,
-		 *        e.g. if sched_getcpu() involves a syscall. Stalling the
-		 *        read-side means the test will spend more time waiting for
-		 *        sched_getcpu() to stabilize and less time trying to hit
-		 *        the timing-dependent bug.
+		 *        e.g. if getcpu() involves a syscall. Stalling the read-side
+		 *        means the test will spend more time waiting for getcpu()
+		 *        to stabilize and less time trying to hit the timing-dependent
+		 *        bug.
 		 *
 		 * Because any bug in this area is likely to be timing-dependent,
 		 * run with a range of delays at 1us intervals from 1us to 10us
@@ -224,9 +236,9 @@ int main(int argc, char *argv[])
 
 		/*
 		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
-		 * doesn't occur between sched_getcpu() and reading the rseq
-		 * cpu_id by rereading both if the sequence count changes, or
-		 * if the count is odd (migration in-progress).
+		 * doesn't occur between getcpu() and reading the rseq cpu_id
+		 * by rereading both if the sequence count changes, or if the
+		 * count is odd (migration in-progress).
 		 */
 		do {
 			/*
@@ -236,12 +248,12 @@
 			snapshot = atomic_read(&seq_cnt) & ~1;
 
 			/*
-			 * Ensure reading sched_getcpu() and rseq.cpu_id
-			 * complete in a single "no migration" window, i.e. are
-			 * not reordered across the seq_cnt reads.
+			 * Ensure calling getcpu() and reading rseq.cpu_id complete
+			 * in a single "no migration" window, i.e. are not reordered
+			 * across the seq_cnt reads.
 			 */
 			smp_rmb();
-			cpu = sched_getcpu();
+			sys_getcpu(&cpu);
 			rseq_cpu = rseq_current_cpu_raw();
 			smp_rmb();
 		} while (snapshot != atomic_read(&seq_cnt));
@@ -253,9 +265,9 @@
 	/*
 	 * Sanity check that the test was able to enter the guest a reasonable
 	 * number of times, e.g. didn't get stalled too often/long waiting for
-	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
-	 * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
-	 * than migrations given the 1us+ delay in the migration task.
+	 * getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a fairly
+	 * conservative ratio on x86-64, which can do _more_ KVM_RUNs than
+	 * migrations given the 1us+ delay in the migration task.
 	 */
 	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
 		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);
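
Two standalone sketches follow for context; neither is part of the patch itself.

First, a minimal demonstration of the direct getcpu() syscall technique the patch adds, usable against a glibc older than 2.29 where the getcpu() wrapper is unavailable. The file name getcpu_demo.c and the plain error handling are illustrative stand-ins for the selftest's TEST_ASSERT(), which comes from the kvm selftest headers:

/* getcpu_demo.c - invoke getcpu() via syscall(2) directly. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static void sys_getcpu(unsigned int *cpu)
{
	/*
	 * The second argument would return the NUMA node; the third (a
	 * legacy struct getcpu_cache *) is unused since Linux 2.6.24.
	 * Both may be NULL.
	 */
	int r = syscall(__NR_getcpu, cpu, NULL, NULL);

	if (r) {
		fprintf(stderr, "getcpu failed, errno = %d (%s)\n",
			errno, strerror(errno));
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	unsigned int cpu;

	sys_getcpu(&cpu);
	printf("running on CPU %u\n", cpu);
	return 0;
}

Second, a compact model of the seq_cnt handshake the comments describe: the migration worker makes the count odd while a "migration" is in flight, and the reader retries until it observes a stable, even count. This model deliberately uses C11 stdatomic with sequentially consistent operations as a conservative stand-in for the selftest's atomic_inc()/smp_wmb()/smp_rmb() helpers from tools/include; the file name and loop bounds are made up for illustration:

/* seqcnt_model.c - odd/even sequence-count retry loop, modeled in C11.
 * Build with: gcc -pthread seqcnt_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int seq_cnt;
static atomic_int cur_cpu;	/* stands in for the task's CPU */

static void *migration_worker(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++) {
		atomic_fetch_add(&seq_cnt, 1);	/* odd: "migration" in-progress */
		atomic_store(&cur_cpu, i % 4);	/* simulate sched_setaffinity() */
		atomic_fetch_add(&seq_cnt, 1);	/* even: migration complete */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int snapshot, cpu;

	pthread_create(&tid, NULL, migration_worker, NULL);
	for (int i = 0; i < 100000; i++) {
		do {
			/* Drop bit 0 so an odd (in-flight) count forces a retry. */
			snapshot = atomic_load(&seq_cnt) & ~1;
			cpu = atomic_load(&cur_cpu);
		} while (snapshot != atomic_load(&seq_cnt));
		(void)cpu;	/* cpu was read in a stable, even-count window */
	}
	pthread_join(tid, NULL);
	printf("completed all stable reads\n");
	return 0;
}

The design point the patch's comments rely on: because seq_cnt is incremented once before and once after the affinity change, an odd snapshot, or a snapshot that differs across the two reads, indicates the reads may have raced with a migration, so the getcpu()/rseq.cpu_id pair is trusted only when the count is even and unchanged.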