tools/virtio: Fix arm64 ringtest compilation error

[ Upstream commit 57380fd124 ]

Add a cpu_relax() definition for arm64 instead of falling through to the
assert(0) fallback, and include the missing <assert.h> header. Also add
smp_wmb() and smp_mb() definitions for arm64.
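
As a rough illustration (not part of the patch; the file name and the tiny
publish/consume loop are made up), this is what the new arm64 definitions
boil down to when pulled out of main.h so they can be compiled on their own:

    /* arm64_barriers_sketch.c - standalone sketch of the macros this patch
     * adds; the non-arm64 fallbacks here use compiler builtins only so the
     * sketch still builds elsewhere. */
    #include <stdbool.h>

    #define barrier() asm volatile("" ::: "memory")

    #if defined(__aarch64__)
    #define cpu_relax() asm ("yield" ::: "memory")
    #define smp_mb()    asm volatile("dmb ish" ::: "memory")
    #define smp_wmb()   asm volatile("dmb ishst" ::: "memory")
    #else
    #define cpu_relax() barrier()
    #define smp_mb()    __sync_synchronize()
    #define smp_wmb()   __sync_synchronize()
    #endif

    static volatile bool ready;
    static int payload;

    int main(void)
    {
        payload = 42;
        smp_wmb();          /* order the payload store before the flag */
        ready = true;

        while (!ready)      /* trivially true here; shows the spin idiom */
            cpu_relax();
        smp_mb();
        return payload == 42 ? 0 : 1;
    }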

The build currently fails as follows; defining a fallback for
__always_inline avoids the errors from it being undefined:

    $ make
    cc -Wall -pthread -O2 -ggdb -flto -fwhole-program -c -o ring.o ring.c
    In file included from ring.c:10:
    main.h: In function ‘busy_wait’:
    main.h:99:21: warning: implicit declaration of function ‘assert’ [-Wimplicit-function-declaration]
    99 | #define cpu_relax() assert(0)
        |                     ^~~~~~
    main.h:107:17: note: in expansion of macro ‘cpu_relax’
    107 |                 cpu_relax();
        |                 ^~~~~~~~~
    main.h:12:1: note: ‘assert’ is defined in header ‘<assert.h>’; did you forget to ‘#include <assert.h>’?
    11 | #include <stdbool.h>
    +++ |+#include <assert.h>
    12 |
    main.h: At top level:
    main.h:143:23: error: expected ‘;’ before ‘void’
    143 | static __always_inline
        |                       ^
        |                       ;
    144 | void __read_once_size(const volatile void *p, void *res, int size)
        | ~~~~
    main.h:158:23: error: expected ‘;’ before ‘void’
    158 | static __always_inline void __write_once_size(volatile void *p, void *res, int size)
        |                       ^~~~~
        |                       ;
    make: *** [<builtin>: ring.o] Error 1
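
The second batch of errors simply reflects __always_inline not being
defined in this standalone build: the token is then parsed as an ordinary
identifier, so the declaration is cut short before ‘void’. A two-line
reproduction (file name hypothetical):

    /* repro.c - 'cc -c repro.c' fails the same way when nothing
     * has defined __always_inline beforehand */
    static __always_inline
    void f(void) { }

Guarding the fallback with #ifndef, as the hunk below does, keeps it from
clashing with headers that already provide __always_inline.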

Signed-off-by: Rong Tao <rongtao@cestc.cn>
Message-Id: <tencent_F53E159DD7925174445D830DA19FACF44B07@qq.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>

@@ -8,6 +8,7 @@
 #ifndef MAIN_H
 #define MAIN_H
+#include <assert.h>
 #include <stdbool.h>
 extern int param;
@@ -95,6 +96,8 @@ extern unsigned ring_size;
 #define cpu_relax() asm ("rep; nop" ::: "memory")
 #elif defined(__s390x__)
 #define cpu_relax() barrier()
+#elif defined(__aarch64__)
+#define cpu_relax() asm ("yield" ::: "memory")
 #else
 #define cpu_relax() assert(0)
 #endif
@@ -112,6 +115,8 @@ static inline void busy_wait(void)
 #if defined(__x86_64__) || defined(__i386__)
 #define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
+#elif defined(__aarch64__)
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
 #else
 /*
  * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
@@ -136,10 +141,16 @@ static inline void busy_wait(void)
 #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
 #define smp_wmb() barrier()
+#elif defined(__aarch64__)
+#define smp_wmb() asm volatile("dmb ishst" ::: "memory")
 #else
 #define smp_wmb() smp_release()
 #endif
+#ifndef __always_inline
+#define __always_inline inline __attribute__((always_inline))
+#endif
 static __always_inline
 void __read_once_size(const volatile void *p, void *res, int size)
 {
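
With the patch applied, the ringtest tools should build again; a quick
check from the top of the kernel tree (arm64 host assumed, output omitted):

    $ make -C tools/virtio/ringtest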