linux-stable/arch/arm64/include/uapi/asm/sve_context.h
Dave Martin 9966a05c7b arm64/sve: Disentangle <uapi/asm/ptrace.h> from <uapi/asm/sigcontext.h>
Currently, <uapi/asm/sigcontext.h> provides common definitions for
describing SVE context structures that are also used by the ptrace
definitions in <uapi/asm/ptrace.h>.

For this reason, a #include of <asm/sigcontext.h> was added in
ptrace.h, but it turns out that this can interact badly with
userspace code that tries to include ptrace.h on top of the libc
headers (which may provide their own shadow definitions for
sigcontext.h).

To make the headers easier for userspace to consume, this patch
bounces the common definitions into an __SVE_* namespace and moves
them to a backend header <uapi/asm/sve_context.h> that can be
included by the other headers as appropriate.  This should allow
ptrace.h to be used alongside libc's sigcontext.h (if any) without
ill effects.
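For example, <uapi/asm/sigcontext.h> can then pull in the backend
header and re-express its traditional names in terms of the new
__SVE_* ones, roughly as follows (a sketch of the approach, not the
exact hunk):

	#include <asm/sve_context.h>

	#define SVE_VQ_BYTES		__SVE_VQ_BYTES
	#define SVE_VQ_MIN		__SVE_VQ_MIN
	#define SVE_VQ_MAX		__SVE_VQ_MAX

	#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
	#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)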

This should make the situation unambiguous: <asm/sigcontext.h> is
the header to include for the sigframe-specific definitions, while
<asm/ptrace.h> is the header to include for ptrace-specific
definitions.

To avoid conflicting with existing usage, <asm/sigcontext.h>
remains the canonical way to get the common definitions for
SVE_VQ_MIN, sve_vq_from_vl() etc., both in userspace and in the
kernel: relying on these being defined as a side effect of
including just <asm/ptrace.h> was never intended to be safe.
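For instance, userspace that needs both the ptrace view of SVE state
and the common helpers would include the headers explicitly, along
these lines (illustrative only; the libc headers involved will vary):

	#include <signal.h>		/* libc's own sigcontext definitions, if any */
	#include <asm/ptrace.h>		/* SVE ptrace definitions */
	#include <asm/sigcontext.h>	/* canonical source of SVE_VQ_MIN, sve_vq_from_vl() etc. */

	unsigned int vl = 64;			/* vector length in bytes */
	unsigned int vq = sve_vq_from_vl(vl);	/* 4 quadwords */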

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2019-01-04 13:31:45 +00:00

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2017-2018 ARM Limited */

/*
 * For use by other UAPI headers only.
 * Do not make direct use of header or its definitions.
 */

#ifndef _UAPI__ASM_SVE_CONTEXT_H
#define _UAPI__ASM_SVE_CONTEXT_H

#include <linux/types.h>

#define __SVE_VQ_BYTES		16	/* number of bytes per quadword */

#define __SVE_VQ_MIN		1
#define __SVE_VQ_MAX		512

#define __SVE_VL_MIN		(__SVE_VQ_MIN * __SVE_VQ_BYTES)
#define __SVE_VL_MAX		(__SVE_VQ_MAX * __SVE_VQ_BYTES)

#define __SVE_NUM_ZREGS		32
#define __SVE_NUM_PREGS		16

#define __sve_vl_valid(vl)			\
	((vl) % __SVE_VQ_BYTES == 0 &&		\
	 (vl) >= __SVE_VL_MIN &&		\
	 (vl) <= __SVE_VL_MAX)

#define __sve_vq_from_vl(vl)	((vl) / __SVE_VQ_BYTES)
#define __sve_vl_from_vq(vq)	((vq) * __SVE_VQ_BYTES)

#define __SVE_ZREG_SIZE(vq)	((__u32)(vq) * __SVE_VQ_BYTES)
#define __SVE_PREG_SIZE(vq)	((__u32)(vq) * (__SVE_VQ_BYTES / 8))
#define __SVE_FFR_SIZE(vq)	__SVE_PREG_SIZE(vq)

#define __SVE_ZREGS_OFFSET	0
#define __SVE_ZREG_OFFSET(vq, n) \
	(__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
#define __SVE_ZREGS_SIZE(vq) \
	(__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)

#define __SVE_PREGS_OFFSET(vq) \
	(__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
#define __SVE_PREG_OFFSET(vq, n) \
	(__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
#define __SVE_PREGS_SIZE(vq) \
	(__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))

#define __SVE_FFR_OFFSET(vq) \
	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))

#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */
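
As a quick sanity check of the layout arithmetic encoded by these
macros, a stand-alone program along the following lines prints the
register block layout for vq = 4, i.e. a 512-bit vector length
(illustration only: the include path assumes installed arm64 UAPI
headers, and real code should reach these values through
<asm/sigcontext.h> or <asm/ptrace.h> rather than this backend header):

#include <stdio.h>
#include <asm/sve_context.h>	/* backend header shown above */

int main(void)
{
	unsigned int vq = 4;	/* 4 quadwords x 16 bytes = 64 bytes = 512-bit VL */

	printf("Z-reg size:    %u bytes\n", __SVE_ZREG_SIZE(vq));	/* 64 */
	printf("P-reg size:    %u bytes\n", __SVE_PREG_SIZE(vq));	/* 8 */
	printf("Z-regs block:  %u bytes\n", __SVE_ZREGS_SIZE(vq));	/* 32 * 64 = 2048 */
	printf("P-regs offset: %u\n", __SVE_PREGS_OFFSET(vq));		/* 2048 */
	printf("FFR offset:    %u\n", __SVE_FFR_OFFSET(vq));		/* 2048 + 16 * 8 = 2176 */
	return 0;
}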