Merge branch 'for-next/sysreg-gen' into for-next/core

* for-next/sysreg-gen: (32 commits)
  : Automatic system register definition generation.
  arm64/sysreg: Generate definitions for FAR_ELx
  arm64/sysreg: Generate definitions for DACR32_EL2
  arm64/sysreg: Generate definitions for CSSELR_EL1
  arm64/sysreg: Generate definitions for CPACR_ELx
  arm64/sysreg: Generate definitions for CONTEXTIDR_ELx
  arm64/sysreg: Generate definitions for CLIDR_EL1
  arm64/sve: Generate ZCR definitions
  arm64/sme: Generate definitions for SVCR
  arm64/sme: Generate SMPRI_EL1 definitions
  arm64/sme: Automatically generate SMPRIMAP_EL2 definitions
  arm64/sme: Automatically generate SMIDR_EL1 defines
  arm64/sme: Automatically generate defines for SMCR
  arm64/sysreg: Support generation of RAZ fields
  arm64/sme: Remove _EL0 from name of SVCR - FIXME sysreg.h
  arm64/sme: Standardise bitfield names for SVCR
  arm64/sme: Drop SYS_ from SMIDR_EL1 defines
  arm64/fp: Rename SVE and SME LEN field name to _WIDTH
  arm64/fp: Make SVE and SME length register definition match architecture
  arm64/sysreg: fix odd line spacing
  arm64/sysreg: improve comment for regs without fields
  ...
Catalin Marinas 2022-05-20 18:50:57 +01:00
commit e003d5335c
19 changed files with 784 additions and 220 deletions


@ -7,3 +7,4 @@ generic-y += parport.h
generic-y += user.h
generated-y += cpucaps.h
generated-y += sysreg-defs.h


@ -142,7 +142,7 @@ static inline bool __init __early_cpu_has_rndr(void)
{
/* Open code as we run prior to the first call to cpufeature. */
unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
static inline bool __init __must_check


@ -171,7 +171,7 @@
msr_s SYS_SMCR_EL2, x1 // length for EL1.
mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported?
ubfx x1, x1, #SYS_SMIDR_EL1_SMPS_SHIFT, #1
ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
cbz x1, .Lskip_sme_\@
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal


@ -67,12 +67,12 @@ extern void fpsimd_save_and_flush_cpu_state(void);
static inline bool thread_sm_enabled(struct thread_struct *thread)
{
return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK);
return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
}
static inline bool thread_za_enabled(struct thread_struct *thread)
{
return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_ZA_MASK);
return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
}
/* Maximum VL that SVE/SME VL-agnostic software can transparently support */


@ -192,7 +192,7 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
{
if (system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK))
if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
return thread_get_sme_vl(thread);
else
return thread_get_sve_vl(thread);


@ -114,6 +114,14 @@
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
* come from here. The header relies on the definition of sys_reg()
* earlier in this file.
*/
#include "asm/sysreg-defs.h"
/*
* System registers, organised loosely by encoding but grouped together
* where the architected name contains an index. e.g. ID_MMFR<n>_EL1.
@ -193,7 +201,6 @@
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
@ -201,19 +208,12 @@
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
#define SYS_SMPRI_EL1 sys_reg(3, 0, 1, 2, 4)
#define SYS_SMCR_EL1 sys_reg(3, 0, 1, 2, 6)
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
@ -249,7 +249,6 @@
#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
#define SYS_PAR_EL1_F BIT(0)
@ -403,8 +402,6 @@
#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
#define TRBIDR_ALIGN_SHIFT 0
#define SMPRI_EL1_PRIORITY_MASK 0xf
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@ -450,7 +447,6 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
@ -458,16 +454,12 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
#define SYS_SMIDR_EL1 sys_reg(3, 1, 0, 0, 6)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SYS_SMIDR_EL1_IMPLEMENTER_SHIFT 24
#define SYS_SMIDR_EL1_SMPS_SHIFT 15
#define SYS_SMIDR_EL1_AFFINITY_SHIFT 0
#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
#define SMIDR_EL1_SMPS_SHIFT 15
#define SMIDR_EL1_AFFINITY_SHIFT 0
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
@ -475,10 +467,6 @@
#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
#define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2)
#define SYS_SVCR_EL0_ZA_MASK 2
#define SYS_SVCR_EL0_SM_MASK 1
#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
@ -563,12 +551,8 @@
#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
#define SYS_SMPRIMAP_EL2 sys_reg(3, 4, 1, 2, 5)
#define SYS_SMCR_EL2 sys_reg(3, 4, 1, 2, 6)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
@ -579,7 +563,6 @@
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
@ -625,9 +608,6 @@
/* VHE encodings for architectural EL0/1 system registers */
#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
@ -637,11 +617,9 @@
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
@ -655,28 +633,26 @@
#define SCTLR_ELx_DSSBS (BIT(44))
#define SCTLR_ELx_ATA (BIT(43))
#define SCTLR_ELx_TCF_SHIFT 40
#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_ENIA_SHIFT 31
#define SCTLR_ELx_ITFSB (BIT(37))
#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))
#define SCTLR_ELx_IESB (BIT(21))
#define SCTLR_ELx_WXN (BIT(19))
#define SCTLR_ELx_ENDB (BIT(13))
#define SCTLR_ELx_I (BIT(12))
#define SCTLR_ELx_SA (BIT(3))
#define SCTLR_ELx_C (BIT(2))
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
#define SCTLR_ELx_ITFSB (BIT(37))
#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_LSMAOE (BIT(29))
#define SCTLR_ELx_nTLSMD (BIT(28))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))
#define SCTLR_ELx_EIS (BIT(22))
#define SCTLR_ELx_IESB (BIT(21))
#define SCTLR_ELx_TSCXT (BIT(20))
#define SCTLR_ELx_WXN (BIT(19))
#define SCTLR_ELx_ENDB (BIT(13))
#define SCTLR_ELx_I (BIT(12))
#define SCTLR_ELx_EOS (BIT(11))
#define SCTLR_ELx_SA (BIT(3))
#define SCTLR_ELx_C (BIT(2))
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
@ -698,34 +674,6 @@
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_EPAN (BIT(57))
#define SCTLR_EL1_ATA0 (BIT(42))
#define SCTLR_EL1_TCF0_SHIFT 38
#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_BT1 (BIT(36))
#define SCTLR_EL1_BT0 (BIT(35))
#define SCTLR_EL1_UCI (BIT(26))
#define SCTLR_EL1_E0E (BIT(24))
#define SCTLR_EL1_SPAN (BIT(23))
#define SCTLR_EL1_NTWE (BIT(18))
#define SCTLR_EL1_NTWI (BIT(16))
#define SCTLR_EL1_UCT (BIT(15))
#define SCTLR_EL1_DZE (BIT(14))
#define SCTLR_EL1_UMA (BIT(9))
#define SCTLR_EL1_SED (BIT(8))
#define SCTLR_EL1_ITD (BIT(7))
#define SCTLR_EL1_CP15BEN (BIT(5))
#define SCTLR_EL1_SA0 (BIT(4))
#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
(BIT(29)))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
#else
@ -733,13 +681,17 @@
#endif
#define INIT_SCTLR_EL1_MMU_OFF \
(ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
(ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \
SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
#define INIT_SCTLR_EL1_MMU_ON \
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \
SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \
SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \
SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \
SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
@ -752,25 +704,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
/* id_aa64isar0 */
#define ID_AA64ISAR0_RNDR_SHIFT 60
#define ID_AA64ISAR0_TLB_SHIFT 56
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40
#define ID_AA64ISAR0_SM3_SHIFT 36
#define ID_AA64ISAR0_SHA3_SHIFT 32
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
#define ID_AA64ISAR0_CRC32_SHIFT 16
#define ID_AA64ISAR0_SHA2_SHIFT 12
#define ID_AA64ISAR0_SHA1_SHIFT 8
#define ID_AA64ISAR0_AES_SHIFT 4
#define ID_AA64ISAR0_TLB_RANGE_NI 0x0
#define ID_AA64ISAR0_TLB_RANGE 0x2
/* id_aa64isar1 */
#define ID_AA64ISAR1_I8MM_SHIFT 52
#define ID_AA64ISAR1_DGH_SHIFT 48
@ -1154,27 +1087,6 @@
#define DCZID_DZP_SHIFT 4
#define DCZID_BS_SHIFT 0
/*
* The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
* are reserved by the SVE architecture for future expansion of the LEN
* field, with compatible semantics.
*/
#define ZCR_ELx_LEN_SHIFT 0
#define ZCR_ELx_LEN_SIZE 9
#define ZCR_ELx_LEN_MASK 0x1ff
#define SMCR_ELx_FA64_SHIFT 31
#define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT)
/*
* The SMCR_ELx_LEN_* definitions intentionally include bits [8:4] which
* are reserved by the SME architecture for future expansion of the LEN
* field, with compatible semantics.
*/
#define SMCR_ELx_LEN_SHIFT 0
#define SMCR_ELx_LEN_SIZE 9
#define SMCR_ELx_LEN_MASK 0x1ff
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
@ -1412,4 +1324,10 @@
#endif
#define SYS_FIELD_PREP(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, val)
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
#endif /* __ASM_SYSREG_H */
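For illustration only (not part of the diff): once the generated SCTLR_EL1 definitions are in scope, the new SYS_FIELD_PREP()/SYS_FIELD_PREP_ENUM() helpers expand as in the sketch below, mirroring how the MTE hunks later in this series use them. The example_*() wrapper names are hypothetical and exist purely for the example.

#include <linux/types.h>	/* u64 */
#include <linux/bitfield.h>	/* FIELD_PREP() */
#include <asm/sysreg.h>		/* sysreg_clear_set(), SYS_FIELD_PREP*(), SCTLR_EL1_* */

/* Hypothetical wrapper: program SCTLR_EL1.TCF from a raw field value.
 * SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf) expands to
 * FIELD_PREP(SCTLR_EL1_TCF_MASK, tcf). */
static inline void example_set_kernel_tcf(u64 tcf)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
}

/* Hypothetical wrapper: build the synchronous tag-check value by enum name.
 * SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, SYNC) expands to
 * FIELD_PREP(SCTLR_EL1_TCF_MASK, SCTLR_EL1_TCF_SYNC). */
static inline u64 example_tcf_sync(void)
{
	return SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, SYNC);
}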


@ -191,20 +191,20 @@ static bool __system_matches_cap(unsigned int n);
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
ARM64_FTR_END,
};
@ -577,13 +577,13 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
static const struct arm64_ftr_bits ftr_zcr[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_smcr[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_SIZE, 0), /* LEN */
SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */
ARM64_FTR_END,
};
@ -2062,7 +2062,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
.field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 2,
@ -2244,10 +2244,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_TLB_SHIFT,
.field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = ID_AA64ISAR0_TLB_RANGE,
.min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
},
#ifdef CONFIG_ARM64_HW_AFDBM
{
@ -2276,7 +2276,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
.field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
.field_width = 4,
.min_field_value = 1,
},
@ -2431,7 +2431,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
.field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@ -2590,22 +2590,22 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),


@ -393,7 +393,7 @@ static void task_fpsimd_load(void)
if (test_thread_flag(TIF_SME))
sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
write_sysreg_s(current->thread.svcr, SYS_SVCR);
if (thread_za_enabled(&current->thread))
za_load_state(current->thread.za_state);
@ -445,15 +445,15 @@ static void fpsimd_save(void)
if (system_supports_sme()) {
u64 *svcr = last->svcr;
*svcr = read_sysreg_s(SYS_SVCR_EL0);
*svcr = read_sysreg_s(SYS_SVCR);
*svcr = read_sysreg_s(SYS_SVCR_EL0);
*svcr = read_sysreg_s(SYS_SVCR);
if (*svcr & SYS_SVCR_EL0_ZA_MASK)
if (*svcr & SVCR_ZA_MASK)
za_save_state(last->za_state);
/* If we are in streaming mode override regular SVE. */
if (*svcr & SYS_SVCR_EL0_SM_MASK) {
if (*svcr & SVCR_SM_MASK) {
save_sve_regs = true;
save_ffr = system_supports_fa64();
vl = last->sme_vl;
@ -851,8 +851,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
sve_to_fpsimd(task);
if (system_supports_sme() && type == ARM64_VEC_SME) {
task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
SYS_SVCR_EL0_ZA_MASK);
task->thread.svcr &= ~(SVCR_SM_MASK |
SVCR_ZA_MASK);
clear_thread_flag(TIF_SME);
}
@ -1914,10 +1914,10 @@ void __efi_fpsimd_begin(void)
__this_cpu_write(efi_sve_state_used, true);
if (system_supports_sme()) {
svcr = read_sysreg_s(SYS_SVCR_EL0);
svcr = read_sysreg_s(SYS_SVCR);
if (!system_supports_fa64())
ffr = svcr & SYS_SVCR_EL0_SM_MASK;
ffr = svcr & SVCR_SM_MASK;
__this_cpu_write(efi_sm_state, ffr);
}
@ -1927,8 +1927,8 @@ void __efi_fpsimd_begin(void)
ffr);
if (system_supports_sme())
sysreg_clear_set_s(SYS_SVCR_EL0,
SYS_SVCR_EL0_SM_MASK, 0);
sysreg_clear_set_s(SYS_SVCR,
SVCR_SM_MASK, 0);
} else {
fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
@ -1961,9 +1961,9 @@ void __efi_fpsimd_end(void)
*/
if (system_supports_sme()) {
if (__this_cpu_read(efi_sm_state)) {
sysreg_clear_set_s(SYS_SVCR_EL0,
sysreg_clear_set_s(SYS_SVCR,
0,
SYS_SVCR_EL0_SM_MASK);
SVCR_SM_MASK);
if (!system_supports_fa64())
ffr = efi_sm_state;
}


@ -107,7 +107,8 @@ int memcmp_pages(struct page *page1, struct page *page2)
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
isb();
pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
@ -123,12 +124,12 @@ void mte_enable_kernel_sync(void)
WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
"MTE async mode enabled system wide!");
__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}
void mte_enable_kernel_async(void)
{
__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);
/*
* MTE async mode is set system wide by the first PE that
@ -145,7 +146,7 @@ void mte_enable_kernel_async(void)
void mte_enable_kernel_asymm(void)
{
if (cpus_have_cap(ARM64_MTE_ASYMM)) {
__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);
/*
* MTE asymm mode behaves as async mode for store
@ -217,11 +218,11 @@ static void mte_update_sctlr_user(struct task_struct *task)
* default order.
*/
if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
sctlr |= SCTLR_EL1_TCF0_ASYMM;
sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
sctlr |= SCTLR_EL1_TCF0_ASYNC;
sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
sctlr |= SCTLR_EL1_TCF0_SYNC;
sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
task->thread.sctlr_user = sctlr;
}


@ -867,10 +867,10 @@ static int sve_set_common(struct task_struct *target,
switch (type) {
case ARM64_VEC_SVE:
target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
target->thread.svcr &= ~SVCR_SM_MASK;
break;
case ARM64_VEC_SME:
target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
target->thread.svcr |= SVCR_SM_MASK;
break;
default:
WARN_ON_ONCE(1);
@ -1100,7 +1100,7 @@ static int za_set(struct task_struct *target,
/* If there is no data then disable ZA */
if (!count) {
target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
target->thread.svcr &= ~SVCR_ZA_MASK;
goto out;
}
@ -1125,7 +1125,7 @@ static int za_set(struct task_struct *target,
/* Mark ZA as active and let userspace use it */
set_tsk_thread_flag(target, TIF_SME);
target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
target->thread.svcr |= SVCR_ZA_MASK;
out:
fpsimd_flush_task_state(target);


@ -288,7 +288,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE);
current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
current->thread.svcr &= ~SVCR_SM_MASK;
goto fpsimd_only;
}
@ -321,7 +321,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
return -EFAULT;
if (sve.flags & SVE_SIG_FLAG_SM)
current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
current->thread.svcr |= SVCR_SM_MASK;
else
set_thread_flag(TIF_SVE);
@ -398,7 +398,7 @@ static int restore_za_context(struct user_ctxs __user *user)
return -EINVAL;
if (za.head.size <= sizeof(*user->za)) {
current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
current->thread.svcr &= ~SVCR_ZA_MASK;
return 0;
}
@ -419,7 +419,7 @@ static int restore_za_context(struct user_ctxs __user *user)
sme_alloc(current);
if (!current->thread.za_state) {
current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
current->thread.svcr &= ~SVCR_ZA_MASK;
clear_thread_flag(TIF_SME);
return -ENOMEM;
}
@ -432,7 +432,7 @@ static int restore_za_context(struct user_ctxs __user *user)
return -EFAULT;
set_thread_flag(TIF_SME);
current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
current->thread.svcr |= SVCR_ZA_MASK;
return 0;
}
@ -922,8 +922,8 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Signal handlers are invoked with ZA and streaming mode disabled */
if (system_supports_sme()) {
current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK |
SYS_SVCR_EL0_SM_MASK);
current->thread.svcr &= ~(SVCR_ZA_MASK |
SVCR_SM_MASK);
sme_smstop();
}


@ -174,9 +174,9 @@ static inline void fp_user_discard(void)
* need updating.
*/
if (system_supports_sme() && test_thread_flag(TIF_SME)) {
u64 svcr = read_sysreg_s(SYS_SVCR_EL0);
u64 svcr = read_sysreg_s(SYS_SVCR);
if (svcr & SYS_SVCR_EL0_SM_MASK)
if (svcr & SVCR_SM_MASK)
sme_smstop_sm();
}


@ -96,8 +96,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
if (read_sysreg_s(SYS_SVCR_EL0) &
(SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) {
if (read_sysreg_s(SYS_SVCR) &
(SVCR_SM_MASK | SVCR_ZA_MASK)) {
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
fpsimd_save_and_flush_cpu_state();
}


@ -159,20 +159,20 @@
* No restrictions on instructions implemented in AArch64.
*/
#define PVM_ID_AA64ISAR0_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
)
#define PVM_ID_AA64ISAR1_ALLOW (\


@ -1685,7 +1685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
{ SYS_DESC(SYS_SVCR_EL0), undef_access },
{ SYS_DESC(SYS_SVCR), undef_access },
{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
.reset = reset_pmcr, .reg = PMCR_EL0 },


@ -335,7 +335,8 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
* It will be done lazily on the other CPUs when they will hit a
* tag fault.
*/
sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
isb();
}


@ -3,7 +3,7 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
kapi-hdrs-y := $(kapi)/cpucaps.h
kapi-hdrs-y := $(kapi)/cpucaps.h $(kapi)/sysreg-defs.h
targets += $(addprefix ../../../, $(kapi-hdrs-y))
@ -14,5 +14,11 @@ kapi: $(kapi-hdrs-y)
quiet_cmd_gen_cpucaps = GEN $@
cmd_gen_cpucaps = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
quiet_cmd_gen_sysreg = GEN $@
cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(call if_changed,gen_cpucaps)
$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE
$(call if_changed,gen_sysreg)

arch/arm64/tools/gen-sysreg.awk (new executable file, 268 lines)

@ -0,0 +1,268 @@
#!/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# gen-sysreg.awk: arm64 sysreg header generator
#
# Usage: awk -f gen-sysreg.awk sysregs.txt
# Log an error and terminate
function fatal(msg) {
print "Error at " NR ": " msg > "/dev/stderr"
exit 1
}
# Sanity check that the start or end of a block makes sense at this point in
# the file. If not, produce an error and terminate.
#
# @this - the $Block or $EndBlock
# @prev - the only valid block to already be in (value of @block)
# @new - the new value of @block
function change_block(this, prev, new) {
if (block != prev)
fatal("unexpected " this " (inside " block ")")
block = new
}
# Sanity check that the number of records for a field makes sense. If not,
# produce an error and terminate.
function expect_fields(nf) {
if (NF != nf)
fatal(NF " fields found where " nf " expected")
}
# Print a CPP macro definition, padded with spaces so that the macro bodies
# line up in a column
function define(name, val) {
printf "%-48s%s\n", "#define " name, val
}
# Print standard BITMASK/SHIFT/WIDTH CPP definitions for a field
function define_field(reg, field, msb, lsb) {
define(reg "_" field, "GENMASK(" msb ", " lsb ")")
define(reg "_" field "_MASK", "GENMASK(" msb ", " lsb ")")
define(reg "_" field "_SHIFT", lsb)
define(reg "_" field "_WIDTH", msb - lsb + 1)
}
# Parse a "<msb>[:<lsb>]" string into the global variables @msb and @lsb
function parse_bitdef(reg, field, bitdef, _bits)
{
if (bitdef ~ /^[0-9]+$/) {
msb = bitdef
lsb = bitdef
} else if (split(bitdef, _bits, ":") == 2) {
msb = _bits[1]
lsb = _bits[2]
} else {
fatal("invalid bit-range definition '" bitdef "'")
}
if (msb != next_bit)
fatal(reg "." field " starts at " msb " not " next_bit)
if (63 < msb || msb < 0)
fatal(reg "." field " invalid high bit in '" bitdef "'")
if (63 < lsb || lsb < 0)
fatal(reg "." field " invalid low bit in '" bitdef "'")
if (msb < lsb)
fatal(reg "." field " invalid bit-range '" bitdef "'")
next_bit = lsb - 1
}
BEGIN {
print "#ifndef __ASM_SYSREG_DEFS_H"
print "#define __ASM_SYSREG_DEFS_H"
print ""
print "/* Generated file - do not edit */"
print ""
block = "None"
}
END {
print "#endif /* __ASM_SYSREG_DEFS_H */"
}
# skip blank lines and comment lines
/^$/ { next }
/^#/ { next }
/^SysregFields/ {
change_block("SysregFields", "None", "SysregFields")
expect_fields(2)
reg = $2
res0 = "UL(0)"
res1 = "UL(0)"
next_bit = 63
next
}
/^EndSysregFields/ {
if (next_bit > 0)
fatal("Unspecified bits in " reg)
change_block("EndSysregFields", "SysregFields", "None")
define(reg "_RES0", "(" res0 ")")
define(reg "_RES1", "(" res1 ")")
print ""
reg = null
res0 = null
res1 = null
next
}
/^Sysreg/ {
change_block("Sysreg", "None", "Sysreg")
expect_fields(7)
reg = $2
op0 = $3
op1 = $4
crn = $5
crm = $6
op2 = $7
res0 = "UL(0)"
res1 = "UL(0)"
define("REG_" reg, "S" op0 "_" op1 "_C" crn "_C" crm "_" op2)
define("SYS_" reg, "sys_reg(" op0 ", " op1 ", " crn ", " crm ", " op2 ")")
define("SYS_" reg "_Op0", op0)
define("SYS_" reg "_Op1", op1)
define("SYS_" reg "_CRn", crn)
define("SYS_" reg "_CRm", crm)
define("SYS_" reg "_Op2", op2)
print ""
next_bit = 63
next
}
/^EndSysreg/ {
if (next_bit > 0)
fatal("Unspecified bits in " reg)
change_block("EndSysreg", "Sysreg", "None")
if (res0 != null)
define(reg "_RES0", "(" res0 ")")
if (res1 != null)
define(reg "_RES1", "(" res1 ")")
if (res0 != null || res1 != null)
print ""
reg = null
op0 = null
op1 = null
crn = null
crm = null
op2 = null
res0 = null
res1 = null
next
}
# Currently this is effectively a comment, in future we may want to emit
# defines for the fields.
/^Fields/ && (block == "Sysreg") {
expect_fields(2)
if (next_bit != 63)
fatal("Some fields already defined for " reg)
print "/* For " reg " fields see " $2 " */"
print ""
next_bit = 0
res0 = null
res1 = null
next
}
/^Res0/ && (block == "Sysreg" || block == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES0", $2)
field = "RES0_" msb "_" lsb
res0 = res0 " | GENMASK_ULL(" msb ", " lsb ")"
next
}
/^Res1/ && (block == "Sysreg" || block == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES1", $2)
field = "RES1_" msb "_" lsb
res1 = res1 " | GENMASK_ULL(" msb ", " lsb ")"
next
}
/^Field/ && (block == "Sysreg" || block == "SysregFields") {
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
define_field(reg, field, msb, lsb)
print ""
next
}
/^Raz/ && (block == "Sysreg" || block == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, field, $2)
next
}
/^Enum/ {
change_block("Enum", "Sysreg", "Enum")
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
define_field(reg, field, msb, lsb)
next
}
/^EndEnum/ {
change_block("EndEnum", "Enum", "Sysreg")
field = null
msb = null
lsb = null
print ""
next
}
/0b[01]+/ && (block == "Enum") {
expect_fields(2)
val = $1
name = $2
define(reg "_" field "_" name, "UL(" val ")")
next
}
# Any lines not handled by previous rules are unexpected
{
fatal("unhandled statement")
}
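As a sketch only (the spacing and exact layout of the generated header are an assumption), running gen-sysreg.awk over the SVCR block in the new arch/arm64/tools/sysreg file below yields definitions along the following lines, which is why the hunks above can switch from SYS_SVCR_EL0_{SM,ZA}_MASK to SVCR_{SM,ZA}_MASK while keeping the SYS_SVCR encoding:

/* Illustrative excerpt of the generated asm/sysreg-defs.h for SVCR */
#define REG_SVCR			S3_3_C4_C2_2
#define SYS_SVCR			sys_reg(3, 3, 4, 2, 2)
#define SYS_SVCR_Op0			3
#define SYS_SVCR_Op1			3
#define SYS_SVCR_CRn			4
#define SYS_SVCR_CRm			2
#define SYS_SVCR_Op2			2

#define SVCR_ZA				GENMASK(1, 1)
#define SVCR_ZA_MASK			GENMASK(1, 1)
#define SVCR_ZA_SHIFT			1
#define SVCR_ZA_WIDTH			1

#define SVCR_SM				GENMASK(0, 0)
#define SVCR_SM_MASK			GENMASK(0, 0)
#define SVCR_SM_SHIFT			0
#define SVCR_SM_WIDTH			1

#define SVCR_RES0			(UL(0) | GENMASK_ULL(63, 2))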

arch/arm64/tools/sysreg (new file, 369 lines)

@ -0,0 +1,369 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# System register metadata
# Each System register is described by a Sysreg block:
# Sysreg <name> <op0> <op1> <crn> <crm> <op2>
# <field>
# ...
# EndSysreg
# Within a Sysreg block, each field can be described as one of:
# Res0 <msb>[:<lsb>]
# Res1 <msb>[:<lsb>]
# Field <msb>[:<lsb>] <name>
# Enum <msb>[:<lsb>] <name>
# <enumval> <enumname>
# ...
# EndEnum
# Alternatively if multiple registers share the same layout then
# a SysregFields block can be used to describe the shared layout
# SysregFields <fieldsname>
# <field>
# ...
# EndSysregFields
# and referenced from within the Sysreg:
# Sysreg <name> <op0> <op1> <crn> <crm> <op2>
# Fields <fieldsname>
# EndSysreg
# For ID registers we adopt a few conventions for translating the
# language in the ARM into defines:
#
# NI - Not implemented
# IMP - Implemented
#
# In general it is recommended that new enumeration items be named for the
# feature that introduces them (e.g. FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
Enum 63:60 RNDR
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 TLB
0b0000 NI
0b0001 OS
0b0010 RANGE
EndEnum
Enum 55:52 TS
0b0000 NI
0b0001 FLAGM
0b0010 FLAGM2
EndEnum
Enum 51:48 FHM
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 DP
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 SM4
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 SM3
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 SHA3
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 RDM
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 TME
0b0000 NI
0b0001 IMP
EndEnum
Enum 23:20 ATOMIC
0b0000 NI
0b0010 IMP
EndEnum
Enum 19:16 CRC32
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 SHA2
0b0000 NI
0b0001 SHA256
0b0010 SHA512
EndEnum
Enum 11:8 SHA1
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 AES
0b0000 NI
0b0001 AES
0b0010 PMULL
EndEnum
Res0 3:0
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
Field 62 SPINTMASK
Field 61 NMI
Field 60 EnTP2
Res0 59:58
Field 57 EPAN
Field 56 EnALS
Field 55 EnAS0
Field 54 EnASR
Field 53 TME
Field 52 TME0
Field 51 TMT
Field 50 TMT0
Field 49:46 TWEDEL
Field 45 TWEDEn
Field 44 DSSBS
Field 43 ATA
Field 42 ATA0
Enum 41:40 TCF
0b00 NONE
0b01 SYNC
0b10 ASYNC
0b11 ASYMM
EndEnum
Enum 39:38 TCF0
0b00 NONE
0b01 SYNC
0b10 ASYNC
0b11 ASYMM
EndEnum
Field 37 ITFSB
Field 36 BT1
Field 35 BT0
Res0 34
Field 33 MSCEn
Field 32 CMOW
Field 31 EnIA
Field 30 EnIB
Field 29 LSMAOE
Field 28 nTLSMD
Field 27 EnDA
Field 26 UCI
Field 25 EE
Field 24 E0E
Field 23 SPAN
Field 22 EIS
Field 21 IESB
Field 20 TSCXT
Field 19 WXN
Field 18 nTWE
Res0 17
Field 16 nTWI
Field 15 UCT
Field 14 DZE
Field 13 EnDB
Field 12 I
Field 11 EOS
Field 10 EnRCTX
Field 9 UMA
Field 8 SED
Field 7 ITD
Field 6 nAA
Field 5 CP15BEN
Field 4 SA0
Field 3 SA
Field 2 C
Field 1 A
Field 0 M
EndSysreg
SysregFields CPACR_ELx
Res0 63:29
Field 28 TTA
Res0 27:26
Field 25:24 SMEN
Res0 23:22
Field 21:20 FPEN
Res0 19:18
Field 17:16 ZEN
Res0 15:0
EndSysregFields
Sysreg CPACR_EL1 3 0 1 0 2
Fields CPACR_ELx
EndSysreg
Sysreg SMPRI_EL1 3 0 1 2 4
Res0 63:4
Field 3:0 PRIORITY
EndSysreg
SysregFields ZCR_ELx
Res0 63:9
Raz 8:4
Field 3:0 LEN
EndSysregFields
Sysreg ZCR_EL1 3 0 1 2 0
Fields ZCR_ELx
EndSysreg
SysregFields SMCR_ELx
Res0 63:32
Field 31 FA64
Res0 30:9
Raz 8:4
Field 3:0 LEN
EndSysregFields
Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg
SysregFields CONTEXTIDR_ELx
Res0 63:32
Field 31:0 PROCID
EndSysregFields
Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
Sysreg CLIDR_EL1 3 1 0 0 1
Res0 63:47
Field 46:33 Ttypen
Field 32:30 ICB
Field 29:27 LoUU
Field 26:24 LoC
Field 23:21 LoUIS
Field 20:18 Ctype7
Field 17:15 Ctype6
Field 14:12 Ctype5
Field 11:9 Ctype4
Field 8:6 Ctype3
Field 5:3 Ctype2
Field 2:0 Ctype1
EndSysreg
Sysreg SMIDR_EL1 3 1 0 0 6
Res0 63:32
Field 31:24 IMPLEMENTER
Field 23:16 REVISION
Field 15 SMPS
Res0 14:12
Field 11:0 AFFINITY
EndSysreg
Sysreg CSSELR_EL1 3 2 0 0 0
Res0 63:5
Field 4 TnD
Field 3:1 Level
Field 0 InD
EndSysreg
Sysreg SVCR 3 3 4 2 2
Res0 63:2
Field 1 ZA
Field 0 SM
EndSysreg
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg SMPRIMAP_EL2 3 4 1 2 5
Field 63:60 P15
Field 59:56 P14
Field 55:52 P13
Field 51:48 P12
Field 47:44 P11
Field 43:40 P10
Field 39:36 P9
Field 35:32 P8
Field 31:28 P7
Field 27:24 P6
Field 23:20 P5
Field 19:16 P4
Field 15:12 P3
Field 11:8 P2
Field 7:4 P1
Field 3:0 P0
EndSysreg
Sysreg SMCR_EL2 3 4 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg DACR32_EL2 3 4 3 0 0
Res0 63:32
Field 31:30 D15
Field 29:28 D14
Field 27:26 D13
Field 25:24 D12
Field 23:22 D11
Field 21:20 D10
Field 19:18 D9
Field 17:16 D8
Field 15:14 D7
Field 13:12 D6
Field 11:10 D5
Field 9:8 D4
Field 7:6 D3
Field 5:4 D2
Field 3:2 D1
Field 1:0 D0
EndSysreg
Sysreg FAR_EL2 3 4 6 0 0
Field 63:0 ADDR
EndSysreg
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
Sysreg CPACR_EL12 3 5 1 0 2
Fields CPACR_ELx
EndSysreg
Sysreg ZCR_EL12 3 5 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg SMCR_EL12 3 5 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg FAR_EL12 3 5 6 0 0
Field 63:0 ADDR
EndSysreg
Sysreg CONTEXTIDR_EL12 3 5 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
SysregFields TTBRx_EL1
Field 63:48 ASID
Field 47:1 BADDR
Field 0 CnP
EndSysregFields
Sysreg TTBR0_EL1 3 0 2 0 0
Fields TTBRx_EL1
EndSysreg
Sysreg TTBR1_EL1 3 0 2 0 1
Fields TTBRx_EL1
EndSysreg