Merge branches 'for-next/kpti', 'for-next/missing-proto-warn', 'for-next/iss2-decode', 'for-next/kselftest', 'for-next/misc', 'for-next/feat_mops', 'for-next/module-alloc', 'for-next/sysreg', 'for-next/cpucap', 'for-next/acpi', 'for-next/kdump', 'for-next/acpi-doc', 'for-next/doc' and 'for-next/tpidr2-fix', remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
  docs: perf: Fix warning from 'make htmldocs' in hisi-pmu.rst
  docs: perf: Add new description for HiSilicon UC PMU
  drivers/perf: hisi: Add support for HiSilicon UC PMU driver
  drivers/perf: hisi: Add support for HiSilicon H60PA and PAv3 PMU driver
  perf: arm_cspmu: Add missing MODULE_DEVICE_TABLE
  perf/arm-cmn: Add sysfs identifier
  perf/arm-cmn: Revamp model detection
  perf/arm_dmc620: Add cpumask
  dt-bindings: perf: fsl-imx-ddr: Add i.MX93 compatible
  drivers/perf: imx_ddr: Add support for NXP i.MX9 SoC DDRC PMU driver
  perf/arm_cspmu: Decouple APMT dependency
  perf/arm_cspmu: Clean up ACPI dependency
  ACPI/APMT: Don't register invalid resource
  perf/arm_cspmu: Fix event attribute type
  perf: arm_cspmu: Set irq affinity only if overflow interrupt is used
  drivers/perf: hisi: Don't migrate perf to the CPU going to teardown
  drivers/perf: apple_m1: Force 63bit counters for M2 CPUs
  perf/arm-cmn: Fix DTC reset
  perf: qcom_l2_pmu: Make l2_cache_pmu_probe_cluster() more robust
  perf/arm-cci: Slightly optimize cci_pmu_sync_counters()

* for-next/kpti:
  : Simplify KPTI trampoline exit code
  arm64: entry: Simplify tramp_alias macro and tramp_exit routine
  arm64: entry: Preserve/restore X29 even for compat tasks

* for-next/missing-proto-warn:
  : Address -Wmissing-prototype warnings
  arm64: add alt_cb_patch_nops prototype
  arm64: move early_brk64 prototype to header
  arm64: signal: include asm/exception.h
  arm64: kaslr: add kaslr_early_init() declaration
  arm64: flush: include linux/libnvdimm.h
  arm64: module-plts: inline linux/moduleloader.h
  arm64: hide unused is_valid_bugaddr()
  arm64: efi: add efi_handle_corrupted_x18 prototype
  arm64: cpuidle: fix #ifdef for acpi functions
  arm64: kvm: add prototypes for functions called in asm
  arm64: spectre: provide prototypes for internal functions
  arm64: move cpu_suspend_set_dbg_restorer() prototype to header
  arm64: avoid prototype warnings for syscalls
  arm64: add scs_patch_vmlinux prototype
  arm64: xor-neon: mark xor_arm64_neon_*() static

* for-next/iss2-decode:
  : Add decode of ISS2 to data abort reports
  arm64/esr: Add decode of ISS2 to data abort reporting
  arm64/esr: Use GENMASK() for the ISS mask

* for-next/kselftest:
  : Various arm64 kselftest improvements
  kselftest/arm64: Log signal code and address for unexpected signals
  kselftest/arm64: Add a smoke test for ptracing hardware break/watch points

* for-next/misc:
  : Miscellaneous patches
  arm64: alternatives: make clean_dcache_range_nopatch() noinstr-safe
  arm64: hibernate: remove WARN_ON in save_processor_state
  arm64/fpsimd: Exit streaming mode when flushing tasks
  arm64: mm: fix VA-range sanity check
  arm64/mm: remove now-superfluous ISBs from TTBR writes
  arm64: consolidate rox page protection logic
  arm64: set __exception_irq_entry with __irq_entry as a default
  arm64: syscall: unmask DAIF for tracing status
  arm64: lockdep: enable checks for held locks when returning to userspace
  arm64/cpucaps: increase string width to properly format cpucaps.h
  arm64/cpufeature: Use helper for ECV CNTPOFF cpufeature

* for-next/feat_mops:
  : Support for ARMv8.8 memcpy instructions in userspace
  kselftest/arm64: add MOPS to hwcap test
  arm64: mops: allow disabling MOPS from the kernel command line
  arm64: mops: detect and enable FEAT_MOPS
  arm64: mops: handle single stepping after MOPS exception
  arm64: mops: handle MOPS exceptions
  KVM: arm64: hide MOPS from guests
  arm64: mops: don't disable host MOPS instructions from EL2
  arm64: mops: document boot requirements for MOPS
  KVM: arm64: switch HCRX_EL2 between host and guest
  arm64: cpufeature: detect FEAT_HCX
  KVM: arm64: initialize HCRX_EL2

* for-next/module-alloc:
  : Make the arm64 module allocation code more robust (clean-up, VA range expansion)
  arm64: module: rework module VA range selection
  arm64: module: mandate MODULE_PLTS
  arm64: module: move module randomization to module.c
  arm64: kaslr: split kaslr/module initialization
  arm64: kasan: remove !KASAN_VMALLOC remnants
  arm64: module: remove old !KASAN_VMALLOC logic

* for-next/sysreg: (21 commits)
  : More sysreg conversions to automatic generation
  arm64/sysreg: Convert TRBIDR_EL1 register to automatic generation
  arm64/sysreg: Convert TRBTRG_EL1 register to automatic generation
  arm64/sysreg: Convert TRBMAR_EL1 register to automatic generation
  arm64/sysreg: Convert TRBSR_EL1 register to automatic generation
  arm64/sysreg: Convert TRBBASER_EL1 register to automatic generation
  arm64/sysreg: Convert TRBPTR_EL1 register to automatic generation
  arm64/sysreg: Convert TRBLIMITR_EL1 register to automatic generation
  arm64/sysreg: Rename TRBIDR_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBTRG_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBMAR_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBSR_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBBASER_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBPTR_EL1 fields per auto-gen tools format
  arm64/sysreg: Rename TRBLIMITR_EL1 fields per auto-gen tools format
  arm64/sysreg: Convert OSECCR_EL1 to automatic generation
  arm64/sysreg: Convert OSDTRTX_EL1 to automatic generation
  arm64/sysreg: Convert OSDTRRX_EL1 to automatic generation
  arm64/sysreg: Convert OSLAR_EL1 to automatic generation
  arm64/sysreg: Standardise naming of bitfield constants in OSL[AS]R_EL1
  arm64/sysreg: Convert MDSCR_EL1 to automatic register generation
  ...

* for-next/cpucap:
  : arm64 cpucap clean-up
  arm64: cpufeature: fold cpus_set_cap() into update_cpu_capabilities()
  arm64: cpufeature: use cpucap naming
  arm64: alternatives: use cpucap naming
  arm64: standardise cpucap bitmap names

* for-next/acpi:
  : Various arm64-related ACPI patches
  ACPI: bus: Consolidate all arm specific initialisation into acpi_arm_init()

* for-next/kdump:
  : Simplify the crashkernel reservation behaviour of crashkernel=X,high on arm64
  arm64: add kdump.rst into index.rst
  Documentation: add kdump.rst to present crashkernel reservation on arm64
  arm64: kdump: simplify the reservation behaviour of crashkernel=,high

* for-next/acpi-doc:
  : Update ACPI documentation for Arm systems
  Documentation/arm64: Update ACPI tables from BBR
  Documentation/arm64: Update references in arm-acpi
  Documentation/arm64: Update ARM and arch reference

* for-next/doc:
  : arm64 documentation updates
  Documentation/arm64: Add ptdump documentation

* for-next/tpidr2-fix:
  : Fix the TPIDR2_EL0 register restoring on sigreturn
  kselftest/arm64: Add a test case for TPIDR2 restore
  arm64/signal: Restore TPIDR2 register rather than memory state
90 changed files with 1282 additions and 585 deletions


@@ -429,6 +429,9 @@
arm64.nosme [ARM64] Unconditionally disable Scalable Matrix
Extension support
arm64.nomops [ARM64] Unconditionally disable Memory Copy and Memory
Set instructions support
ataflop= [HW,M68k]
atarimouse= [HW,MOUSE] Atari Mouse


@@ -17,16 +17,37 @@ For ACPI on arm64, tables also fall into the following categories:
- Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
STAO, TCPA, TPM2, UEFI, XENV
- Optional: AGDI, BGRT, CEDT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT,
HMAT, IBFT, IORT, MCHI, MPAM, MPST, MSCT, NFIT, PMTT, PPTT, RASF, SBST,
SDEI, SLIT, SPMI, SRAT, STAO, TCPA, TPM2, UEFI, XENV
- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
- Not supported: AEST, APMT, BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT,
MSDM, OEMx, PDTT, PSDT, RAS2, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
====== ========================================================================
Table Usage for ARMv8 Linux
====== ========================================================================
AEST Signature Reserved (signature == "AEST")
**Arm Error Source Table**
This table informs the OS of any error nodes in the system that are
compliant with the Arm RAS architecture.
AGDI Signature Reserved (signature == "AGDI")
**Arm Generic diagnostic Dump and Reset Device Interface Table**
This table describes a non-maskable event that is used by the platform
firmware to request the OS to generate a diagnostic dump and reset the device.
APMT Signature Reserved (signature == "APMT")
**Arm Performance Monitoring Table**
This table describes the properties of PMU support implemented by
components in the system.
BERT Section 18.3 (signature == "BERT")
**Boot Error Record Table**
@@ -47,6 +68,13 @@ BGRT Section 5.2.22 (signature == "BGRT")
Optional, not currently supported, with no real use-case for an
ARM server.
CEDT Signature Reserved (signature == "CEDT")
**CXL Early Discovery Table**
This table allows the OS to discover any CXL Host Bridges and the Host
Bridge registers.
CPEP Section 5.2.18 (signature == "CPEP")
**Corrected Platform Error Polling table**
@@ -184,6 +212,15 @@ HEST Section 18.3.2 (signature == "HEST")
Must be supplied if RAS support is provided by the platform. It
is recommended this table be supplied.
HMAT Section 5.2.28 (signature == "HMAT")
**Heterogeneous Memory Attribute Table**
This table describes the memory attributes, such as memory side cache
attributes and bandwidth and latency details, related to Memory Proximity
Domains. The OS uses this information to optimize the system memory
configuration.
HPET Signature Reserved (signature == "HPET")
**High Precision Event timer Table**
@@ -241,6 +278,13 @@ MCHI Signature Reserved (signature == "MCHI")
Optional, not currently supported.
MPAM Signature Reserved (signature == "MPAM")
**Memory Partitioning And Monitoring table**
This table allows the OS to discover the MPAM controls implemented by
the subsystems.
MPST Section 5.2.21 (signature == "MPST")
**Memory Power State Table**
@@ -281,18 +325,39 @@ PCCT Section 14.1 (signature == "PCCT")
Recommended for use on arm64; use of PCC is recommended when using CPPC
to control performance and power for platform processors.
PDTT Section 5.2.29 (signature == "PDTT")
**Platform Debug Trigger Table**
This table describes PCC channels used to gather debug logs of
non-architectural features.
PMTT Section 5.2.21.12 (signature == "PMTT")
**Platform Memory Topology Table**
Optional, not currently supported.
PPTT Section 5.2.30 (signature == "PPTT")
**Processor Properties Topology Table**
This table provides the processor and cache topology.
PSDT Section 5.2.11.3 (signature == "PSDT")
**Persistent System Description Table**
Obsolete table, will not be supported.
RAS2 Section 5.2.21 (signature == "RAS2")
**RAS Features 2 table**
This table provides interfaces for the RAS capabilities implemented in
the platform.
RASF Section 5.2.20 (signature == "RASF")
**RAS Feature table**
@@ -318,6 +383,12 @@ SBST Section 5.2.14 (signature == "SBST")
Optional, not currently supported.
SDEI Signature Reserved (signature == "SDEI")
**Software Delegated Exception Interface table**
This table advertises the presence of the SDEI interface.
SLIC Signature Reserved (signature == "SLIC")
**Software Licensing table**


@@ -1,40 +1,41 @@
=====================
ACPI on ARMv8 Servers
=====================
===================
ACPI on Arm systems
===================
ACPI can be used for ARMv8 general purpose servers designed to follow
the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server
Base Boot Requirements) [1] specifications. Please note that the SBBR
can be retrieved simply by visiting [1], but the SBSA is currently only
available to those with an ARM login due to ARM IP licensing concerns.
ACPI can be used for Armv8 and Armv9 systems designed to follow
the BSA (Arm Base System Architecture) [0] and BBR (Arm
Base Boot Requirements) [1] specifications. Both BSA and BBR are publicly
accessible documents.
Arm Servers, in addition to being BSA compliant, comply with a set
of rules defined in SBSA (Server Base System Architecture) [2].
The ARMv8 kernel implements the reduced hardware model of ACPI version
The Arm kernel implements the reduced hardware model of ACPI version
5.1 or later. Links to the specification and all external documents
it refers to are managed by the UEFI Forum. The specification is
available at http://www.uefi.org/specifications and documents referenced
by the specification can be found via http://www.uefi.org/acpi.
If an ARMv8 system does not meet the requirements of the SBSA and SBBR,
If an Arm system does not meet the requirements of the BSA and BBR,
or cannot be described using the mechanisms defined in the required ACPI
specifications, then ACPI may not be a good fit for the hardware.
While the documents mentioned above set out the requirements for building
industry-standard ARMv8 servers, they also apply to more than one operating
industry-standard Arm systems, they also apply to more than one operating
system. The purpose of this document is to describe the interaction between
ACPI and Linux only, on an ARMv8 system -- that is, what Linux expects of
ACPI and Linux only, on an Arm system -- that is, what Linux expects of
ACPI and what ACPI can expect of Linux.
Why ACPI on ARM?
Why ACPI on Arm?
----------------
Before examining the details of the interface between ACPI and Linux, it is
useful to understand why ACPI is being used. Several technologies already
exist in Linux for describing non-enumerable hardware, after all. In this
section we summarize a blog post [2] from Grant Likely that outlines the
reasoning behind ACPI on ARMv8 servers. Actually, we snitch a good portion
section we summarize a blog post [3] from Grant Likely that outlines the
reasoning behind ACPI on Arm systems. Actually, we snitch a good portion
of the summary text almost directly, to be honest.
The short form of the rationale for ACPI on ARM is:
The short form of the rationale for ACPI on Arm is:
- ACPI's byte code (AML) allows the platform to encode hardware behavior,
while DT explicitly does not support this. For hardware vendors, being
@@ -47,7 +48,7 @@ The short form of the rationale for ACPI on ARM is:
- In the enterprise server environment, ACPI has established bindings (such
as for RAS) which are currently used in production systems. DT does not.
Such bindings could be defined in DT at some point, but doing so means ARM
Such bindings could be defined in DT at some point, but doing so means Arm
and x86 would end up using completely different code paths in both firmware
and the kernel.
@@ -108,7 +109,7 @@ recent version of the kernel.
Relationship with Device Tree
-----------------------------
ACPI support in drivers and subsystems for ARMv8 should never be mutually
ACPI support in drivers and subsystems for Arm should never be mutually
exclusive with DT support at compile time.
At boot time the kernel will only use one description method depending on
@@ -121,11 +122,11 @@ time).
Booting using ACPI tables
-------------------------
The only defined method for passing ACPI tables to the kernel on ARMv8
The only defined method for passing ACPI tables to the kernel on Arm
is via the UEFI system configuration table. Just so it is explicit, this
means that ACPI is only supported on platforms that boot via UEFI.
When an ARMv8 system boots, it can either have DT information, ACPI tables,
When an Arm system boots, it can either have DT information, ACPI tables,
or in some very unusual cases, both. If no command line parameters are used,
the kernel will try to use DT for device enumeration; if there is no DT
present, the kernel will try to use ACPI tables, but only if they are present.
@@ -169,7 +170,7 @@ hardware reduced mode must be set to zero.
For the ACPI core to operate properly, and in turn provide the information
the kernel needs to configure devices, it expects to find the following
tables (all section numbers refer to the ACPI 6.1 specification):
tables (all section numbers refer to the ACPI 6.5 specification):
- RSDP (Root System Description Pointer), section 5.2.5
@@ -184,20 +185,76 @@ tables (all section numbers refer to the ACPI 6.1 specification):
- GTDT (Generic Timer Description Table), section 5.2.24
- PPTT (Processor Properties Topology Table), section 5.2.30
- DBG2 (DeBuG port table 2), section 5.2.6, specifically Table 5-6.
- APMT (Arm Performance Monitoring unit Table), section 5.2.6, specifically Table 5-6.
- AGDI (Arm Generic diagnostic Dump and Reset Device Interface Table), section 5.2.6, specifically Table 5-6.
- If PCI is supported, the MCFG (Memory mapped ConFiGuration
Table), section 5.2.6, specifically Table 5-31.
Table), section 5.2.6, specifically Table 5-6.
- If booting without a console=<device> kernel parameter is
supported, the SPCR (Serial Port Console Redirection table),
section 5.2.6, specifically Table 5-31.
section 5.2.6, specifically Table 5-6.
- If necessary to describe the I/O topology, SMMUs and GIC ITSs,
the IORT (Input Output Remapping Table, section 5.2.6, specifically
Table 5-31).
Table 5-6).
- If NUMA is supported, the following tables are required:
- SRAT (System Resource Affinity Table), section 5.2.16
- SLIT (System Locality distance Information Table), section 5.2.17
- If NUMA is supported, and the system contains heterogeneous memory,
the HMAT (Heterogeneous Memory Attribute Table), section 5.2.28.
- If the ACPI Platform Error Interfaces are required, the following
tables are conditionally required:
- BERT (Boot Error Record Table, section 18.3.1)
- EINJ (Error INJection table, section 18.6.1)
- ERST (Error Record Serialization Table, section 18.5)
- HEST (Hardware Error Source Table, section 18.3.2)
- SDEI (Software Delegated Exception Interface table, section 5.2.6,
specifically Table 5-6)
- AEST (Arm Error Source Table, section 5.2.6,
specifically Table 5-6)
- RAS2 (ACPI RAS2 feature table, section 5.2.21)
- If the system contains controllers using PCC channel, the
PCCT (Platform Communications Channel Table), section 14.1
- If the system contains a controller to capture board-level system state,
and communicates with the host via PCC, the PDTT (Platform Debug Trigger
Table), section 5.2.29.
- If NVDIMM is supported, the NFIT (NVDIMM Firmware Interface Table), section 5.2.26
- If video framebuffer is present, the BGRT (Boot Graphics Resource Table), section 5.2.23
- If IPMI is implemented, the SPMI (Server Platform Management Interface),
section 5.2.6, specifically Table 5-6.
- If the system contains a CXL Host Bridge, the CEDT (CXL Early Discovery
Table), section 5.2.6, specifically Table 5-6.
- If the system supports MPAM, the MPAM (Memory Partitioning And Monitoring table), section 5.2.6,
specifically Table 5-6.
- If the system lacks persistent storage, the IBFT (ISCSI Boot Firmware
Table), section 5.2.6, specifically Table 5-6.
- If NUMA is supported, the SRAT (System Resource Affinity Table)
and SLIT (System Locality distance Information Table), sections
5.2.16 and 5.2.17, respectively.
If the above tables are not all present, the kernel may or may not be
able to boot properly since it may not be able to configure all of the
@@ -269,16 +326,14 @@ Drivers should look for device properties in the _DSD object ONLY; the _DSD
object is described in the ACPI specification section 6.2.5, but this only
describes how to define the structure of an object returned via _DSD, and
how specific data structures are defined by specific UUIDs. Linux should
only use the _DSD Device Properties UUID [5]:
only use the _DSD Device Properties UUID [4]:
- UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301
- https://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
The UEFI Forum provides a mechanism for registering device properties [4]
so that they may be used across all operating systems supporting ACPI.
Device properties that have not been registered with the UEFI Forum should
not be used.
Common device properties can be registered by creating a pull request to [4] so
that they may be used across all operating systems supporting ACPI.
Device properties that have not been registered with the UEFI Forum can be used
but not as "uefi-" common properties.
Before creating new device properties, check to be sure that they have not
been defined before and either registered in the Linux kernel documentation
@@ -306,7 +361,7 @@ process.
Once registration and review have been completed, the kernel provides an
interface for looking up device properties in a manner independent of
whether DT or ACPI is being used. This API should be used [6]; it can
whether DT or ACPI is being used. This API should be used [5]; it can
eliminate some duplication of code paths in driver probing functions and
discourage divergence between DT bindings and ACPI device properties.
@@ -448,15 +503,15 @@ ASWG
----
The ACPI specification changes regularly. During the year 2014, for instance,
version 5.1 was released and version 6.0 substantially completed, with most of
the changes being driven by ARM-specific requirements. Proposed changes are
the changes being driven by Arm-specific requirements. Proposed changes are
presented and discussed in the ASWG (ACPI Specification Working Group) which
is a part of the UEFI Forum. The current version of the ACPI specification
is 6.1 release in January 2016.
is 6.5, released in August 2022.
Participation in this group is open to all UEFI members. Please see
http://www.uefi.org/workinggroup for details on group membership.
It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification
It is the intent of the Arm ACPI kernel code to follow the ACPI specification
as closely as possible, and to only implement functionality that complies with
the released standards from UEFI ASWG. As a practical matter, there will be
vendors that provide bad ACPI tables or violate the standards in some way.
@@ -470,12 +525,12 @@ likely be willing to assist in submitting ECRs.
Linux Code
----------
Individual items specific to Linux on ARM, contained in the Linux
Individual items specific to Linux on Arm, contained in the Linux
source code, are in the list that follows:
ACPI_OS_NAME
This macro defines the string to be returned when
an ACPI method invokes the _OS method. On ARM64
an ACPI method invokes the _OS method. On Arm
systems, this macro will be "Linux" by default.
The command line parameter acpi_os=<string>
can be used to set it to some other value. The
@@ -490,31 +545,23 @@ Documentation/arm64/acpi_object_usage.rst.
References
----------
[0] http://silver.arm.com
document ARM-DEN-0029, or newer:
"Server Base System Architecture", version 2.3, dated 27 Mar 2014
[0] https://developer.arm.com/documentation/den0094/latest
document Arm-DEN-0094: "Arm Base System Architecture", version 1.0C, dated 6 Oct 2022
[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0044a/Server_Base_Boot_Requirements.pdf
Document ARM-DEN-0044A, or newer: "Server Base Boot Requirements, System
Software on ARM Platforms", dated 16 Aug 2014
[1] https://developer.arm.com/documentation/den0044/latest
Document Arm-DEN-0044: "Arm Base Boot Requirements", version 2.0G, dated 15 Apr 2022
[2] http://www.secretlab.ca/archives/151,
[2] https://developer.arm.com/documentation/den0029/latest
Document Arm-DEN-0029: "Arm Server Base System Architecture", version 7.1, dated 06 Oct 2022
[3] http://www.secretlab.ca/archives/151,
10 Jan 2015, Copyright (c) 2015,
Linaro Ltd., written by Grant Likely.
[3] AMD ACPI for Seattle platform documentation
http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf
[4] _DSD (Device Specific Data) Implementation Guide
https://github.com/UEFI/DSD-Guide/blob/main/dsd-guide.pdf
[4] http://www.uefi.org/acpi
please see the link for the "ACPI _DSD Device
Property Registry Instructions"
[5] http://www.uefi.org/acpi
please see the link for the "_DSD (Device
Specific Data) Implementation Guide"
[6] Kernel code for the unified device
[5] Kernel code for the unified device
property interface can be found in
include/linux/property.h and drivers/base/property.c.


@@ -379,6 +379,12 @@ Before jumping into the kernel, the following conditions must be met:
- SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1.
For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS):
- If the kernel is entered at EL1 and EL2 is present:
- HCRX_EL2.MSCEn (bit 11) must be initialised to 0b1.
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level. Where the values documented


@@ -288,6 +288,8 @@ infrastructure:
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
| MOPS | [19-16] | y |
+------------------------------+---------+---------+
| RPRES | [7-4] | y |
+------------------------------+---------+---------+
| WFXT | [3-0] | y |


@@ -302,6 +302,9 @@ HWCAP2_SME_B16B16
HWCAP2_SMEF16F16
Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1
HWCAP2_MOPS
Functionality implied by ID_AA64ISAR2_EL1.MOPS == 0b0001.
4. Unused AT_HWCAP bits
-----------------------

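As an aside, a minimal user-space sketch (not part of this series) of how a
program could probe for the new hwcap; the fallback bit value below is an
assumption taken from current uapi headers, so prefer the definition from
<asm/hwcap.h> where available::

    /* mops_check.c - probe for FEAT_MOPS via the auxiliary vector. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_MOPS
    #define HWCAP2_MOPS (1UL << 43)    /* assumed; verify against asm/hwcap.h */
    #endif

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            if (hwcap2 & HWCAP2_MOPS)
                    printf("FEAT_MOPS: memcpy/memset instructions available\n");
            else
                    printf("FEAT_MOPS: not available on this CPU/kernel\n");
            return 0;
    }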

@@ -15,11 +15,13 @@ ARM64 Architecture
cpu-feature-registers
elf_hwcaps
hugetlbpage
kdump
legacy_instructions
memory
memory-tagging-extension
perf
pointer-authentication
ptdump
silicon-errata
sme
sve


@@ -0,0 +1,92 @@
=======================================
crashkernel memory reservation on arm64
=======================================
Author: Baoquan He <bhe@redhat.com>
The kdump mechanism is used to capture a corrupted kernel's vmcore so that
it can be subsequently analyzed. In order to do this, memory is reserved in
advance to pre-load the kdump kernel and to boot it if corruption happens.

The memory reserved for kdump is sized to minimally accommodate the kdump
kernel and the user space programs needed for vmcore collection.
Kernel parameter
================
Through the kernel parameters below, memory can be reserved accordingly
during the early stage of the first kernel's boot so that a contiguous
large chunk of memory can be found. The low memory reservation needs to
be considered if the crashkernel is reserved from the high memory area.
- crashkernel=size@offset
- crashkernel=size
- crashkernel=size,high crashkernel=size,low
Low memory and high memory
==========================
For kdump reservations, low memory is the memory area under a specific
limit, usually decided by the accessible address bits of the DMA-capable
devices needed by the kdump kernel to run. Those devices not related to
vmcore dumping can be ignored. On arm64, the low memory upper bound is
not fixed: it is 1G on the RPi4 platform but 4G on most other systems.
On special kernels built with CONFIG_ZONE_(DMA|DMA32) disabled, the
whole system RAM is low memory. Outside of the low memory described
above, the rest of system RAM is considered high memory.
Implementation
==============
1) crashkernel=size@offset
--------------------------
The crashkernel memory is reserved at the user-specified region; the
reservation fails if that region is already occupied.
2) crashkernel=size
-------------------
The crashkernel memory region will be reserved in any available position
according to the search order:
Firstly, the kernel searches the low memory area for an available region
with the specified size.
If searching for low memory fails, the kernel falls back to searching
the high memory area for an available region of the specified size. If
the reservation in high memory succeeds, a default size reservation in
the low memory will be done. Currently the default size is 128M,
sufficient for the low memory needs of the kdump kernel.
Note: crashkernel=size is the recommended option for crashkernel
reservations. The user does not need to know the system memory layout
of a specific platform.
3) crashkernel=size,high crashkernel=size,low
---------------------------------------------
crashkernel=size,(high|low) are an important supplement to
crashkernel=size. They allow the user to specify how much memory needs
to be allocated from the high memory and low memory respectively. On
many systems the low memory is precious and crashkernel reservations
from this area should be kept to a minimum.
To reserve memory for crashkernel=size,high, searching is first
attempted from the high memory region. If the reservation succeeds, the
low memory reservation will be done subsequently.
If reservation from the high memory fails, the kernel falls back to
searching the low memory with the size specified in crashkernel=,high.
If that succeeds, no further reservation for low memory is needed.
Notes:
- If crashkernel=,low is not specified, the default low memory
reservation will be done automatically.
- If crashkernel=0,low is specified, it means that the low memory
reservation is omitted intentionally.
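
To make the three styles concrete, here are example command lines (the
sizes are illustrative assumptions, not recommendations; pick values that
fit your dump kernel and initramfs)::

    crashkernel=256M@0x80000000     # 1) fixed 256M region at a chosen offset
    crashkernel=512M                # 2) kernel picks low memory, then high
    crashkernel=1G,high crashkernel=256M,low
                                    # 3) 1G from high memory, plus an explicit
                                    #    256M low memory reservation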


@@ -33,8 +33,8 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
0000000000000000 0000ffffffffffff 256TB user
ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map
[ffff600000000000 ffff7fffffffffff] 32TB [kasan shadow region]
ffff800000000000 ffff800007ffffff 128MB modules
ffff800008000000 fffffbffefffffff 124TB vmalloc
ffff800000000000 ffff80007fffffff 2GB modules
ffff800080000000 fffffbffefffffff 124TB vmalloc
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
@@ -50,8 +50,8 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
0000000000000000 000fffffffffffff 4PB user
fff0000000000000 ffff7fffffffffff ~4PB kernel logical memory map
[fffd800000000000 ffff7fffffffffff] 512TB [kasan shadow region]
ffff800000000000 ffff800007ffffff 128MB modules
ffff800008000000 fffffbffefffffff 124TB vmalloc
ffff800000000000 ffff80007fffffff 2GB modules
ffff800080000000 fffffbffefffffff 124TB vmalloc
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space


@@ -0,0 +1,96 @@
======================
Kernel page table dump
======================
ptdump is a debugfs interface that provides a detailed dump of the
kernel page tables. It offers a comprehensive overview of the kernel
virtual memory layout as well as the attributes associated with the
various regions in a human-readable format. It is useful to dump the
kernel page tables to verify permissions and memory types. Examining the
page table entries and permissions helps identify potential security
vulnerabilities such as mappings with overly permissive access rights or
improper memory protections.
Memory hotplug allows dynamic expansion or contraction of available
memory without requiring a system reboot. To maintain the consistency
and integrity of the memory management data structures, arm64 makes use
of the ``mem_hotplug_lock`` semaphore in write mode. Additionally, in
read mode, ``mem_hotplug_lock`` supports an efficient implementation of
``get_online_mems()`` and ``put_online_mems()``. These protect the
offlining of memory being accessed by the ptdump code.
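
The locking pattern described above is compact enough to sketch; this is
an illustrative outline only, not the actual arm64 ptdump code::

    #include <linux/memory_hotplug.h>
    #include <linux/seq_file.h>

    static int ptdump_show(struct seq_file *m, void *v)
    {
            get_online_mems();      /* mem_hotplug_lock, read mode: no offlining */
            /* ... walk the kernel page tables and emit one line per entry ... */
            put_online_mems();      /* drop the lock; offlining may proceed */
            return 0;
    }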
In order to dump the kernel page tables, enable the following
configurations and mount debugfs::
CONFIG_GENERIC_PTDUMP=y
CONFIG_PTDUMP_CORE=y
CONFIG_PTDUMP_DEBUGFS=y
mount -t debugfs nodev /sys/kernel/debug
cat /sys/kernel/debug/kernel_page_tables
On analysing the output of ``cat /sys/kernel/debug/kernel_page_tables``
one can derive information about the virtual address range of the entry,
followed by the size of the memory region covered by this entry, the
hierarchical structure of the page tables and finally the attributes
associated with each page. The page attributes provide information about
access permissions, execution capability, type of mapping such as leaf
level PTE or block level PGD, PMD and PUD, and access status of a page
within the kernel memory. Assessing these attributes can assist in
understanding the memory layout, access patterns and security
characteristics of the kernel pages.
Kernel virtual memory layout example::
start address end address size attributes
+---------------------------------------------------------------------------------------+
| ---[ Linear Mapping start ]---------------------------------------------------------- |
| .................. |
| 0xfff0000000000000-0xfff0000000210000 2112K PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED |
| 0xfff0000000210000-0xfff0000001c00000 26560K PTE ro NX SHD AF UXN MEM/NORMAL |
| .................. |
| ---[ Linear Mapping end ]------------------------------------------------------------ |
+---------------------------------------------------------------------------------------+
| ---[ Modules start ]----------------------------------------------------------------- |
| .................. |
| 0xffff800000000000-0xffff800008000000 128M PTE |
| .................. |
| ---[ Modules end ]------------------------------------------------------------------- |
+---------------------------------------------------------------------------------------+
| ---[ vmalloc() area ]---------------------------------------------------------------- |
| .................. |
| 0xffff800008010000-0xffff800008200000 1984K PTE ro x SHD AF UXN MEM/NORMAL |
| 0xffff800008200000-0xffff800008e00000 12M PTE ro x SHD AF CON UXN MEM/NORMAL |
| .................. |
| ---[ vmalloc() end ]----------------------------------------------------------------- |
+---------------------------------------------------------------------------------------+
| ---[ Fixmap start ]------------------------------------------------------------------ |
| .................. |
| 0xfffffbfffdb80000-0xfffffbfffdb90000 64K PTE ro x SHD AF UXN MEM/NORMAL |
| 0xfffffbfffdb90000-0xfffffbfffdba0000 64K PTE ro NX SHD AF UXN MEM/NORMAL |
| .................. |
| ---[ Fixmap end ]-------------------------------------------------------------------- |
+---------------------------------------------------------------------------------------+
| ---[ PCI I/O start ]----------------------------------------------------------------- |
| .................. |
| 0xfffffbfffe800000-0xfffffbffff800000 16M PTE |
| .................. |
| ---[ PCI I/O end ]------------------------------------------------------------------- |
+---------------------------------------------------------------------------------------+
| ---[ vmemmap start ]----------------------------------------------------------------- |
| .................. |
| 0xfffffc0002000000-0xfffffc0002200000 2M PTE RW NX SHD AF UXN MEM/NORMAL |
| 0xfffffc0002200000-0xfffffc0020000000 478M PTE |
| .................. |
| ---[ vmemmap end ]------------------------------------------------------------------- |
+---------------------------------------------------------------------------------------+
``cat /sys/kernel/debug/kernel_page_tables`` output::
0xfff0000001c00000-0xfff0000080000000 2020M PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED
0xfff0000080000000-0xfff0000800000000 30G PMD
0xfff0000800000000-0xfff0000800700000 7M PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED
0xfff0000800700000-0xfff0000800710000 64K PTE ro NX SHD AF UXN MEM/NORMAL-TAGGED
0xfff0000800710000-0xfff0000880000000 2089920K PTE RW NX SHD AF UXN MEM/NORMAL-TAGGED
0xfff0000880000000-0xfff0040000000000 4062G PMD
0xfff0040000000000-0xffff800000000000 3964T PGD


@@ -207,6 +207,7 @@ config ARM64
select HAVE_IOREMAP_PROT
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KVM
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -577,7 +578,6 @@ config ARM64_ERRATUM_845719
config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
default y
select ARM64_MODULE_PLTS if MODULES
help
This option links the kernel with '--fix-cortex-a53-843419' and
enables PLT support to replace certain ADRP instructions, which can
@@ -2107,26 +2107,6 @@ config ARM64_SME
register state capable of holding two dimensional matrix tiles to
enable various matrix operations.
config ARM64_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
select HAVE_MOD_ARCH_SPECIFIC
help
Allocate PLTs when loading modules so that jumps and calls whose
targets are too far away for their relative offsets to be encoded
in the instructions themselves can be bounced via veneers in the
module's PLT. This allows modules to be allocated in the generic
vmalloc area after the dedicated module memory area has been
exhausted.
When running with address space randomization (KASLR), the module
region itself may be too far away for ordinary relative jumps and
calls, and so in that case, module PLTs are required and cannot be
disabled.
Specific errata workaround(s) might also force module PLTs to be
enabled (ARM64_ERRATUM_843419).
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
select ARM_GIC_V3
@@ -2167,7 +2147,6 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
@@ -2198,9 +2177,8 @@ config RANDOMIZE_MODULE_REGION_FULL
When this option is not set, the module region will be randomized over
a limited range that contains the [_stext, _etext] interval of the
core kernel, so branch relocations are almost always in range unless
ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
particular case of region exhaustion, modules might be able to fall
back to a larger 2GB area.
the region is exhausted. In this particular case of region
exhaustion, modules might be able to fall back to a larger 2GB area.
config CC_HAVE_STACKPROTECTOR_SYSREG
def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)


@@ -23,17 +23,17 @@
#include <linux/stringify.h>
#define ALTINSTR_ENTRY(feature) \
#define ALTINSTR_ENTRY(cpucap) \
" .word 661b - .\n" /* label */ \
" .word 663f - .\n" /* new instruction */ \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .hword " __stringify(cpucap) "\n" /* cpucap */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
#define ALTINSTR_ENTRY_CB(feature, cb) \
#define ALTINSTR_ENTRY_CB(cpucap, cb) \
" .word 661b - .\n" /* label */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
" .hword " __stringify(cpucap) "\n" /* cpucap */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
@@ -53,13 +53,13 @@
*
* Alternatives with callbacks do not generate replacement instructions.
*/
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
#define __ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg_enabled) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature) \
ALTINSTR_ENTRY(cpucap) \
".popsection\n" \
".subsection 1\n" \
"663:\n\t" \
@@ -70,31 +70,31 @@
".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
#define __ALTERNATIVE_CFG_CB(oldinstr, cpucap, cfg_enabled, cb) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY_CB(feature, cb) \
ALTINSTR_ENTRY_CB(cpucap, cb) \
".popsection\n" \
"663:\n\t" \
"664:\n\t" \
".endif\n"
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
#define _ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg, ...) \
__ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, IS_ENABLED(cfg))
#define ALTERNATIVE_CB(oldinstr, feature, cb) \
__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
#define ALTERNATIVE_CB(oldinstr, cpucap, cb) \
__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (cpucap), 1, cb)
#else
#include <asm/assembler.h>
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
.macro altinstruction_entry orig_offset alt_offset cpucap orig_len alt_len
.word \orig_offset - .
.word \alt_offset - .
.hword (\feature)
.hword (\cpucap)
.byte \orig_len
.byte \alt_len
.endm
@@ -210,9 +210,9 @@ alternative_endif
#endif /* __ASSEMBLY__ */
/*
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap));
*
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap, CONFIG_FOO));
* N.B. If CONFIG_FOO is specified, but not selected, the whole block
* will be omitted, including oldinstr.
*/
@@ -224,15 +224,15 @@ alternative_endif
#include <linux/types.h>
static __always_inline bool
alternative_has_feature_likely(const unsigned long feature)
alternative_has_cap_likely(const unsigned long cpucap)
{
compiletime_assert(feature < ARM64_NCAPS,
"feature must be < ARM64_NCAPS");
compiletime_assert(cpucap < ARM64_NCAPS,
"cpucap must be < ARM64_NCAPS");
asm_volatile_goto(
ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops)
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
:
: [feature] "i" (feature)
: [cpucap] "i" (cpucap)
:
: l_no);
@@ -242,15 +242,15 @@ alternative_has_feature_likely(const unsigned long feature)
}
static __always_inline bool
alternative_has_feature_unlikely(const unsigned long feature)
alternative_has_cap_unlikely(const unsigned long cpucap)
{
compiletime_assert(feature < ARM64_NCAPS,
"feature must be < ARM64_NCAPS");
compiletime_assert(cpucap < ARM64_NCAPS,
"cpucap must be < ARM64_NCAPS");
asm_volatile_goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[feature])
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
:
: [feature] "i" (feature)
: [cpucap] "i" (cpucap)
:
: l_yes);


@@ -13,7 +13,7 @@
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
u16 cpufeature; /* cpufeature bit set for replacement */
u16 cpucap; /* cpucap bit set for replacement */
u8 orig_len; /* size of original instruction(s) */
u8 alt_len; /* size of new instruction(s), <= orig_len */
};
@@ -23,7 +23,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
void __init apply_boot_alternatives(void);
void __init apply_alternatives_all(void);
bool alternative_is_applied(u16 cpufeature);
bool alternative_is_applied(u16 cpucap);
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length);
@@ -31,5 +31,8 @@ void apply_alternatives_module(void *start, size_t length);
static inline void apply_alternatives_module(void *start, size_t length) { }
#endif
void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_H */


@@ -129,4 +129,6 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
u64 kaslr_early_init(void *fdt);
#endif /* _ASM_ARCHRANDOM_H */


@@ -18,7 +18,6 @@
bic \tmp1, \tmp1, #TTBR_ASID_MASK
sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
@@ -31,7 +30,6 @@
extr \tmp2, \tmp2, \tmp1, #48
ror \tmp2, \tmp2, #16
msr ttbr1_el1, \tmp2 // set the active ASID
isb
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm


@@ -96,6 +96,8 @@ static inline int is_compat_thread(struct thread_info *thread)
return test_ti_thread_flag(thread, TIF_32BIT);
}
long compat_arm_syscall(struct pt_regs *regs, int scno);
#else /* !CONFIG_COMPAT */
static inline int is_compat_thread(struct thread_info *thread)


@@ -107,7 +107,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
* CPU capabilities:
*
* We use arm64_cpu_capabilities to represent system features, errata work
* arounds (both used internally by kernel and tracked in cpu_hwcaps) and
* arounds (both used internally by kernel and tracked in system_cpucaps) and
* ELF HWCAPs (which are exposed to user).
*
* To support systems with heterogeneous CPUs, we need to make sure that we
@@ -419,12 +419,12 @@ static __always_inline bool is_hyp_code(void)
return is_vhe_hyp_code() || is_nvhe_hyp_code();
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
#define for_each_available_cap(cap) \
for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)
bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
@@ -437,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
static __always_inline bool system_capabilities_finalized(void)
{
return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
}
/*
@@ -449,7 +449,7 @@ static __always_inline bool cpus_have_cap(unsigned int num)
{
if (num >= ARM64_NCAPS)
return false;
return arch_test_bit(num, cpu_hwcaps);
return arch_test_bit(num, system_cpucaps);
}
/*
@@ -464,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
return alternative_has_feature_unlikely(num);
return alternative_has_cap_unlikely(num);
}
/*
@@ -504,16 +504,6 @@ static __always_inline bool cpus_have_const_cap(int num)
return cpus_have_cap(num);
}
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
num, ARM64_NCAPS);
} else {
__set_bit(num, cpu_hwcaps);
}
}
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{


@@ -166,4 +166,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size)
dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f);
#endif /* _ASM_EFI_H */


@@ -22,6 +22,15 @@
isb
.endm
.macro __init_el2_hcrx
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
mov_q x0, HCRX_HOST_FLAGS
msr_s SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm
/*
* Allow Non-secure EL1 and EL0 to access physical timer and counter.
* This is not necessary for VHE, since the host kernel runs in EL2,
@@ -69,7 +78,7 @@
cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
mrs_s x0, SYS_TRBIDR_EL1
and x0, x0, TRBIDR_PROG
and x0, x0, TRBIDR_EL1_P
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
@@ -184,6 +193,7 @@
*/
.macro init_el2_state
__init_el2_sctlr
__init_el2_hcrx
__init_el2_timers
__init_el2_debug
__init_el2_lor
@@ -284,14 +294,6 @@
cbz x1, .Lskip_sme_\@
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x1, .Lskip_sme_\@
mrs_s x1, SYS_HCRX_EL2
orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping
msr_s SYS_HCRX_EL2, x1
.Lskip_sme_\@:
.endm


@@ -47,7 +47,7 @@
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
/* Unallocated EC: 0x27 */
#define ESR_ELx_EC_MOPS (0x27)
#define ESR_ELx_EC_FP_EXC32 (0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
@@ -75,8 +75,11 @@
#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
#define ESR_ELx_ISS_MASK (GENMASK(24, 0))
#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK)
#define ESR_ELx_ISS2_SHIFT (32)
#define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32))
#define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT)
/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
@@ -140,6 +143,20 @@
#define ESR_ELx_CM_SHIFT (8)
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
/* ISS2 field definitions for Data Aborts */
#define ESR_ELx_TnD_SHIFT (10)
#define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT)
#define ESR_ELx_TagAccess_SHIFT (9)
#define ESR_ELx_TagAccess (UL(1) << ESR_ELx_TagAccess_SHIFT)
#define ESR_ELx_GCS_SHIFT (8)
#define ESR_ELx_GCS (UL(1) << ESR_ELx_GCS_SHIFT)
#define ESR_ELx_Overlay_SHIFT (6)
#define ESR_ELx_Overlay (UL(1) << ESR_ELx_Overlay_SHIFT)
#define ESR_ELx_DirtyBit_SHIFT (5)
#define ESR_ELx_DirtyBit (UL(1) << ESR_ELx_DirtyBit_SHIFT)
#define ESR_ELx_Xs_SHIFT (0)
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
@@ -356,6 +373,15 @@
#define ESR_ELx_SME_ISS_ZA_DISABLED 3
#define ESR_ELx_SME_ISS_ZT_DISABLED 4
/* ISS field definitions for MOPS exceptions */
#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
#define ESR_ELx_MOPS_ISS_FROM_EPILOGUE (UL(1) << 18)
#define ESR_ELx_MOPS_ISS_WRONG_OPTION (UL(1) << 17)
#define ESR_ELx_MOPS_ISS_OPTION_A (UL(1) << 16)
#define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10)
#define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5)
#define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)
#ifndef __ASSEMBLY__
#include <asm/types.h>
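
As a worked illustration (not part of the patch), the accessor macros above
can be exercised stand-alone; the macro bodies are copied from the hunk and
the sample ESR value is fabricated for the example::

    /* esr_mops_decode.c - stand-alone sketch of the MOPS ISS decode. */
    #include <stdio.h>

    #define UL(x) x##UL

    #define ESR_ELx_MOPS_ISS_MEM_INST     (UL(1) << 24)
    #define ESR_ELx_MOPS_ISS_OPTION_A     (UL(1) << 16)
    #define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10)
    #define ESR_ELx_MOPS_ISS_SRCREG(esr)  (((esr) & (UL(0x1f) << 5)) >> 5)
    #define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)

    int main(void)
    {
            /* Fabricated ISS: SET* instruction, option A, Xd=2, Xs=1, Xn=3. */
            unsigned long esr = ESR_ELx_MOPS_ISS_MEM_INST |
                                ESR_ELx_MOPS_ISS_OPTION_A |
                                (2UL << 10) | (1UL << 5) | (3UL << 0);

            printf("destination register: X%lu\n", ESR_ELx_MOPS_ISS_DESTREG(esr));
            printf("source register:      X%lu\n", ESR_ELx_MOPS_ISS_SRCREG(esr));
            printf("size register:        X%lu\n", ESR_ELx_MOPS_ISS_SIZEREG(esr));
            return 0;
    }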


@@ -8,16 +8,11 @@
#define __ASM_EXCEPTION_H
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __kprobes
#endif
static inline unsigned long disr_to_esr(u64 disr)
{
@@ -77,6 +72,7 @@ void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);


@@ -154,4 +154,12 @@ static inline int get_num_wrps(void)
ID_AA64DFR0_EL1_WRPs_SHIFT);
}
#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
#else
static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
}
#endif
#endif /* __ASM_BREAKPOINT_H */


@@ -137,6 +137,7 @@
#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32)
#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
#define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
/*
* This yields a mask that user programs can use to figure out what


@@ -24,7 +24,7 @@
static __always_inline bool __irqflags_uses_pmr(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}
static __always_inline void __daif_local_irq_enable(void)


@@ -9,6 +9,7 @@
#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/sysreg.h>
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -92,6 +93,9 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn)
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)


@@ -267,6 +267,24 @@ extern u64 __kvm_get_mdcr_el2(void);
__kvm_at_err; \
} )
void __noreturn hyp_panic(void);
asmlinkage void kvm_unexpected_el2_exception(void);
asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);
#else /* __ASSEMBLY__ */


@@ -1031,7 +1031,7 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
#define kvm_vcpu_os_lock_enabled(vcpu) \
(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))
(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);


@@ -18,7 +18,7 @@
static __always_inline bool system_uses_lse_atomics(void)
{
return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS);
}
#define __lse_ll_sc_body(op, ...) \


@@ -46,7 +46,7 @@
#define KIMAGE_VADDR (MODULES_END)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE (SZ_128M)
#define MODULES_VSIZE (SZ_2G)
#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_8M)
@@ -204,15 +204,17 @@ static inline unsigned long kaslr_offset(void)
return kimage_vaddr - KIMAGE_VADDR;
}
#ifdef CONFIG_RANDOMIZE_BASE
void kaslr_init(void);
static inline bool kaslr_enabled(void)
{
/*
* The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
* placement of the image rather than from the seed, so a displacement
* of less than MIN_KIMG_ALIGN means that no seed was provided.
*/
return kaslr_offset() >= MIN_KIMG_ALIGN;
extern bool __kaslr_is_enabled;
return __kaslr_is_enabled;
}
#else
static inline void kaslr_init(void) { }
static inline bool kaslr_enabled(void) { return false; }
#endif
/*
* Allow all memory at the discovery stage. We will clip it later.


@@ -39,11 +39,16 @@ static inline void contextidr_thread_switch(struct task_struct *next)
/*
* Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
*/
static inline void cpu_set_reserved_ttbr0(void)
static inline void cpu_set_reserved_ttbr0_nosync(void)
{
unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
write_sysreg(ttbr, ttbr0_el1);
}
static inline void cpu_set_reserved_ttbr0(void)
{
cpu_set_reserved_ttbr0_nosync();
isb();
}
@@ -52,7 +57,6 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
BUG_ON(pgd == swapper_pg_dir);
cpu_set_reserved_ttbr0();
cpu_do_switch_mm(virt_to_phys(pgd),mm);
}
@@ -164,7 +168,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
* up (i.e. cpufeature framework is not up yet) and
* latter only when we enable CNP via cpufeature's
* enable() callback.
* Also we rely on the cpu_hwcap bit being set before
* Also we rely on the system_cpucaps bit being set before
* calling the enable() function.
*/
ttbr1 |= TTBR_CNP_BIT;


@@ -7,7 +7,6 @@
#include <asm-generic/module.h>
#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
int plt_shndx;
int plt_num_entries;
@@ -21,7 +20,6 @@ struct mod_arch_specific {
/* for CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
};
#endif
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, const Elf64_Rela *rela,
@@ -30,12 +28,6 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
#endif
struct plt_entry {
/*
* A program that conforms to the AArch64 Procedure Call Standard


@@ -1,9 +1,7 @@
SECTIONS {
#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 : { BYTE(0) }
.init.plt 0 : { BYTE(0) }
.text.ftrace_trampoline 0 : { BYTE(0) }
#endif
#ifdef CONFIG_KASAN_SW_TAGS
/*


@ -73,6 +73,7 @@ static inline void dynamic_scs_init(void) {}
#endif
int scs_patch(const u8 eh_frame[], int size);
asmlinkage void scs_patch_vmlinux(void);
#endif /* __ASSEMBLY__ */


@ -100,5 +100,21 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int sco
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_iter(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_wa3(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_clearbhb(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SPECTRE_H */


@ -38,6 +38,7 @@
asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL_COMPAT(name) \
asmlinkage long __arm64_compat_sys_##name(const struct pt_regs *regs); \
asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
@ -53,6 +54,7 @@
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
@ -73,11 +75,13 @@
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL(name) \
asmlinkage long __arm64_sys_##name(const struct pt_regs *regs); \
asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
}
asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */
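
The edit repeated throughout this hunk is the stock fix for -Wmissing-prototypes on symbols that are only reached from assembly or generated tables; a minimal standalone example of the pattern (example_handler is a made-up name):

long example_handler(void);     /* prototype right before the definition */
long example_handler(void)      /* ...now compiles cleanly with */
{                               /* -Wmissing-prototypes enabled */
        return 0;
}
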


@ -134,25 +134,17 @@
#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3)
#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3)
#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
#define SYS_OSLAR_OSLK BIT(0)
#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
#define SYS_OSLSR_OSLM_MASK (BIT(3) | BIT(0))
#define SYS_OSLSR_OSLM_NI 0
#define SYS_OSLSR_OSLM_IMPLEMENTED BIT(3)
#define SYS_OSLSR_OSLK BIT(1)
#define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0))
#define OSLSR_EL1_OSLM_NI 0
#define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3)
#define OSLSR_EL1_OSLK BIT(1)
#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
@ -235,54 +227,8 @@
/*** End of Statistical Profiling Extension ***/
/*
* TRBE Registers
*/
#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
#define TRBLIMITR_LIMIT_SHIFT 12
#define TRBLIMITR_NVM BIT(5)
#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
#define TRBLIMITR_TRIG_MODE_SHIFT 3
#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
#define TRBLIMITR_FILL_MODE_SHIFT 1
#define TRBLIMITR_ENABLE BIT(0)
#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
#define TRBPTR_PTR_SHIFT 0
#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
#define TRBBASER_BASE_SHIFT 12
#define TRBSR_EC_MASK GENMASK(5, 0)
#define TRBSR_EC_SHIFT 26
#define TRBSR_IRQ BIT(22)
#define TRBSR_TRG BIT(21)
#define TRBSR_WRAP BIT(20)
#define TRBSR_ABORT BIT(18)
#define TRBSR_STOP BIT(17)
#define TRBSR_MSS_MASK GENMASK(15, 0)
#define TRBSR_MSS_SHIFT 0
#define TRBSR_BSC_MASK GENMASK(5, 0)
#define TRBSR_BSC_SHIFT 0
#define TRBSR_FSC_MASK GENMASK(5, 0)
#define TRBSR_FSC_SHIFT 0
#define TRBMAR_SHARE_MASK GENMASK(1, 0)
#define TRBMAR_SHARE_SHIFT 8
#define TRBMAR_OUTER_MASK GENMASK(3, 0)
#define TRBMAR_OUTER_SHIFT 4
#define TRBMAR_INNER_MASK GENMASK(3, 0)
#define TRBMAR_INNER_SHIFT 0
#define TRBTRG_TRG_MASK GENMASK(31, 0)
#define TRBTRG_TRG_SHIFT 0
#define TRBIDR_FLAG BIT(5)
#define TRBIDR_PROG BIT(4)
#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
#define TRBIDR_ALIGN_SHIFT 0
#define TRBSR_EL1_BSC_MASK GENMASK(5, 0)
#define TRBSR_EL1_BSC_SHIFT 0
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)


@ -29,6 +29,8 @@ void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *s
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
/*
* Move regs->pc to next instruction and do necessary setup before it
* is executed.


@ -65,7 +65,6 @@ static inline void __uaccess_ttbr0_disable(void)
ttbr &= ~TTBR_ASID_MASK;
/* reserved_pg_dir placed before swapper_pg_dir */
write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
isb();
/* Set reserved ASID */
write_sysreg(ttbr, ttbr1_el1);
isb();
@ -89,7 +88,6 @@ static inline void __uaccess_ttbr0_enable(void)
ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
ttbr1 |= ttbr0 & TTBR_ASID_MASK;
write_sysreg(ttbr1, ttbr1_el1);
isb();
/* Restore user page table */
write_sysreg(ttbr0, ttbr0_el1);


@ -102,5 +102,6 @@
#define HWCAP2_SME_BI32I32 (1UL << 40)
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#endif /* _UAPI__ASM_HWCAP_H */
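
Userspace can probe the new bit via the auxiliary vector; a minimal example (the fallback define matches the value above):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MOPS
#define HWCAP2_MOPS     (1UL << 43)
#endif

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("FEAT_MOPS %s\n",
               (hwcap2 & HWCAP2_MOPS) ? "supported" : "not supported");
        return 0;
}
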


@ -42,8 +42,7 @@ obj-$(CONFIG_COMPAT) += sigreturn32.o
obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_MODULES) += module.o module-plts.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o


@ -24,8 +24,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
#define ALT_CAP(a) ((a)->cpufeature & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a) ((a)->cpufeature & ARM64_CB_BIT)
#define ALT_CAP(a) ((a)->cpucap & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a) ((a)->cpucap & ARM64_CB_BIT)
/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;
@ -37,12 +37,12 @@ struct alt_region {
struct alt_instr *end;
};
bool alternative_is_applied(u16 cpufeature)
bool alternative_is_applied(u16 cpucap)
{
if (WARN_ON(cpufeature >= ARM64_NCAPS))
if (WARN_ON(cpucap >= ARM64_NCAPS))
return false;
return test_bit(cpufeature, applied_alternatives);
return test_bit(cpucap, applied_alternatives);
}
/*
@ -121,11 +121,11 @@ static noinstr void patch_alternative(struct alt_instr *alt,
* accidentally call into the cache.S code, which is patched by us at
* runtime.
*/
static void clean_dcache_range_nopatch(u64 start, u64 end)
static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
u64 cur, d_size, ctr_el0;
ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
CTR_EL0_DminLine_SHIFT);
cur = start & ~(d_size - 1);
@ -141,7 +141,7 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
static void __apply_alternatives(const struct alt_region *region,
bool is_module,
unsigned long *feature_mask)
unsigned long *cpucap_mask)
{
struct alt_instr *alt;
__le32 *origptr, *updptr;
@ -151,7 +151,7 @@ static void __apply_alternatives(const struct alt_region *region,
int nr_inst;
int cap = ALT_CAP(alt);
if (!test_bit(cap, feature_mask))
if (!test_bit(cap, cpucap_mask))
continue;
if (!cpus_have_cap(cap))
@ -188,11 +188,10 @@ static void __apply_alternatives(const struct alt_region *region,
icache_inval_all_pou();
isb();
/* Ignore ARM64_CB bit from feature mask */
bitmap_or(applied_alternatives, applied_alternatives,
feature_mask, ARM64_NCAPS);
cpucap_mask, ARM64_NCAPS);
bitmap_and(applied_alternatives, applied_alternatives,
cpu_hwcaps, ARM64_NCAPS);
system_cpucaps, ARM64_NCAPS);
}
}
@ -239,7 +238,7 @@ static int __init __apply_alternatives_multi_stop(void *unused)
} else {
DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
bitmap_complement(remaining_capabilities, boot_capabilities,
bitmap_complement(remaining_capabilities, boot_cpucaps,
ARM64_NCAPS);
BUG_ON(all_alternatives_applied);
@ -274,7 +273,7 @@ void __init apply_boot_alternatives(void)
pr_info("applying boot alternatives\n");
__apply_alternatives(&kernel_alternatives, false,
&boot_capabilities[0]);
&boot_cpucaps[0]);
}
#ifdef CONFIG_MODULES


@ -105,11 +105,11 @@ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
EXPORT_SYMBOL(system_cpucaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS];
DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
@ -137,7 +137,7 @@ static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
void dump_cpu_features(void)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps);
}
#define ARM64_CPUID_FIELDS(reg, field, min_value) \
@ -223,6 +223,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@ -364,6 +365,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
@ -954,24 +956,24 @@ extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];
static void __init
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++) {
if (WARN(caps->capability >= ARM64_NCAPS,
"Invalid capability %d\n", caps->capability))
continue;
if (WARN(cpu_hwcaps_ptrs[caps->capability],
if (WARN(cpucap_ptrs[caps->capability],
"Duplicate entry for capability %d\n",
caps->capability))
continue;
cpu_hwcaps_ptrs[caps->capability] = caps;
cpucap_ptrs[caps->capability] = caps;
}
}
static void __init init_cpu_hwcaps_indirect_list(void)
static void __init init_cpucap_indirect_list(void)
{
init_cpu_hwcaps_indirect_list_from_array(arm64_features);
init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
init_cpucap_indirect_list_from_array(arm64_features);
init_cpucap_indirect_list_from_array(arm64_errata);
}
static void __init setup_boot_cpu_capabilities(void);
@ -1049,10 +1051,10 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
/*
* Initialize the indirect array of CPU hwcaps capabilities pointers
* before we handle the boot CPU below.
* Initialize the indirect array of CPU capabilities pointers before we
* handle the boot CPU below.
*/
init_cpu_hwcaps_indirect_list();
init_cpucap_indirect_list();
/*
* Detect and enable early CPU capabilities based on the boot CPU,
@ -2048,9 +2050,9 @@ static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry,
static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
int scope)
{
bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);
bool api = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
bool apa = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
bool apa3 = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);
return apa || apa3 || api;
}
@ -2186,6 +2188,11 @@ static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused)
set_pstate_dit(1);
}
static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
{
sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn);
}
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@ -2235,11 +2242,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_ECV_CNTPOFF,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR0_EL1,
.field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF,
ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF)
},
#ifdef CONFIG_ARM64_PAN
{
@ -2309,6 +2312,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = is_kvm_protected_mode,
},
{
.desc = "HCRX_EL2 register",
.capability = ARM64_HAS_HCX,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP)
},
#endif
{
.desc = "Kernel page table isolation (KPTI)",
@ -2641,6 +2651,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_dit,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
},
{
.desc = "Memory Copy and Memory Set instructions",
.capability = ARM64_HAS_MOPS,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.cpu_enable = cpu_enable_mops,
ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP)
},
{},
};
@ -2769,6 +2787,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
@ -2895,7 +2914,7 @@ static void update_cpu_capabilities(u16 scope_mask)
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (i = 0; i < ARM64_NCAPS; i++) {
caps = cpu_hwcaps_ptrs[i];
caps = cpucap_ptrs[i];
if (!caps || !(caps->type & scope_mask) ||
cpus_have_cap(caps->capability) ||
!caps->matches(caps, cpucap_default_scope(caps)))
@ -2903,10 +2922,11 @@ static void update_cpu_capabilities(u16 scope_mask)
if (caps->desc)
pr_info("detected: %s\n", caps->desc);
cpus_set_cap(caps->capability);
__set_bit(caps->capability, system_cpucaps);
if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
set_bit(caps->capability, boot_capabilities);
set_bit(caps->capability, boot_cpucaps);
}
}
@ -2920,7 +2940,7 @@ static int cpu_enable_non_boot_scope_capabilities(void *__unused)
u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
for_each_available_cap(i) {
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
const struct arm64_cpu_capabilities *cap = cpucap_ptrs[i];
if (WARN_ON(!cap))
continue;
@ -2950,7 +2970,7 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
for (i = 0; i < ARM64_NCAPS; i++) {
unsigned int num;
caps = cpu_hwcaps_ptrs[i];
caps = cpucap_ptrs[i];
if (!caps || !(caps->type & scope_mask))
continue;
num = caps->capability;
@ -2995,7 +3015,7 @@ static void verify_local_cpu_caps(u16 scope_mask)
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (i = 0; i < ARM64_NCAPS; i++) {
caps = cpu_hwcaps_ptrs[i];
caps = cpucap_ptrs[i];
if (!caps || !(caps->type & scope_mask))
continue;
@ -3194,7 +3214,7 @@ static void __init setup_boot_cpu_capabilities(void)
bool this_cpu_has_cap(unsigned int n)
{
if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n];
if (cap)
return cap->matches(cap, SCOPE_LOCAL_CPU);
@ -3207,13 +3227,13 @@ EXPORT_SYMBOL_GPL(this_cpu_has_cap);
/*
* This helper function is used in a narrow window when,
* - The system wide safe registers are set with all the SMP CPUs and,
* - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
* - The SYSTEM_FEATURE system_cpucaps may not have been set.
* In all other cases cpus_have_{const_}cap() should be used.
*/
static bool __maybe_unused __system_matches_cap(unsigned int n)
{
if (n < ARM64_NCAPS) {
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n];
if (cap)
return cap->matches(cap, SCOPE_SYSTEM);


@ -13,7 +13,7 @@
#include <linux/of_device.h>
#include <linux/psci.h>
#ifdef CONFIG_ACPI
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
#include <acpi/processor.h>


@ -125,6 +125,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_BI32I32] = "smebi32i32",
[KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
[KERNEL_HWCAP_SME_F16F16] = "smef16f16",
[KERNEL_HWCAP_MOPS] = "mops",
};
#ifdef CONFIG_COMPAT


@ -126,7 +126,7 @@ static __always_inline void __exit_to_user_mode(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long flags;
@ -135,11 +135,13 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
lockdep_sys_exit();
}
static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
prepare_exit_to_user_mode(regs);
exit_to_user_mode_prepare(regs);
mte_check_tfsr_exit();
__exit_to_user_mode();
}
@ -611,6 +613,14 @@ static void noinstr el0_bti(struct pt_regs *regs)
exit_to_user_mode(regs);
}
static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_el0_mops(regs, esr);
exit_to_user_mode(regs);
}
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
@ -688,6 +698,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BTI:
el0_bti(regs);
break;
case ESR_ELx_EC_MOPS:
el0_mops(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_WATCHPT_LOW:


@ -101,12 +101,11 @@
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
.macro tramp_alias, dst, sym, tmp
mov_q \dst, TRAMP_VALIAS
adr_l \tmp, \sym
add \dst, \dst, \tmp
adr_l \tmp, .entry.tramp.text
sub \dst, \dst, \tmp
.macro tramp_alias, dst, sym
.set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
movz \dst, :abs_g2_s:.Lalias\@
movk \dst, :abs_g1_nc:.Lalias\@
movk \dst, :abs_g0_nc:.Lalias\@
.endm
/*
@ -435,13 +434,14 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
msr far_el1, x29
tramp_alias x30, tramp_exit_native, x29
br x30
4:
tramp_alias x30, tramp_exit_compat, x29
br x30
ldr_this_cpu x30, this_cpu_vector, x29
tramp_alias x29, tramp_exit
msr vbar_el1, x30 // install vector table
ldr lr, [sp, #S_LR] // restore x30
add sp, sp, #PT_REGS_SIZE // restore sp
br x29
#endif
.else
ldr lr, [sp, #S_LR]
@ -732,22 +732,6 @@ alternative_else_nop_endif
.org 1b + 128 // Did we overflow the ventry slot?
.endm
.macro tramp_exit, regsize = 64
tramp_data_read_var x30, this_cpu_vector
get_this_cpu_offset x29
ldr x30, [x30, x29]
msr vbar_el1, x30
ldr lr, [sp, #S_LR]
tramp_unmap_kernel x29
.if \regsize == 64
mrs x29, far_el1
.endif
add sp, sp, #PT_REGS_SIZE // restore sp
eret
sb
.endm
.macro generate_tramp_vector, kpti, bhb
.Lvector_start\@:
.space 0x400
@ -768,7 +752,7 @@ alternative_else_nop_endif
*/
.pushsection ".entry.tramp.text", "ax"
.align 11
SYM_CODE_START_NOALIGN(tramp_vectors)
SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
@ -777,13 +761,12 @@ SYM_CODE_START_NOALIGN(tramp_vectors)
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)
SYM_CODE_START(tramp_exit_native)
tramp_exit
SYM_CODE_END(tramp_exit_native)
SYM_CODE_START(tramp_exit_compat)
tramp_exit 32
SYM_CODE_END(tramp_exit_compat)
SYM_CODE_START_LOCAL(tramp_exit)
tramp_unmap_kernel x29
mrs x29, far_el1 // restore x29
eret
sb
SYM_CODE_END(tramp_exit)
.popsection // .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@ -1077,7 +1060,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
br x5
#endif
SYM_CODE_END(__sdei_asm_handler)


@ -1649,6 +1649,7 @@ void fpsimd_flush_thread(void)
fpsimd_flush_thread_vl(ARM64_VEC_SME);
current->thread.svcr = 0;
sme_smstop();
}
current->thread.fp_type = FP_STATE_FPSIMD;


@ -197,7 +197,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
static struct plt_entry *get_ftrace_plt(struct module *mod)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
#ifdef CONFIG_MODULES
struct plt_entry *plt = mod->arch.ftrace_trampolines;
return &plt[FTRACE_PLT_IDX];
@ -249,7 +249,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
* must use a PLT to reach it. We can only place PLTs for modules, and
* only when module PLT support is built-in.
*/
if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
if (!IS_ENABLED(CONFIG_MODULES))
return false;
/*
@ -431,10 +431,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
*
* Note: 'mod' is only set at module load time.
*/
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;


@ -99,7 +99,6 @@ int pfn_is_nosave(unsigned long pfn)
void notrace save_processor_state(void)
{
WARN_ON(num_online_cpus() != 1);
}
void notrace restore_processor_state(void)


@ -973,14 +973,6 @@ static int hw_breakpoint_reset(unsigned int cpu)
return 0;
}
#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
#else
static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
}
#endif
/*
* One-time initialisation.
*/


@ -123,6 +123,7 @@ static const struct ftr_set_desc isar2 __initconst = {
.fields = {
FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL),
FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL),
FIELD("mops", ID_AA64ISAR2_EL1_MOPS_SHIFT, NULL),
{}
},
};
@ -174,6 +175,7 @@ static const struct {
"id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
"id_aa64isar1.api=0 id_aa64isar1.apa=0 "
"id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" },
{ "arm64.nomops", "id_aa64isar2.mops=0" },
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
{ "nokaslr", "kaslr.disabled=1" },
};


@ -4,90 +4,35 @@
*/
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/printk.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/setup.h>
u64 __ro_after_init module_alloc_base;
u16 __initdata memstart_offset_seed;
struct arm64_ftr_override kaslr_feature_override __initdata;
static int __init kaslr_init(void)
bool __ro_after_init __kaslr_is_enabled = false;
void __init kaslr_init(void)
{
u64 module_range;
u32 seed;
/*
* Set a reasonable default for module_alloc_base in case
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
pr_info("KASLR disabled on command line\n");
return 0;
return;
}
if (!kaslr_enabled()) {
/*
* The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
* placement of the image rather than from the seed, so a displacement
* of less than MIN_KIMG_ALIGN means that no seed was provided.
*/
if (kaslr_offset() < MIN_KIMG_ALIGN) {
pr_warn("KASLR disabled due to lack of seed\n");
return 0;
return;
}
pr_info("KASLR enabled\n");
/*
* KASAN without KASAN_VMALLOC does not expect the module region to
* intersect the vmalloc region, since shadow memory is allocated for
* each module at load time, whereas the vmalloc region will already be
* shadowed by KASAN zero pages.
*/
BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
!IS_ENABLED(CONFIG_KASAN_VMALLOC));
seed = get_random_u32();
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
* Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
module_range = SZ_2G - (u64)(_end - _stext);
module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
} else {
/*
* Randomize the module region by setting module_alloc_base to
* a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
* _stext) . This guarantees that the resulting region still
* covers [_stext, _etext], and that all relative branches can
* be resolved without veneers unless this region is exhausted
* and we fall back to a larger 2GB window in module_alloc()
* when ARM64_MODULE_PLTS is enabled.
*/
module_range = MODULES_VSIZE - (u64)(_etext - _stext);
}
/* use the lower 21 bits to randomize the base of the module region */
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
return 0;
__kaslr_is_enabled = true;
}
subsys_initcall(kaslr_init)


@ -7,6 +7,7 @@
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,


@ -7,6 +7,8 @@
* Author: Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "Modules: " fmt
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
@ -15,52 +17,131 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
static u64 module_direct_base __ro_after_init = 0;
static u64 module_plt_base __ro_after_init = 0;
/*
* Choose a random page-aligned base address for a window of 'size' bytes which
* entirely contains the interval [start, end - 1].
*/
static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
{
u64 max_pgoff, pgoff;
if ((end - start) >= size)
return 0;
max_pgoff = (size - (end - start)) / PAGE_SIZE;
pgoff = get_random_u32_inclusive(0, max_pgoff);
return start - pgoff * PAGE_SIZE;
}
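
The arithmetic is easier to see with numbers plugged in; a self-contained sketch of the same bounding-box calculation, with the random pick made a parameter and the addresses purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static uint64_t bounding_box(uint64_t size, uint64_t start, uint64_t end,
                             uint64_t rnd)
{
        uint64_t max_pgoff;

        if ((end - start) >= size)
                return 0;               /* interval cannot fit the window */

        max_pgoff = (size - (end - start)) / PAGE_SIZE;
        return start - (rnd % (max_pgoff + 1)) * PAGE_SIZE;
}

int main(void)
{
        /* 16 MiB "kernel" inside a 128 MiB window: the window base lands
         * between 112 MiB below the image and the image start itself. */
        uint64_t base = bounding_box(128ULL << 20, 1ULL << 30,
                                     (1ULL << 30) + (16ULL << 20), 7);

        printf("window base: %#llx\n", (unsigned long long)base);
        return 0;
}
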
/*
* Modules may directly reference data and text anywhere within the kernel
* image and other modules. References using PREL32 relocations have a +/-2G
* range, and so we need to ensure that the entire kernel image and all modules
* fall within a 2G window such that these are always within range.
*
* Modules may directly branch to functions and code within the kernel text,
* and to functions and code within other modules. These branches will use
* CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
* that the entire kernel text and all module text falls within a 128M window
* such that these are always within range. With PLTs, we can expand this to a
* 2G window.
*
* We chose the 128M region to surround the entire kernel image (rather than
* just the text) as using the same bounds for the 128M and 2G regions ensures
* by construction that we never select a 128M region that is not a subset of
* the 2G region. For very large and unusual kernel configurations this means
* we may fall back to PLTs where they could have been avoided, but this keeps
* the logic significantly simpler.
*/
static int __init module_init_limits(void)
{
u64 kernel_end = (u64)_end;
u64 kernel_start = (u64)_text;
u64 kernel_size = kernel_end - kernel_start;
/*
* The default modules region is placed immediately below the kernel
* image, and is large enough to use the full 2G relocation range.
*/
BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
if (!kaslr_enabled()) {
if (kernel_size < SZ_128M)
module_direct_base = kernel_end - SZ_128M;
if (kernel_size < SZ_2G)
module_plt_base = kernel_end - SZ_2G;
} else {
u64 min = kernel_start;
u64 max = kernel_end;
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
} else {
module_direct_base = random_bounding_box(SZ_128M, min, max);
if (module_direct_base) {
min = module_direct_base;
max = module_direct_base + SZ_128M;
}
}
module_plt_base = random_bounding_box(SZ_2G, min, max);
}
pr_info("%llu pages in range for non-PLT usage",
module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
pr_info("%llu pages in range for PLT usage",
module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
return 0;
}
subsys_initcall(module_init_limits);
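
The 128 MiB window above falls out of the signed, word-scaled 26-bit immediate of CALL26/JUMP26; a standalone reachability check expressing the property module_init_limits() tries to preserve:

#include <stdbool.h>
#include <stdint.h>

#define BRANCH_RANGE    (128ULL << 20)  /* +/-128 MiB for B/BL */

/* True if a direct branch at 'pc' reaches 'target' without a PLT. */
static bool branch_in_range(uint64_t pc, uint64_t target)
{
        int64_t disp = (int64_t)(target - pc);

        return disp >= -(int64_t)BRANCH_RANGE && disp < (int64_t)BRANCH_RANGE;
}
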
void *module_alloc(unsigned long size)
{
u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
gfp_t gfp_mask = GFP_KERNEL;
void *p;
void *p = NULL;
/* Silence the initial allocation */
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;
/*
* Where possible, prefer to allocate within direct branch range of the
* kernel such that no PLTs are necessary.
*/
if (module_direct_base) {
p = __vmalloc_node_range(size, MODULE_ALIGN,
module_direct_base,
module_direct_base + SZ_128M,
GFP_KERNEL | __GFP_NOWARN,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
IS_ENABLED(CONFIG_KASAN_SW_TAGS))
/* don't exceed the static module region - see below */
module_alloc_end = MODULES_END;
if (!p && module_plt_base) {
p = __vmalloc_node_range(size, MODULE_ALIGN,
module_plt_base,
module_plt_base + SZ_2G,
GFP_KERNEL | __GFP_NOWARN,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p) {
pr_warn_ratelimited("%s: unable to allocate memory\n",
__func__);
}
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
(IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
(!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
!IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
/*
* KASAN without KASAN_VMALLOC can only deal with module
* allocations being served from the reserved module region,
* since the remainder of the vmalloc region is already
* backed by zero shadow pages, and punching holes into it
* is non-trivial. Since the module region is not randomized
* when KASAN is enabled without KASAN_VMALLOC, it is even
* less likely that the module region gets exhausted, so we
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + SZ_2G, GFP_KERNEL,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
vfree(p);
return NULL;
}
@ -448,9 +529,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
AARCH64_INSN_IMM_26);
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) {
if (ovf == -ERANGE) {
val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
@ -487,7 +566,7 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
#if defined(CONFIG_DYNAMIC_FTRACE)
const Elf_Shdr *s;
struct plt_entry *plts;


@ -296,6 +296,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
kaslr_init();
/*
* If we know now we are going to need KPTI then use non-global
* mappings from the start, avoiding the cost of rewriting


@ -23,6 +23,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
@ -398,7 +399,7 @@ static int restore_tpidr2_context(struct user_ctxs *user)
__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
if (!err)
current->thread.tpidr2_el0 = tpidr2_el0;
write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);
return err;
}


@ -147,11 +147,9 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* exit regardless, as the old entry assembly did.
*/
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = read_thread_flags();
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
local_daif_restore(DAIF_PROCCTX);
}
trace_exit:


@ -514,6 +514,63 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
die("Oops - FPAC", regs, esr);
}
void do_el0_mops(struct pt_regs *regs, unsigned long esr)
{
bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
unsigned long dst, src, size;
dst = pt_regs_read_reg(regs, dstreg);
src = pt_regs_read_reg(regs, srcreg);
size = pt_regs_read_reg(regs, sizereg);
/*
* Put the registers back in the original format suitable for a
* prologue instruction, using the generic return routine from the
* Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
*/
if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
/* SET* instruction */
if (option_a ^ wrong_option) {
/* Format is from Option A; forward set */
pt_regs_write_reg(regs, dstreg, dst + size);
pt_regs_write_reg(regs, sizereg, -size);
}
} else {
/* CPY* instruction */
if (!(option_a ^ wrong_option)) {
/* Format is from Option B */
if (regs->pstate & PSR_N_BIT) {
/* Backward copy */
pt_regs_write_reg(regs, dstreg, dst - size);
pt_regs_write_reg(regs, srcreg, src - size);
}
} else {
/* Format is from Option A */
if (size & BIT(63)) {
/* Forward copy */
pt_regs_write_reg(regs, dstreg, dst + size);
pt_regs_write_reg(regs, srcreg, src + size);
pt_regs_write_reg(regs, sizereg, -size);
}
}
}
if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
regs->pc -= 8;
else
regs->pc -= 4;
/*
* If single stepping then finish the step before executing the
* prologue instruction.
*/
user_fastforward_single_step(current);
}
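
The PC adjustment at the end condenses to one line: the prologue, main and epilogue forms of a MOPS sequence are consecutive 4-byte instructions, so an epilogue fault steps back two instructions to re-run the prologue and a main-instruction fault steps back one. As a sketch:

#include <stdbool.h>

/* Rewind the faulting PC to the MOPS prologue instruction. */
static unsigned long rewind_to_prologue(unsigned long pc, bool from_epilogue)
{
        return pc - (from_epilogue ? 8 : 4);
}
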
#define __user_cache_maint(insn, address, res) \
if (address >= TASK_SIZE_MAX) { \
res = -EFAULT; \
@ -824,6 +881,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
[ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
[ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
[ESR_ELx_EC_MOPS] = "MOPS",
[ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
[ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
[ESR_ELx_EC_SERROR] = "SError",
@ -947,7 +1005,7 @@ void do_serror(struct pt_regs *regs, unsigned long esr)
}
/* GENERIC_BUG traps */
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
/*
@ -959,6 +1017,7 @@ int is_valid_bugaddr(unsigned long addr)
*/
return 1;
}
#endif
static int bug_handler(struct pt_regs *regs, unsigned long esr)
{


@ -333,7 +333,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
/* Check if we have TRBE implemented and available at the host */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}


@ -130,6 +130,9 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
if (cpus_have_final_cap(ARM64_HAS_HCX))
write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
@ -144,6 +147,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)


@ -56,7 +56,7 @@ static void __debug_save_trace(u64 *trfcr_el1)
*trfcr_el1 = 0;
/* Check if the TRBE is enabled */
if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
return;
/*
* Prohibit trace generation while we are in guest.


@ -388,9 +388,9 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
return read_from_write_only(vcpu, p, r);
/* Forward the OSLK bit to OSLSR */
oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
if (p->regval & SYS_OSLAR_OSLK)
oslsr |= SYS_OSLSR_OSLK;
oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
if (p->regval & OSLAR_EL1_OSLK)
oslsr |= OSLSR_EL1_OSLK;
__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
return true;
@ -414,7 +414,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
* The only modifiable bit is the OSLK bit. Refuse the write if
* userspace attempts to change any other bit in the register.
*/
if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
return -EINVAL;
__vcpu_sys_reg(vcpu, rd->reg) = val;
@ -1252,6 +1252,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
if (!cpus_have_final_cap(ARM64_HAS_WFXT))
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
@ -1781,7 +1782,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },


@ -10,7 +10,7 @@
#include <linux/module.h>
#include <asm/neon-intrinsics.h>
void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
uint64_t *dp1 = (uint64_t *)p1;
@ -37,7 +37,7 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3)
{
@ -73,7 +73,7 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4)
@ -118,7 +118,7 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4,


@ -364,8 +364,8 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
ttbr1 &= ~TTBR_ASID_MASK;
ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
cpu_set_reserved_ttbr0_nosync();
write_sysreg(ttbr1, ttbr1_el1);
isb();
write_sysreg(ttbr0, ttbr0_el1);
isb();
post_ttbr_update_workaround();


@ -66,6 +66,8 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr
static void data_abort_decode(unsigned long esr)
{
unsigned long iss2 = ESR_ELx_ISS2(esr);
pr_alert("Data abort info:\n");
if (esr & ESR_ELx_ISV) {
@ -78,12 +80,21 @@ static void data_abort_decode(unsigned long esr)
(esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
} else {
pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
esr & ESR_ELx_ISS_MASK, iss2);
}
pr_alert(" CM = %lu, WnR = %lu\n",
pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
(esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
(iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
(iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);
pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
(iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
(iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
(iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
(iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}
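
As a standalone illustration of the decode above: ISS2 lives in ESR_ELx bits [55:32] (the kernel's GENMASK_ULL(55, 32)); the individual flag tests then operate within the extracted value. The ESR value below is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define ESR_ISS2_MASK   0x00ffffff00000000ULL   /* bits [55:32] */
#define ESR_ISS2_SHIFT  32

int main(void)
{
        uint64_t esr  = 0x0000040096000045ULL;  /* made-up data abort ESR */
        uint64_t iss2 = (esr & ESR_ISS2_MASK) >> ESR_ISS2_SHIFT;

        printf("ISS2 = %#llx\n", (unsigned long long)iss2);
        return 0;
}
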
static void mem_abort_decode(unsigned long esr)
@ -886,9 +897,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs);
/*
* __refdata because early_brk64 is __init, but the reference to it is
* clobbered at arch_initcall time.


@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/libnvdimm.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>


@ -69,6 +69,7 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
#define CRASH_HIGH_SEARCH_BASE SZ_4G
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
@ -101,12 +102,13 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
*/
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
unsigned long long crash_low_size = 0;
unsigned long long crash_low_size = 0, search_base = 0;
unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
unsigned long long crash_base, crash_size;
char *cmdline = boot_command_line;
int ret;
bool fixed_base = false;
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
@ -129,7 +131,9 @@ static void __init reserve_crashkernel(void)
else if (ret)
return;
search_base = CRASH_HIGH_SEARCH_BASE;
crash_max = CRASH_ADDR_HIGH_MAX;
high = true;
} else if (ret || !crash_size) {
/* The specified value is invalid */
return;
@ -140,31 +144,51 @@ static void __init reserve_crashkernel(void)
/* User specifies base address explicitly. */
if (crash_base) {
fixed_base = true;
search_base = crash_base;
crash_max = crash_base + crash_size;
}
retry:
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
search_base, crash_max);
if (!crash_base) {
/*
* If the first attempt was for low memory, fall back to
* high memory, the minimum required low memory will be
* reserved later.
* For crashkernel=size[KMG]@offset[KMG], print out failure
* message if we can't reserve the specified region.
*/
if (!fixed_base && (crash_max == CRASH_ADDR_LOW_MAX)) {
if (fixed_base) {
pr_warn("crashkernel reservation failed - memory is in use.\n");
return;
}
/*
* For crashkernel=size[KMG], if the first attempt was for
* low memory, fall back to high memory; the minimum required
* low memory will be reserved later.
*/
if (!high && crash_max == CRASH_ADDR_LOW_MAX) {
crash_max = CRASH_ADDR_HIGH_MAX;
search_base = CRASH_ADDR_LOW_MAX;
crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
goto retry;
}
/*
* For crashkernel=size[KMG],high, if the first attempt was
* for high memory, fall back to low memory.
*/
if (high && crash_max == CRASH_ADDR_HIGH_MAX) {
crash_max = CRASH_ADDR_LOW_MAX;
search_base = 0;
goto retry;
}
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
crash_size);
return;
}
if ((crash_base > CRASH_ADDR_LOW_MAX - crash_low_size) &&
crash_low_size && reserve_crashkernel_low(crash_low_size)) {
if ((crash_base >= CRASH_ADDR_LOW_MAX) && crash_low_size &&
reserve_crashkernel_low(crash_low_size)) {
memblock_phys_free(crash_base, crash_size);
return;
}
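
The retry ladder above reduces to a small decision table; a toy model (not kernel code) of which region the second allocation attempt searches for each crashkernel= form:

#include <stdio.h>

enum region { REGION_LOW, REGION_HIGH, REGION_NONE };

/* crashkernel=size        -> low first, then high (+ default low carve-out)
 * crashkernel=size,high   -> high first, then low
 * crashkernel=size@offset -> fixed base, no second attempt */
static enum region second_attempt(int fixed_base, int high)
{
        if (fixed_base)
                return REGION_NONE;
        return high ? REGION_LOW : REGION_HIGH;
}

int main(void)
{
        printf("%d %d %d\n", second_attempt(1, 0), second_attempt(0, 1),
               second_attempt(0, 0));
        return 0;
}
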


@ -214,7 +214,7 @@ static void __init clear_pgds(unsigned long start,
static void __init kasan_init_shadow(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end;
u64 mod_shadow_start;
u64 vmalloc_shadow_end;
phys_addr_t pa_start, pa_end;
u64 i;
@ -223,7 +223,6 @@ static void __init kasan_init_shadow(void)
kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
@ -246,17 +245,9 @@ static void __init kasan_init_shadow(void)
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
(void *)mod_shadow_start);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
BUILD_BUG_ON(VMALLOC_START != MODULES_END);
kasan_populate_early_shadow((void *)vmalloc_shadow_end,
(void *)KASAN_SHADOW_END);
} else {
kasan_populate_early_shadow((void *)kimg_shadow_end,
(void *)KASAN_SHADOW_END);
if (kimg_shadow_start > mod_shadow_end)
kasan_populate_early_shadow((void *)mod_shadow_end,
(void *)kimg_shadow_start);
}
BUILD_BUG_ON(VMALLOC_START != MODULES_END);
kasan_populate_early_shadow((void *)vmalloc_shadow_end,
(void *)KASAN_SHADOW_END);
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = (void *)__phys_to_virt(pa_start);


@ -451,7 +451,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
if (virt < PAGE_OFFSET) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@ -478,7 +478,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
if (virt < PAGE_OFFSET) {
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@ -663,12 +663,17 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
static pgprot_t kernel_exec_prot(void)
{
return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;
pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
pgprot_t prot = kernel_exec_prot();
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
/* The trampoline is always mapped and can therefore be global */
@ -723,7 +728,7 @@ static void __init map_kernel(pgd_t *pgdp)
* mapping to install SW breakpoints. Allow this (only) when
* explicitly requested with rodata=off.
*/
pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
pgprot_t text_prot = kernel_exec_prot();
/*
* If we have a CPU that supports BTI and a kernel built for


@ -32,8 +32,10 @@ HAS_GENERIC_AUTH_IMP_DEF
HAS_GIC_CPUIF_SYSREGS
HAS_GIC_PRIO_MASKING
HAS_GIC_PRIO_RELAXED_SYNC
HAS_HCX
HAS_LDAPR
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
HAS_NO_FPSIMD
HAS_NO_HW_PREFETCH


@ -24,12 +24,12 @@ BEGIN {
}
/^[vA-Z0-9_]+$/ {
printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
printf("#define ARM64_%-40s\t%d\n", $0, cap_num++)
next
}
END {
printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num)
print ""
print "#endif /* __ASM_CPUCAPS_H */"
}
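
For illustration, the widened %-40s field pads capability names to 40 columns before the tab; the generated header then looks roughly like this (capability numbers and exact padding are approximated, not taken from a real build):

#define ARM64_HAS_LSE_ATOMICS                     34
#define ARM64_HAS_MOPS                            35
#define ARM64_NCAPS                                     96
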


@ -48,6 +48,61 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
Sysreg OSDTRRX_EL1 2 0 0 0 2
Res0 63:32
Field 31:0 DTRRX
EndSysreg
Sysreg MDCCINT_EL1 2 0 0 2 0
Res0 63:31
Field 30 RX
Field 29 TX
Res0 28:0
EndSysreg
Sysreg MDSCR_EL1 2 0 0 2 2
Res0 63:36
Field 35 EHBWE
Field 34 EnSPM
Field 33 TTA
Field 32 EMBWE
Field 31 TFO
Field 30 RXfull
Field 29 TXfull
Res0 28
Field 27 RXO
Field 26 TXU
Res0 25:24
Field 23:22 INTdis
Field 21 TDA
Res0 20
Field 19 SC2
Res0 18:16
Field 15 MDE
Field 14 HDE
Field 13 KDE
Field 12 TDCC
Res0 11:7
Field 6 ERR
Res0 5:1
Field 0 SS
EndSysreg
Sysreg OSDTRTX_EL1 2 0 0 3 2
Res0 63:32
Field 31:0 DTRTX
EndSysreg
Sysreg OSECCR_EL1 2 0 0 6 2
Res0 63:32
Field 31:0 EDECCR
EndSysreg
Sysreg OSLAR_EL1 2 0 1 0 4
Res0 63:1
Field 0 OSLK
EndSysreg
Sysreg ID_PFR0_EL1 3 0 0 1 0
Res0 63:32
UnsignedEnum 31:28 RAS
@ -2200,3 +2255,80 @@ Sysreg ICC_NMIAR1_EL1 3 0 12 9 5
Res0 63:24
Field 23:0 INTID
EndSysreg
Sysreg TRBLIMITR_EL1 3 0 9 11 0
Field 63:12 LIMIT
Res0 11:7
Field 6 XE
Field 5 nVM
Enum 4:3 TM
0b00 STOP
0b01 IRQ
0b11 IGNR
EndEnum
Enum 2:1 FM
0b00 FILL
0b01 WRAP
0b11 CBUF
EndEnum
Field 0 E
EndSysreg
Sysreg TRBPTR_EL1 3 0 9 11 1
Field 63:0 PTR
EndSysreg
Sysreg TRBBASER_EL1 3 0 9 11 2
Field 63:12 BASE
Res0 11:0
EndSysreg
Sysreg TRBSR_EL1 3 0 9 11 3
Res0 63:56
Field 55:32 MSS2
Field 31:26 EC
Res0 25:24
Field 23 DAT
Field 22 IRQ
Field 21 TRG
Field 20 WRAP
Res0 19
Field 18 EA
Field 17 S
Res0 16
Field 15:0 MSS
EndSysreg
Sysreg TRBMAR_EL1 3 0 9 11 4
Res0 63:12
Enum 11:10 PAS
0b00 SECURE
0b01 NON_SECURE
0b10 ROOT
0b11 REALM
EndEnum
Enum 9:8 SH
0b00 NON_SHAREABLE
0b10 OUTER_SHAREABLE
0b11 INNER_SHAREABLE
EndEnum
Field 7:0 Attr
EndSysreg
Sysreg TRBTRG_EL1 3 0 9 11 6
Res0 63:32
Field 31:0 TRG
EndSysreg
Sysreg TRBIDR_EL1 3 0 9 11 7
Res0 63:12
Enum 11:8 EA
0b0000 NON_DESC
0b0001 IGNORE
0b0010 SERROR
EndEnum
Res0 7:6
Field 5 F
Field 4 P
Field 3:0 Align
EndSysreg


@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_AGDI) += agdi.o
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
obj-y += dma.o
obj-y += dma.o init.o


@ -9,11 +9,11 @@
#define pr_fmt(fmt) "ACPI: AGDI: " fmt
#include <linux/acpi.h>
#include <linux/acpi_agdi.h>
#include <linux/arm_sdei.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "init.h"
struct agdi_data {
int sdei_event;


@ -10,10 +10,10 @@
#define pr_fmt(fmt) "ACPI: APMT: " fmt
#include <linux/acpi.h>
#include <linux/acpi_apmt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "init.h"
#define DEV_NAME "arm-cs-arch-pmu"

drivers/acpi/arm64/init.c (new file)

@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/acpi.h>
#include "init.h"
void __init acpi_arm_init(void)
{
if (IS_ENABLED(CONFIG_ACPI_AGDI))
acpi_agdi_init();
if (IS_ENABLED(CONFIG_ACPI_APMT))
acpi_apmt_init();
if (IS_ENABLED(CONFIG_ACPI_IORT))
acpi_iort_init();
}


@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/init.h>
void __init acpi_agdi_init(void);
void __init acpi_apmt_init(void);
void __init acpi_iort_init(void);


@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include "init.h"
#define IORT_TYPE_MASK(type) (1 << (type))
#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)


@ -26,9 +26,6 @@
#include <asm/mpspec.h>
#include <linux/dmi.h>
#endif
#include <linux/acpi_agdi.h>
#include <linux/acpi_apmt.h>
#include <linux/acpi_iort.h>
#include <linux/acpi_viot.h>
#include <linux/pci.h>
#include <acpi/apei.h>
@ -1408,7 +1405,7 @@ static int __init acpi_init(void)
acpi_init_ffh();
pci_mmcfg_late_init();
acpi_iort_init();
acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
@ -1420,8 +1417,6 @@ static int __init acpi_init(void)
acpi_debugger_init();
acpi_setup_sb_notify_handler();
acpi_viot_init();
acpi_agdi_init();
acpi_apmt_init();
return 0;
}


@@ -218,7 +218,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
 	 * Enable the TRBE without clearing LIMITPTR which
 	 * might be required for fetching the buffer limits.
 	 */
-	trblimitr |= TRBLIMITR_ENABLE;
+	trblimitr |= TRBLIMITR_EL1_E;
 	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
 	/* Synchronize the TRBE enable event */
@@ -236,7 +236,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
 	 * Disable the TRBE without clearing LIMITPTR which
 	 * might be required for fetching the buffer limits.
 	 */
-	trblimitr &= ~TRBLIMITR_ENABLE;
+	trblimitr &= ~TRBLIMITR_EL1_E;
 	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
 	if (trbe_needs_drain_after_disable(cpudata))
@@ -582,12 +582,12 @@ static void clr_trbe_status(void)
 	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
 	WARN_ON(is_trbe_enabled());
-	trbsr &= ~TRBSR_IRQ;
-	trbsr &= ~TRBSR_TRG;
-	trbsr &= ~TRBSR_WRAP;
-	trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
-	trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
-	trbsr &= ~TRBSR_STOP;
+	trbsr &= ~TRBSR_EL1_IRQ;
+	trbsr &= ~TRBSR_EL1_TRG;
+	trbsr &= ~TRBSR_EL1_WRAP;
+	trbsr &= ~TRBSR_EL1_EC_MASK;
+	trbsr &= ~TRBSR_EL1_BSC_MASK;
+	trbsr &= ~TRBSR_EL1_S;
 	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
 }
@@ -596,13 +596,13 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
 	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
 	unsigned long addr = buf->trbe_limit;
-	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
+	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
 	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
-	trblimitr &= ~TRBLIMITR_NVM;
-	trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
-	trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
-	trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+	trblimitr &= ~TRBLIMITR_EL1_nVM;
+	trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
+	trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
+	trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;
 	/*
 	 * Fill trace buffer mode is used here while configuring the
@@ -613,14 +613,15 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
 	 * trace data in the interrupt handler, before reconfiguring
 	 * the TRBE.
 	 */
-	trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;
+	trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
+		     TRBLIMITR_EL1_FM_MASK;
 	/*
 	 * Trigger mode is not used here while configuring the TRBE for
 	 * the trace capture. Hence just keep this in the ignore mode.
 	 */
-	trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
-		     TRBLIMITR_TRIG_MODE_SHIFT;
+	trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
+		     TRBLIMITR_EL1_TM_MASK;
 	trblimitr |= (addr & PAGE_MASK);
 	set_trbe_enabled(buf->cpudata, trblimitr);
 }
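The mechanical part of this conversion is that the generated TRBLIMITR_EL1_* constants are absolute masks, already shifted into position, so a field is programmed by clearing the mask and or-ing in "(value << SHIFT) & MASK" rather than masking first and shifting second. A hedged, runnable sketch of that clear-then-set sequence with stand-in constants (the bit positions are illustrative, not the architectural TRBLIMITR_EL1 layout):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the generated sysreg defines: one absolute mask per
 * field plus its shift, as with TRBLIMITR_EL1_TM_MASK/SHIFT above. */
#define REG_TM_SHIFT	3
#define REG_TM_MASK	(0x3ULL << REG_TM_SHIFT)
#define REG_TM_IGNR	3ULL

int main(void)
{
	uint64_t reg = ~0ULL;

	/* Clear-then-set, exactly as set_trbe_limit_pointer_enabled() does:
	 * the absolute mask both erases the old field and bounds the new
	 * value, so an out-of-range value cannot corrupt its neighbours. */
	reg &= ~REG_TM_MASK;
	reg |= (REG_TM_IGNR << REG_TM_SHIFT) & REG_TM_MASK;

	printf("reg = %#018llx\n", (unsigned long long)reg);
	return 0;
}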

drivers/hwtracing/coresight/coresight-trbe.h

@@ -30,7 +30,7 @@ static inline bool is_trbe_enabled(void)
 {
 	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
-	return trblimitr & TRBLIMITR_ENABLE;
+	return trblimitr & TRBLIMITR_EL1_E;
 }
 #define TRBE_EC_OTHERS 0
@@ -39,7 +39,7 @@ static inline bool is_trbe_enabled(void)
 static inline int get_trbe_ec(u64 trbsr)
 {
-	return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+	return (trbsr & TRBSR_EL1_EC_MASK) >> TRBSR_EL1_EC_SHIFT;
 }
 #define TRBE_BSC_NOT_STOPPED 0
@@ -48,63 +48,55 @@ static inline int get_trbe_ec(u64 trbsr)
 static inline int get_trbe_bsc(u64 trbsr)
 {
-	return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+	return (trbsr & TRBSR_EL1_BSC_MASK) >> TRBSR_EL1_BSC_SHIFT;
 }
 static inline void clr_trbe_irq(void)
 {
 	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
-	trbsr &= ~TRBSR_IRQ;
+	trbsr &= ~TRBSR_EL1_IRQ;
 	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
 }
 static inline bool is_trbe_irq(u64 trbsr)
 {
-	return trbsr & TRBSR_IRQ;
+	return trbsr & TRBSR_EL1_IRQ;
 }
 static inline bool is_trbe_trg(u64 trbsr)
 {
-	return trbsr & TRBSR_TRG;
+	return trbsr & TRBSR_EL1_TRG;
 }
 static inline bool is_trbe_wrap(u64 trbsr)
 {
-	return trbsr & TRBSR_WRAP;
+	return trbsr & TRBSR_EL1_WRAP;
 }
 static inline bool is_trbe_abort(u64 trbsr)
 {
-	return trbsr & TRBSR_ABORT;
+	return trbsr & TRBSR_EL1_EA;
 }
 static inline bool is_trbe_running(u64 trbsr)
 {
-	return !(trbsr & TRBSR_STOP);
+	return !(trbsr & TRBSR_EL1_S);
 }
-#define TRBE_TRIG_MODE_STOP 0
-#define TRBE_TRIG_MODE_IRQ 1
-#define TRBE_TRIG_MODE_IGNORE 3
-#define TRBE_FILL_MODE_FILL 0
-#define TRBE_FILL_MODE_WRAP 1
-#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3
 static inline bool get_trbe_flag_update(u64 trbidr)
 {
-	return trbidr & TRBIDR_FLAG;
+	return trbidr & TRBIDR_EL1_F;
 }
 static inline bool is_trbe_programmable(u64 trbidr)
 {
-	return !(trbidr & TRBIDR_PROG);
+	return !(trbidr & TRBIDR_EL1_P);
 }
 static inline int get_trbe_address_align(u64 trbidr)
 {
-	return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+	return (trbidr & TRBIDR_EL1_Align_MASK) >> TRBIDR_EL1_Align_SHIFT;
 }
 static inline unsigned long get_trbe_write_pointer(void)
@@ -121,7 +113,7 @@ static inline void set_trbe_write_pointer(unsigned long addr)
 static inline unsigned long get_trbe_limit_pointer(void)
 {
 	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
-	unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+	unsigned long addr = trblimitr & TRBLIMITR_EL1_LIMIT_MASK;
 	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
 	return addr;
@@ -130,7 +122,7 @@ static inline unsigned long get_trbe_limit_pointer(void)
 static inline unsigned long get_trbe_base_pointer(void)
 {
 	u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
-	unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+	unsigned long addr = trbbaser & TRBBASER_EL1_BASE_MASK;
 	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
 	return addr;
@@ -139,7 +131,7 @@ static inline unsigned long get_trbe_base_pointer(void)
 static inline void set_trbe_base_pointer(unsigned long addr)
 {
 	WARN_ON(is_trbe_enabled());
-	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_EL1_BASE_SHIFT)));
 	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
 	write_sysreg_s(addr, SYS_TRBBASER_EL1);
 }
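Both decodes are equivalent: the old helpers used a mask relative to bit zero, "(reg >> SHIFT) & MASK", while the new ones use an absolute mask over the field's real position, "(reg & MASK) >> SHIFT", which is also what FIELD_GET() expands to. A small self-checking sketch with stand-in constants (field width and position chosen for illustration):

#include <assert.h>
#include <stdint.h>

/* Stand-ins: a 6-bit field at bits [31:26], expressed both ways. */
#define EC_SHIFT	26
#define EC_MASK_REL	0x3fULL				/* old style, relative */
#define EC_MASK_ABS	(EC_MASK_REL << EC_SHIFT)	/* new style, absolute */

int main(void)
{
	uint64_t reg = 0x2aULL << EC_SHIFT;	/* arbitrary test pattern */

	/* Old decode and new decode extract the identical value. */
	assert(((reg >> EC_SHIFT) & EC_MASK_REL) ==
	       ((reg & EC_MASK_ABS) >> EC_SHIFT));
	return 0;
}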

include/linux/acpi.h

@@ -1507,6 +1507,12 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
 }
 #endif
+#ifdef CONFIG_ARM64
+void acpi_arm_init(void);
+#else
+static inline void acpi_arm_init(void) { }
+#endif
 #ifdef CONFIG_ACPI_PCC
 void acpi_init_pcc(void);
 #else
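This is the usual header-stub idiom: a real prototype under the config symbol and an empty static inline otherwise, so acpi_init() can call acpi_arm_init() unconditionally. A hedged, compilable sketch of the shape (HAVE_MY_FEATURE and my_feature_init() are hypothetical names):

#include <stdio.h>

/* Flip HAVE_MY_FEATURE to 0 and the call below still compiles, becoming
 * an empty inline that the optimiser deletes. */
#define HAVE_MY_FEATURE 1

#if HAVE_MY_FEATURE
static void my_feature_init(void) { puts("feature initialised"); }
#else
static inline void my_feature_init(void) { }
#endif

int main(void)
{
	my_feature_init();	/* callers never need their own #ifdef */
	return 0;
}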

include/linux/acpi_agdi.h (deleted)

@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __ACPI_AGDI_H__
-#define __ACPI_AGDI_H__
-#include <linux/acpi.h>
-#ifdef CONFIG_ACPI_AGDI
-void __init acpi_agdi_init(void);
-#else
-static inline void acpi_agdi_init(void) {}
-#endif
-#endif /* __ACPI_AGDI_H__ */

include/linux/acpi_apmt.h (deleted)

@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * ARM CoreSight PMU driver.
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
- *
- */
-#ifndef __ACPI_APMT_H__
-#define __ACPI_APMT_H__
-#include <linux/acpi.h>
-#ifdef CONFIG_ACPI_APMT
-void acpi_apmt_init(void);
-#else
-static inline void acpi_apmt_init(void) { }
-#endif /* CONFIG_ACPI_APMT */
-#endif /* __ACPI_APMT_H__ */

include/linux/acpi_iort.h

@@ -27,7 +27,6 @@ int iort_register_domain_token(int trans_id, phys_addr_t base,
 void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
 #ifdef CONFIG_ACPI_IORT
-void acpi_iort_init(void);
 u32 iort_msi_map_id(struct device *dev, u32 id);
 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
 					  enum irq_domain_bus_token bus_token);
@@ -43,7 +42,6 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
 phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
 #else
-static inline void acpi_iort_init(void) { }
 static inline u32 iort_msi_map_id(struct device *dev, u32 id)
 { return id; }
 static inline struct irq_domain *iort_get_device_domain(

tools/testing/selftests/arm64/abi/hwcap.c

@@ -39,6 +39,20 @@ static void cssc_sigill(void)
 	asm volatile(".inst 0xdac01c00" : : : "x0");
 }
+static void mops_sigill(void)
+{
+	char dst[1], src[1];
+	register char *dstp asm ("x0") = dst;
+	register char *srcp asm ("x1") = src;
+	register long size asm ("x2") = 1;
+	/* CPYP [x0]!, [x1]!, x2! */
+	asm volatile(".inst 0x1d010440"
+		     : "+r" (dstp), "+r" (srcp), "+r" (size)
+		     :
+		     : "cc", "memory");
+}
 static void rng_sigill(void)
 {
 	asm volatile("mrs x0, S3_3_C2_C4_0" : : : "x0");
@@ -209,6 +223,14 @@ static const struct hwcap_data {
 		.cpuinfo = "cssc",
 		.sigill_fn = cssc_sigill,
 	},
+	{
+		.name = "MOPS",
+		.at_hwcap = AT_HWCAP2,
+		.hwcap_bit = HWCAP2_MOPS,
+		.cpuinfo = "mops",
+		.sigill_fn = mops_sigill,
+		.sigill_reliable = true,
+	},
 	{
 		.name = "RNG",
 		.at_hwcap = AT_HWCAP2,
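HWCAP2_MOPS is the user-facing contract here, so a userspace consumer should gate any hand-rolled CPY*/SET* sequences on it rather than probing with a SIGILL handler. A minimal sketch (the numeric fallback mirrors the current UAPI header, but the constant should normally come from <asm/hwcap.h>):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MOPS
#define HWCAP2_MOPS	(1UL << 43)	/* fallback for old headers */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & HWCAP2_MOPS)
		puts("FEAT_MOPS present: CPY*/SET* may be issued from userspace");
	else
		puts("FEAT_MOPS absent: fall back to memcpy()/memset()");
	return 0;
}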

tools/testing/selftests/arm64/abi/ptrace.c

@@ -20,7 +20,7 @@
 #include "../../kselftest.h"
-#define EXPECTED_TESTS 7
+#define EXPECTED_TESTS 11
 #define MAX_TPIDRS 2
@@ -132,6 +132,34 @@ static void test_tpidr(pid_t child)
 	}
 }
+static void test_hw_debug(pid_t child, int type, const char *type_name)
+{
+	struct user_hwdebug_state state;
+	struct iovec iov;
+	int slots, arch, ret;
+	iov.iov_len = sizeof(state);
+	iov.iov_base = &state;
+	/* Should be able to read the values */
+	ret = ptrace(PTRACE_GETREGSET, child, type, &iov);
+	ksft_test_result(ret == 0, "read_%s\n", type_name);
+	if (ret == 0) {
+		/* Low 8 bits is the number of slots, next 4 bits the arch */
+		slots = state.dbg_info & 0xff;
+		arch = (state.dbg_info >> 8) & 0xf;
+		ksft_print_msg("%s version %d with %d slots\n", type_name,
+			       arch, slots);
+		/* Zero is not currently architecturally valid */
+		ksft_test_result(arch, "%s_arch_set\n", type_name);
+	} else {
+		ksft_test_result_skip("%s_arch_set\n", type_name);
+	}
+}
 static int do_child(void)
 {
 	if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
@@ -207,6 +235,8 @@ static int do_parent(pid_t child)
 	ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
 	test_tpidr(child);
+	test_hw_debug(child, NT_ARM_HW_WATCH, "NT_ARM_HW_WATCH");
+	test_hw_debug(child, NT_ARM_HW_BREAK, "NT_ARM_HW_BREAK");
 	ret = EXIT_SUCCESS;
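The dbg_info decode above follows the layout documented for struct user_hwdebug_state in the arm64 ptrace ABI: bits [7:0] carry the number of breakpoint/watchpoint slots and bits [11:8] the debug architecture version. A standalone sketch of the same decode (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* Decode the dbg_info word from struct user_hwdebug_state:
 * bits [7:0] = slot count, bits [11:8] = debug arch version. */
static void decode_dbg_info(uint32_t dbg_info)
{
	unsigned int slots = dbg_info & 0xff;
	unsigned int arch = (dbg_info >> 8) & 0xf;

	printf("debug arch %u, %u slots\n", arch, slots);
}

int main(void)
{
	decode_dbg_info(0x610);	/* illustrative: arch 6, 16 slots */
	return 0;
}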

tools/testing/selftests/arm64/signal/.gitignore

@@ -4,7 +4,7 @@ fake_sigreturn_*
 sme_*
 ssve_*
 sve_*
-tpidr2_siginfo
+tpidr2_*
 za_*
 zt_*
 !*.[ch]

tools/testing/selftests/arm64/signal/test_signals_utils.c

@@ -249,7 +249,8 @@ static void default_handler(int signum, siginfo_t *si, void *uc)
 			fprintf(stderr, "-- Timeout !\n");
 		} else {
 			fprintf(stderr,
-				"-- RX UNEXPECTED SIGNAL: %d\n", signum);
+				"-- RX UNEXPECTED SIGNAL: %d code %d address %p\n",
+				signum, si->si_code, si->si_addr);
 		}
 		default_result(current, 1);
 }
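Printing si_code and si_addr turns an opaque "unexpected signal" report into something actionable, since those fields distinguish, say, a SEGV_ACCERR at a known address from a kernel-sent kill. A hedged standalone illustration of installing such a reporter with SA_SIGINFO (it raises SIGILL at itself just to demonstrate the output; a raised signal carries an SI_USER/SI_TKILL code rather than a fault address):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void report(int signum, siginfo_t *si, void *uc)
{
	(void)uc;
	fprintf(stderr, "-- RX UNEXPECTED SIGNAL: %d code %d address %p\n",
		signum, si->si_code, si->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = report;
	sa.sa_flags = SA_SIGINFO;	/* deliver siginfo_t to the handler */
	sigaction(SIGILL, &sa, NULL);

	raise(SIGILL);	/* handler prints code and address, then exits */
	return 0;
}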

tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c (new file)

@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ *
+ * Verify that the TPIDR2 register context in signal frames is restored.
+ */
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <asm/sigcontext.h>
+#include "test_signals_utils.h"
+#include "testcases.h"
+#define SYS_TPIDR2 "S3_3_C13_C0_5"
+static uint64_t get_tpidr2(void)
+{
+	uint64_t val;
+	asm volatile (
+		"mrs	%0, " SYS_TPIDR2 "\n"
+		: "=r"(val)
+		:
+		: "cc");
+	return val;
+}
+static void set_tpidr2(uint64_t val)
+{
+	asm volatile (
+		"msr	" SYS_TPIDR2 ", %0\n"
+		:
+		: "r"(val)
+		: "cc");
+}
+static uint64_t initial_tpidr2;
+static bool save_tpidr2(struct tdescr *td)
+{
+	initial_tpidr2 = get_tpidr2();
+	fprintf(stderr, "Initial TPIDR2: %lx\n", initial_tpidr2);
+	return true;
+}
+static int modify_tpidr2(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+	uint64_t my_tpidr2 = get_tpidr2();
+	my_tpidr2++;
+	fprintf(stderr, "Setting TPIDR2 to %lx\n", my_tpidr2);
+	set_tpidr2(my_tpidr2);
+	return 0;
+}
+static void check_tpidr2(struct tdescr *td)
+{
+	uint64_t tpidr2 = get_tpidr2();
+	td->pass = tpidr2 == initial_tpidr2;
+	if (td->pass)
+		fprintf(stderr, "TPIDR2 restored\n");
+	else
+		fprintf(stderr, "TPIDR2 was %lx but is now %lx\n",
+			initial_tpidr2, tpidr2);
+}
+struct tdescr tde = {
+	.name = "TPIDR2 restore",
+	.descr = "Validate that TPIDR2 is restored from the sigframe",
+	.feats_required = FEAT_SME,
+	.timeout = 3,
+	.sig_trig = SIGUSR1,
+	.init = save_tpidr2,
+	.run = modify_tpidr2,
+	.check_result = check_tpidr2,
+};
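For reference, the "restore" being validated lands in the signal frame as a TPIDR2 record chained into uc_mcontext.__reserved. A hedged sketch of walking that chain, assuming the TPIDR2_MAGIC and struct tpidr2_context definitions from the asm/sigcontext.h UAPI header; on non-SME hardware or older kernels the record is simply absent:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

/* Walk the __reserved area of a delivered signal frame looking for the
 * TPIDR2 record, the same chain the sigframe parsers in this suite walk. */
static void handler(int sig, siginfo_t *si, void *ucv)
{
	ucontext_t *uc = ucv;
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	/* Records are chained until a terminator with magic == 0. */
	for (; head->magic; head = (void *)((char *)head + head->size)) {
		if (head->magic == TPIDR2_MAGIC) {
			printf("TPIDR2 in frame: %llx\n", (unsigned long long)
			       ((struct tpidr2_context *)head)->tpidr2);
			return;
		}
	}
	printf("no TPIDR2 record in this frame\n");
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}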