Merge tag 'kvmarm-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 updates for 5.15

- Page ownership tracking between host EL1 and EL2

- Rely on userspace page tables to create large stage-2 mappings

- Fix incompatibility between pKVM and kmemleak

- Fix the PMU reset state, and improve the performance of the virtual PMU

- Move over to the generic KVM entry code

- Address PSCI reset issues w.r.t. save/restore

- Preliminary rework for the upcoming pKVM fixed feature

- A bunch of MM cleanups

- A vGIC fix for spurious timer interrupts

- Various cleanups
Commit e99314a340 by Paolo Bonzini, 2021-09-06 06:34:11 -04:00
574 changed files with 7136 additions and 3867 deletions

View file

@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
tagged pointers in this context is allowed with the exception of
``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
tagged pointers in this context is allowed with these exceptions:
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- The ``range.start``, ``start`` and ``dst`` arguments to the
``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
``userfaultfd()``, as fault addresses subsequently obtained by reading
the file descriptor will be untagged, which may otherwise confuse
tag-unaware programs.
NOTE: This behaviour changed in v5.14 and so some earlier kernels may
incorrectly accept valid tagged pointers for this system call.
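
As an aside — a minimal userspace sketch, not part of this patch — rule 1 can be exercised as below; the tag_ptr() helper and the tag value are invented for illustration, relying on arm64 TBI (top-byte-ignore) to carry a tag in bits 63:56:

#include <stdint.h>
#include <sys/mman.h>

/* Invented helper: place a tag in the top byte, which arm64 TBI ignores
 * when the pointer is dereferenced. */
static void *tag_ptr(void *p, uint8_t tag)
{
        uintptr_t u = (uintptr_t)p & ~(0xffULL << 56);
        return (void *)(u | ((uintptr_t)tag << 56));
}

int main(void)
{
        void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;

        /* Allowed by rule 1: mprotect() untags and accepts the address. */
        if (mprotect(tag_ptr(page, 0x2a), 4096, PROT_READ))
                return 1;
        return 0;
}

Passing the same tagged address to mmap(MAP_FIXED), brk() or mremap()'s new_address would fall under the exceptions above and is expected to be rejected on v5.6+ kernels.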
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to

View file

@ -1,56 +0,0 @@
IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP.
This file documents platform glue layer for IMX.
Please see stmmac.txt for the other unchanged properties.
The device node has following properties.
Required properties:
- compatible: Should be "nxp,imx8mp-dwmac-eqos" to select glue layer
and "snps,dwmac-5.10a" to select IP version.
- clocks: Must contain a phandle for each entry in clock-names.
- clock-names: Should be "stmmaceth" for the host clock.
Should be "pclk" for the MAC apb clock.
Should be "ptp_ref" for the MAC timer clock.
Should be "tx" for the MAC RGMII TX clock:
Should be "mem" for EQOS MEM clock.
- "mem" clock is required for imx8dxl platform.
- "mem" clock is not required for imx8mp platform.
- interrupt-names: Should contain a list of interrupt names corresponding to
the interrupts in the interrupts property, if available.
Should be "macirq" for the main MAC IRQ
Should be "eth_wake_irq" for the IT which wake up system
- intf_mode: Should be phandle/offset pair. The phandle to the syscon node which
encompases the GPR register, and the offset of the GPR register.
- required for imx8mp platform.
- is optional for imx8dxl platform.
Optional properties:
- intf_mode: is optional for imx8dxl platform.
- snps,rmii_refclk_ext: to select RMII reference clock from external.
Example:
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eth_wake_irq", "macirq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
<&clk IMX8MP_SYS_PLL2_100M>,
<&clk IMX8MP_SYS_PLL2_125M>;
assigned-clock-rates = <0>, <100000000>, <125000000>;
nvmem-cells = <&eth_mac0>;
nvmem-cell-names = "mac-address";
nvmem_macaddr_swap;
intf_mode = <&gpr 0x4>;
status = "disabled";
};

View file

@ -0,0 +1,93 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP i.MX8 DWMAC glue layer Device Tree Bindings
maintainers:
- Joakim Zhang <qiangqing.zhang@nxp.com>
# We need a select here so we don't match all nodes with 'snps,dwmac'
select:
properties:
compatible:
contains:
enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
required:
- compatible
allOf:
- $ref: "snps,dwmac.yaml#"
properties:
compatible:
oneOf:
- items:
- enum:
- nxp,imx8mp-dwmac-eqos
- nxp,imx8dxl-dwmac-eqos
- const: snps,dwmac-5.10a
clocks:
minItems: 3
maxItems: 5
items:
- description: MAC host clock
- description: MAC apb clock
- description: MAC timer clock
- description: MAC RGMII TX clock
- description: EQOS MEM clock
clock-names:
minItems: 3
maxItems: 5
contains:
enum:
- stmmaceth
- pclk
- ptp_ref
- tx
- mem
intf_mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Should be phandle/offset pair. The phandle to the syscon node which
encompases the GPR register, and the offset of the GPR register.
snps,rmii_refclk_ext:
$ref: /schemas/types.yaml#/definitions/flag
description:
To select RMII reference clock from external.
required:
- compatible
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/imx8mp-clock.h>
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos","snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,
<&clk IMX8MP_CLK_ENET_QOS>;
clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
phy-mode = "rgmii";
status = "disabled";
};

View file

@ -28,6 +28,7 @@ select:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@ -82,6 +83,7 @@ properties:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
@ -375,6 +377,7 @@ allOf:
- snps,dwmac-4.00
- snps,dwmac-4.10a
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac

View file

@ -57,12 +57,14 @@ properties:
maxItems: 1
power-domains:
deprecated: true
description:
Power domain to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain.
maxItems: 1
required-opps:
deprecated: true
description:
Performance state to use for enable control. This binding is only
available if the compatible is chosen to regulator-fixed-domain. The

View file

@ -114,7 +114,7 @@ properties:
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
patternProperties:
port(@[0-9a-f]+)?:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false

View file

@ -243,8 +243,8 @@ Configuration Flags and Socket Options
These are the various configuration flags that can be used to control
and monitor the behavior of AF_XDP sockets.
XDP_COPY and XDP_ZERO_COPY bind flags
-------------------------------------
XDP_COPY and XDP_ZEROCOPY bind flags
------------------------------------
When you bind to a socket, the kernel will first try to use zero-copy
copy. If zero-copy is not supported, it will fall back on using copy
@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
like to force a certain mode, you can use the following flags. If you
pass the XDP_COPY flag to the bind call, the kernel will force the
socket into copy mode. If it cannot use copy mode, the bind call will
fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
socket into zero-copy mode or fail.
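
To make the flag usage concrete, here is a hedged userspace sketch (not from this patch; the UMEM registration and fill/completion/RX/TX ring setup that a real bind() requires are omitted, and ifindex/queue_id are assumed inputs):

#include <linux/if_xdp.h>
#include <string.h>
#include <sys/socket.h>

int bind_xsk_copy_mode(int ifindex, unsigned int queue_id)
{
        struct sockaddr_xdp sxdp;
        int fd = socket(AF_XDP, SOCK_RAW, 0);

        if (fd < 0)
                return -1;

        memset(&sxdp, 0, sizeof(sxdp));
        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = ifindex;
        sxdp.sxdp_queue_id = queue_id;
        sxdp.sxdp_flags = XDP_COPY;     /* or XDP_ZEROCOPY to force zero-copy */

        /* With XDP_ZEROCOPY, bind() fails if the driver cannot do zero-copy;
         * with XDP_COPY, copy mode is forced even if zero-copy would work. */
        if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
                return -1;
        return fd;
}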
XDP_SHARED_UMEM bind flag

View file

@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
initial value when the blackhole issue goes away.
0 to disable the blackhole detection.
By default, it is set to 1hr.
By default, it is set to 0 (feature is disabled).
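
As an illustrative aside (not part of the patch), the knob can be tuned by writing the procfs file; the value 3600 below restores the old one-hour initial timeout and is an arbitrary choice:

#include <stdio.h>

int main(void)
{
        /* Assumes procfs is mounted at /proc; needs root/CAP_NET_ADMIN. */
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_blackhole_timeout_sec", "w");

        if (!f)
                return 1;
        fprintf(f, "%d\n", 3600);
        return fclose(f) ? 1 : 0;
}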
tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
The list consists of a primary key and an optional backup key. The

View file

@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
cpu int the cpu on which the event occurred.
common_cpu int the cpu on which the event occurred.
====================== ==== =======================================
Extended error information

View file

@ -445,7 +445,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
ACRN HYPERVISOR SERVICE MODULE
M: Shuo Liu <shuo.a.liu@intel.com>
M: Fei Li <fei1.li@intel.com>
L: acrn-dev@lists.projectacrn.org (subscribers-only)
S: Supported
W: https://projectacrn.org
@ -7858,9 +7858,9 @@ S: Maintained
F: drivers/input/touchscreen/goodix.c
GOOGLE ETHERNET DRIVERS
M: Catherine Sullivan <csully@google.com>
R: Sagi Shahar <sagis@google.com>
R: Jon Olson <jonolson@google.com>
M: Jeroen de Borst <jeroendb@google.com>
R: Catherine Sullivan <csully@google.com>
R: David Awogbemila <awogbemila@google.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@ -11327,6 +11327,12 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-maxiradio*
MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
R: Yasushi SHOJI <yashi@spacecubics.com>
L: linux-can@vger.kernel.org
S: Maintained
F: drivers/net/can/usb/mcba_usb.c
MCAN MMIO DEVICE DRIVER
M: Chandrasekar Ramakrishnan <rcsekar@samsung.com>
L: linux-can@vger.kernel.org
@ -11758,6 +11764,7 @@ F: drivers/char/hw_random/mtk-rng.c
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
M: Landen Chao <Landen.Chao@mediatek.com>
M: DENG Qingfang <dqfext@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mt7530.*
@ -15467,6 +15474,8 @@ M: Pan, Xinhui <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
B: https://gitlab.freedesktop.org/drm/amd/-/issues
C: irc://irc.oftc.net/radeon
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@ -19122,7 +19131,7 @@ M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
F: drivers/phy/hisilicon/phy-kirin970-usb3.c
F: drivers/phy/hisilicon/phy-hi3670-usb3.c
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
@ -19800,6 +19809,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/ptp/ptp_vmw.c
VMWARE VMCI DRIVER
M: Jorgen Hansen <jhansen@vmware.com>
M: Vishnu Dasa <vdasa@vmware.com>
L: linux-kernel@vger.kernel.org
L: pv-drivers@vmware.com (private)
S: Maintained
F: drivers/misc/vmw_vmci/
VMWARE VMMOUSE SUBDRIVER
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>

View file

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Opossums on Parade
# *DOCUMENTATION*

View file

@ -14,7 +14,6 @@ config ALPHA
select PCI_SYSCALL if PCI
select HAVE_AOUT
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
@ -532,7 +531,7 @@ config SMP
will run faster if you say N here.
See also the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
<https://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.

View file

@ -23,7 +23,7 @@
#include "ksize.h"
extern unsigned long switch_to_osf_pal(unsigned long nr,
struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
unsigned long *vptb);
extern void move_stack(unsigned long new_stack);

View file

@ -200,7 +200,7 @@ extern char _end;
START_ADDR KSEG address of the entry point of kernel code.
ZERO_PGE KSEG address of page full of zeroes, but
upon entry to kerne cvan be expected
upon entry to kernel, it can be expected
to hold the parameter list and possible
INTRD information.

View file

@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
__attribute__ ((format (printf, 1, 2)));
/*
* gzip delarations
* gzip declarations
*/
#define OF(args) args
#define STATIC static

View file

@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
CONFIG_MATHEMU=y
CONFIG_CRYPTO_HMAC=y
CONFIG_DEVTMPFS=y

View file

@ -4,15 +4,4 @@
#include <uapi/asm/compiler.h>
/* Some idiots over in <linux/compiler.h> thought inline should imply
always_inline. This breaks stuff. We'll include this file whenever
we run into such problems. */
#include <linux/compiler.h>
#undef inline
#undef __inline__
#undef __inline
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif /* __ALPHA_COMPILER_H */

View file

@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_ALPHA;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->r0;
}
#endif /* _ASM_ALPHA_SYSCALL_H */

View file

@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
return -EFAULT;
state = &current_thread_info()->ieee_state;
/* Update softare trap enable bits. */
/* Update software trap enable bits. */
*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
/* Update the real fpcr. */
@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
state = &current_thread_info()->ieee_state;
exc &= IEEE_STATUS_MASK;
/* Update softare trap enable bits. */
/* Update software trap enable bits. */
swcr = (*state & IEEE_SW_MASK) | exc;
*state |= exc;

View file

@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
* EV67 but we don't do suffiently deep CPU detection to detect them.
* EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)

View file

@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childstack->r26 = (unsigned long) ret_from_kernel_thread;
childstack->r9 = usp; /* function */
childstack->r10 = kthread_arg;
childregs->hae = alpha_mv.hae_cache,
childregs->hae = alpha_mv.hae_cache;
childti->pcb.usp = 0;
return 0;
}

View file

@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
i, cluster->usage, cluster->start_pfn,
cluster->start_pfn + cluster->numpages);
/* Bit 0 is console/PALcode reserved. Bit 1 is
non-volatile memory -- we might want to mark
this for later. */
if (cluster->usage & 3)
continue;
end = cluster->start_pfn + cluster->numpages;
if (end > max_low_pfn)
max_low_pfn = end;
memblock_add(PFN_PHYS(cluster->start_pfn),
cluster->numpages << PAGE_SHIFT);
/* Bit 0 is console/PALcode reserved. Bit 1 is
non-volatile memory -- we might want to mark
this for later. */
if (cluster->usage & 3)
memblock_reserve(PFN_PHYS(cluster->start_pfn),
cluster->numpages << PAGE_SHIFT);
}
/*

View file

@ -582,7 +582,7 @@ void
smp_send_stop(void)
{
cpumask_t to_whom;
cpumask_copy(&to_whom, cpu_possible_mask);
cpumask_copy(&to_whom, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
if (hard_smp_processor_id() != boot_cpu_id)

View file

@ -212,7 +212,7 @@ nautilus_init_pci(void)
/* Use default IO. */
pci_add_resource(&bridge->windows, &ioport_resource);
/* Irongate PCI memory aperture, calculate requred size before
/* Irongate PCI memory aperture, calculate required size before
setting it up. */
pci_add_resource(&bridge->windows, &irongate_mem);

View file

@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
long error;
/* Check the UAC bits to decide what the user wants us to do
with the unaliged access. */
with the unaligned access. */
if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
if (__ratelimit(&ratelimit)) {

View file

@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
long do_alpha_fp_emul(unsigned long);
int init_module(void)
static int alpha_fp_emul_init_module(void)
{
save_emul_imprecise = alpha_fp_emul_imprecise;
save_emul = alpha_fp_emul;
@ -73,12 +73,14 @@ int init_module(void)
alpha_fp_emul = do_alpha_fp_emul;
return 0;
}
module_init(alpha_fp_emul_init_module);
void cleanup_module(void)
static void alpha_fp_emul_cleanup_module(void)
{
alpha_fp_emul_imprecise = save_emul_imprecise;
alpha_fp_emul = save_emul;
}
module_exit(alpha_fp_emul_cleanup_module);
#undef alpha_fp_emul_imprecise
#define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise
@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}
EXPORT_SYMBOL(__udiv_qrnnd);

View file

@ -95,7 +95,6 @@ config ARM
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@ -361,7 +360,6 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
select HAVE_IDE
select NEED_MACH_IO_H if !MMU
select NEED_MACH_MEMORY_H
help
@ -430,7 +428,6 @@ config ARCH_PXA
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select PLAT_PXA
select SPARSE_IRQ
@ -446,7 +443,6 @@ config ARCH_RPC
select ARM_HAS_SG_CHAIN
select CPU_SA110
select FIQ
select HAVE_IDE
select HAVE_PATA_PLATFORM
select ISA_DMA_API
select LEGACY_TIMER_TICK
@ -469,7 +465,6 @@ config ARCH_SA1100
select CPU_SA1100
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select ISA
select NEED_MACH_MEMORY_H
@ -505,7 +500,6 @@ config ARCH_OMAP1
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select HAVE_LEGACY_CLK
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD

View file

@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
select RESET_CONTROLLER
select HAVE_IDE
select PINCTRL_SINGLE
if ARCH_DAVINCI

View file

@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
break;
default:
break;
}

View file

@ -1602,6 +1602,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:

View file

@ -579,7 +579,7 @@ uart2: serial@30890000 {
};
flexcan1: can@308c0000 {
compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
compatible = "fsl,imx8mp-flexcan";
reg = <0x308c0000 0x10000>;
interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@ -594,7 +594,7 @@ flexcan1: can@308c0000 {
};
flexcan2: can@308d0000 {
compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
compatible = "fsl,imx8mp-flexcan";
reg = <0x308d0000 0x10000>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
@ -821,9 +821,9 @@ fec: ethernet@30be0000 {
eqos: ethernet@30bf0000 {
compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
reg = <0x30bf0000 0x10000>;
interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eth_wake_irq", "macirq";
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_wake_irq";
clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
<&clk IMX8MP_CLK_QOS_ENET_ROOT>,
<&clk IMX8MP_CLK_ENET_QOS_TIMER>,

View file

@ -1063,7 +1063,7 @@ &usb2 {
status = "okay";
extcon = <&usb2_id>;
usb@7600000 {
dwc3@7600000 {
extcon = <&usb2_id>;
dr_mode = "otg";
maximum-speed = "high-speed";
@ -1074,7 +1074,7 @@ &usb3 {
status = "okay";
extcon = <&usb3_id>;
usb@6a00000 {
dwc3@6a00000 {
extcon = <&usb3_id>;
dr_mode = "otg";
};

View file

@ -443,7 +443,7 @@ usb_0: usb@8af8800 {
resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
dwc_0: usb@8a00000 {
dwc_0: dwc3@8a00000 {
compatible = "snps,dwc3";
reg = <0x8a00000 0xcd00>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
@ -484,7 +484,7 @@ usb_1: usb@8cf8800 {
resets = <&gcc GCC_USB1_BCR>;
status = "disabled";
dwc_1: usb@8c00000 {
dwc_1: dwc3@8c00000 {
compatible = "snps,dwc3";
reg = <0x8c00000 0xcd00>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -2566,7 +2566,7 @@ usb3: usb@6af8800 {
power-domains = <&gcc USB30_GDSC>;
status = "disabled";
usb@6a00000 {
dwc3@6a00000 {
compatible = "snps,dwc3";
reg = <0x06a00000 0xcc00>;
interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
@ -2873,7 +2873,7 @@ usb2: usb@76f8800 {
qcom,select-utmi-as-pipe-clk;
status = "disabled";
usb@7600000 {
dwc3@7600000 {
compatible = "snps,dwc3";
reg = <0x07600000 0xcc00>;
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -1964,7 +1964,7 @@ usb3: usb@a8f8800 {
resets = <&gcc GCC_USB_30_BCR>;
usb3_dwc3: usb@a800000 {
usb3_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xcd00>;
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -337,7 +337,7 @@ &usb2_phy_sec {
&usb3 {
status = "okay";
usb@7580000 {
dwc3@7580000 {
dr_mode = "host";
};
};

View file

@ -544,7 +544,7 @@ usb3: usb@7678800 {
assigned-clock-rates = <19200000>, <200000000>;
status = "disabled";
usb@7580000 {
dwc3@7580000 {
compatible = "snps,dwc3";
reg = <0x07580000 0xcd00>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@ -573,7 +573,7 @@ usb2: usb@79b8800 {
assigned-clock-rates = <19200000>, <133333333>;
status = "disabled";
usb@78c0000 {
dwc3@78c0000 {
compatible = "snps,dwc3";
reg = <0x078c0000 0xcc00>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -2756,7 +2756,7 @@ usb_1: usb@a6f8800 {
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -3781,7 +3781,7 @@ usb_1: usb@a6f8800 {
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
@ -3829,7 +3829,7 @@ usb_2: usb@a8f8800 {
<&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
interconnect-names = "usb-ddr", "apps-usb";
usb_2_dwc3: usb@a800000 {
usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -2344,7 +2344,7 @@ usb_1: usb@a6f8800 {
resets = <&gcc GCC_USB30_PRIM_BCR>;
usb_1_dwc3: usb@a600000 {
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;

View file

@ -602,14 +602,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
return val == ID_AA64PFR0_EL1_32BIT_64BIT;
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
return val == ID_AA64PFR0_EL0_32BIT_64BIT;
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
@ -657,7 +657,8 @@ static inline bool system_supports_4kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN4_SHIFT);
return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
}
static inline bool system_supports_64kb_granule(void)
@ -669,7 +670,8 @@ static inline bool system_supports_64kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN64_SHIFT);
return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
}
static inline bool system_supports_16kb_granule(void)
@ -681,7 +683,8 @@ static inline bool system_supports_16kb_granule(void)
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN16_SHIFT);
return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
}
static inline bool system_supports_mixed_endian_el0(void)
@ -781,13 +784,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
switch (parange) {
case 0: return 32;
case 1: return 36;
case 2: return 40;
case 3: return 42;
case 4: return 44;
case 5: return 48;
case 6: return 52;
case ID_AA64MMFR0_PARANGE_32: return 32;
case ID_AA64MMFR0_PARANGE_36: return 36;
case ID_AA64MMFR0_PARANGE_40: return 40;
case ID_AA64MMFR0_PARANGE_42: return 42;
case ID_AA64MMFR0_PARANGE_44: return 44;
case ID_AA64MMFR0_PARANGE_48: return 48;
case ID_AA64MMFR0_PARANGE_52: return 52;
/*
* A future PE could use a value unknown to the kernel.
* However, by the "D10.1.4 Principles of the ID scheme

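A hypothetical in-kernel caller of the helper above could look like this (sketch only; cpu_pa_bits() is invented, while the two accessors it uses already exist in the arm64 cpufeature code):

static int cpu_pa_bits(void)
{
        u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        int parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                        ID_AA64MMFR0_PARANGE_SHIFT);

        /* Maps the PARange encodings introduced above to 32..52 bits. */
        return id_aa64mmfr0_parange_to_phys_shift(parange);
}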
View file

@ -12,8 +12,13 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
#define HCR_TID5 (UL(1) << 58)
#define HCR_DCT (UL(1) << 57)
#define HCR_ATA_SHIFT 56
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
#define HCR_AMVOFFEN (UL(1) << 51)
#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
@ -32,9 +37,9 @@
#define HCR_TVM (UL(1) << 26)
#define HCR_TTLB (UL(1) << 25)
#define HCR_TPU (UL(1) << 24)
#define HCR_TPC (UL(1) << 23)
#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
#define HCR_TSW (UL(1) << 22)
#define HCR_TAC (UL(1) << 21)
#define HCR_TACR (UL(1) << 21)
#define HCR_TIDCP (UL(1) << 20)
#define HCR_TSC (UL(1) << 19)
#define HCR_TID3 (UL(1) << 18)
@ -56,12 +61,13 @@
#define HCR_PTW (UL(1) << 2)
#define HCR_SWIO (UL(1) << 1)
#define HCR_VM (UL(1) << 0)
#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39))
/*
* The bits we set in HCR:
* TLOR: Trap LORegion register accesses
* RW: 64bit by default, can be overridden for 32bit VMs
* TAC: Trap ACTLR
* TACR: Trap ACTLR
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWE: Trap WFE
@ -76,7 +82,7 @@
* PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
@ -275,24 +281,40 @@
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
GENMASK(29, 21) | \
GENMASK(19, 14) | \
BIT(11))
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))
#define MDCR_EL2_TTRF (1 << 19)
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_HPMFZS (UL(1) << 36)
#define MDCR_EL2_HPMFZO (UL(1) << 29)
#define MDCR_EL2_MTPME (UL(1) << 28)
#define MDCR_EL2_TDCC (UL(1) << 27)
#define MDCR_EL2_HCCD (UL(1) << 23)
#define MDCR_EL2_TTRF (UL(1) << 19)
#define MDCR_EL2_HPMD (UL(1) << 17)
#define MDCR_EL2_TPMS (UL(1) << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
#define MDCR_EL2_TDRA (1 << 11)
#define MDCR_EL2_TDOSA (1 << 10)
#define MDCR_EL2_TDA (1 << 9)
#define MDCR_EL2_TDE (1 << 8)
#define MDCR_EL2_HPME (1 << 7)
#define MDCR_EL2_TPM (1 << 6)
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)
#define MDCR_EL2_TDRA (UL(1) << 11)
#define MDCR_EL2_TDOSA (UL(1) << 10)
#define MDCR_EL2_TDA (UL(1) << 9)
#define MDCR_EL2_TDE (UL(1) << 8)
#define MDCR_EL2_HPME (UL(1) << 7)
#define MDCR_EL2_TPM (UL(1) << 6)
#define MDCR_EL2_TPMCR (UL(1) << 5)
#define MDCR_EL2_HPMN_MASK (UL(0x1F))
#define MDCR_EL2_RES0 (GENMASK(63, 37) | \
GENMASK(35, 30) | \
GENMASK(25, 24) | \
GENMASK(22, 20) | \
BIT(18) | \
GENMASK(16, 15))
/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT

View file

@ -59,12 +59,11 @@
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings 16
#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16
#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
#ifndef __ASSEMBLY__
@ -210,7 +209,7 @@ extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
extern u64 __kvm_get_mdcr_el2(void);
#define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \

View file

@ -66,7 +66,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);
int __attribute_const__ kvm_target_cpu(void);
u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
@ -185,7 +185,6 @@ enum vcpu_sysreg {
PMCNTENSET_EL0, /* Count Enable Set Register */
PMINTENSET_EL1, /* Interrupt Enable Set Register */
PMOVSSET_EL0, /* Overflow Flag Status Set Register */
PMSWINC_EL0, /* Software Increment Register */
PMUSERENR_EL0, /* User Enable Register */
/* Pointer Authentication Registers in a strict increasing order. */
@ -287,9 +286,13 @@ struct kvm_vcpu_arch {
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
/* HYP configuration */
/* Values of trap registers for the guest. */
u64 hcr_el2;
u32 mdcr_el2;
u64 mdcr_el2;
u64 cptr_el2;
/* Values of trap registers for the host before guest entry. */
u64 mdcr_el2_host;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
@ -576,6 +579,7 @@ struct kvm_vcpu_stat {
u64 wfi_exit_stat;
u64 mmio_exit_user;
u64 mmio_exit_kernel;
u64 signal_exits;
u64 exits;
};
@ -771,6 +775,11 @@ void kvm_arch_free_vm(struct kvm *kvm);
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
return false;
}
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

View file

@ -95,7 +95,7 @@ void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif
u64 __guest_enter(struct kvm_vcpu *vcpu);

View file

@ -252,6 +252,11 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
/*
* When this is (directly or indirectly) used on the TLB invalidation
* path, we rely on a previously issued DSB so that page table updates
* and VMID reads are correctly ordered.
*/
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
struct kvm_vmid *vmid = &mmu->vmid;
@ -259,7 +264,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
baddr = mmu->pgd_phys;
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
@ -267,9 +272,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
* Must be called from hyp code running at EL2 with an updated VTTBR
* and interrupts disabled.
*/
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
struct kvm_arch *arch)
{
write_sysreg(vtcr, vtcr_el2);
write_sysreg(arch->vtcr, vtcr_el2);
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
/*
@ -280,11 +286,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
__load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
}
static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
return container_of(mmu->arch, struct kvm, arch);

View file

@ -25,6 +25,46 @@ static inline u64 kvm_get_parange(u64 mmfr0)
typedef u64 kvm_pte_t;
#define KVM_PTE_VALID BIT(0)
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
return pte & KVM_PTE_VALID;
}
static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
u64 pa = pte & KVM_PTE_ADDR_MASK;
if (PAGE_SHIFT == 16)
pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
return pa;
}
static inline u64 kvm_granule_shift(u32 level)
{
/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}
static inline u64 kvm_granule_size(u32 level)
{
return BIT(kvm_granule_shift(level));
}
static inline bool kvm_level_supports_block_mapping(u32 level)
{
/*
* Reject invalid block mappings and don't bother with 4TB mappings for
* 52-bit PAs.
*/
return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}
/**
* struct kvm_pgtable_mm_ops - Memory management callbacks.
* @zalloc_page: Allocate a single zeroed memory page.
@ -75,6 +115,44 @@ enum kvm_pgtable_stage2_flags {
KVM_PGTABLE_S2_IDMAP = BIT(1),
};
/**
* enum kvm_pgtable_prot - Page-table permissions and attributes.
* @KVM_PGTABLE_PROT_X: Execute permission.
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
* @KVM_PGTABLE_PROT_SW3: Software bit 3.
*/
enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_X = BIT(0),
KVM_PGTABLE_PROT_W = BIT(1),
KVM_PGTABLE_PROT_R = BIT(2),
KVM_PGTABLE_PROT_DEVICE = BIT(3),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
KVM_PGTABLE_PROT_SW2 = BIT(57),
KVM_PGTABLE_PROT_SW3 = BIT(58),
};
#define KVM_PGTABLE_PROT_RW (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX (KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)
#define PKVM_HOST_MEM_PROT KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT KVM_PGTABLE_PROT_RW
#define PAGE_HYP KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO (KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
enum kvm_pgtable_prot prot);
/**
* struct kvm_pgtable - KVM page-table.
* @ia_bits: Maximum input address size, in bits.
@ -82,6 +160,9 @@ enum kvm_pgtable_stage2_flags {
* @pgd: Pointer to the first top-level entry of the page-table.
* @mm_ops: Memory management callbacks.
* @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
* @flags: Stage-2 page-table flags.
* @force_pte_cb: Function that returns true if page level mappings must
* be used instead of block mappings.
*/
struct kvm_pgtable {
u32 ia_bits;
@ -92,36 +173,7 @@ struct kvm_pgtable {
/* Stage-2 only */
struct kvm_s2_mmu *mmu;
enum kvm_pgtable_stage2_flags flags;
};
/**
* enum kvm_pgtable_prot - Page-table permissions and attributes.
* @KVM_PGTABLE_PROT_X: Execute permission.
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
*/
enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_X = BIT(0),
KVM_PGTABLE_PROT_W = BIT(1),
KVM_PGTABLE_PROT_R = BIT(2),
KVM_PGTABLE_PROT_DEVICE = BIT(3),
};
#define PAGE_HYP (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define PAGE_HYP_EXEC (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO (KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
/**
* struct kvm_mem_range - Range of Intermediate Physical Addresses
* @start: Start of the range.
* @end: End of the range.
*/
struct kvm_mem_range {
u64 start;
u64 end;
kvm_pgtable_force_pte_cb_t force_pte_cb;
};
/**
@ -216,21 +268,24 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/**
* kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table.
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise.
* @arch: Arch-specific KVM structure representing the guest virtual
* machine.
* @mm_ops: Memory management callbacks.
* @flags: Stage-2 configuration flags.
* @force_pte_cb: Function that returns true if page level mappings must
* be used instead of block mappings.
*
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags);
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb);
#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)
__kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
@ -374,7 +429,8 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
* If there is a valid, leaf page-table entry used to translate @addr, then
* relax the permissions in that entry according to the read, write and
* execute permissions specified by @prot. No permissions are removed, and
* TLB invalidation is performed after updating the entry.
* TLB invalidation is performed after updating the entry. Software bits cannot
* be set or cleared using kvm_pgtable_stage2_relax_perms().
*
* Return: 0 on success, negative error code on failure.
*/
@ -433,22 +489,42 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_pgtable_walker *walker);
/**
* kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical
* Addresses with compatible permission
* attributes.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Address that must be covered by the range.
* @prot: Protection attributes that the range must be compatible with.
* @range: Range structure used to limit the search space at call time and
* that will hold the result.
* kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
* with its level.
* @pgt: Page-table structure initialised by kvm_pgtable_*_init()
* or a similar initialiser.
* @addr: Input address for the start of the walk.
* @ptep: Pointer to storage for the retrieved PTE.
* @level: Pointer to storage for the level of the retrieved PTE.
*
* The offset of @addr within a page is ignored. An IPA is compatible with @prot
* iff its corresponding stage-2 page-table entry has default ownership and, if
* valid, is mapped with protection attributes identical to @prot.
* The offset of @addr within a page is ignored.
*
* The walker will walk the page-table entries corresponding to the input
* address specified, retrieving the leaf corresponding to this address.
* Invalid entries are treated as leaf entries.
*
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
enum kvm_pgtable_prot prot,
struct kvm_mem_range *range);
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
kvm_pte_t *ptep, u32 *level);
/**
* kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
* stage-2 Page-Table Entry.
* @pte: Page-table entry
*
* Return: protection attributes of the page-table entry in the enum
* kvm_pgtable_prot format.
*/
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
/**
* kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
* Page-Table Entry.
* @pte: Page-table entry
*
* Return: protection attributes of the page-table entry in the enum
* kvm_pgtable_prot format.
*/
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
#endif /* __ARM64_KVM_PGTABLE_H__ */
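
To show how the new query helpers compose, a hypothetical caller (kernel context assumed; stage2_ipa_is_writable() is invented for illustration):

static bool stage2_ipa_is_writable(struct kvm_pgtable *pgt, u64 ipa)
{
        kvm_pte_t pte;
        u32 level;

        if (kvm_pgtable_get_leaf(pgt, ipa, &pte, &level))
                return false;           /* walk failed */

        /* Invalid entries are returned as leaves, so check validity. */
        if (!kvm_pte_valid(pte))
                return false;

        return kvm_pgtable_stage2_pte_prot(pte) & KVM_PGTABLE_PROT_W;
}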

View file

@ -784,14 +784,13 @@
#define ID_AA64PFR0_AMU 0x1
#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_RAS_V1 0x1
#define ID_AA64PFR0_RAS_V1P1 0x2
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL1_32BIT_64BIT 0x2
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
@ -847,15 +846,29 @@
#define ID_AA64MMFR0_ASID_SHIFT 4
#define ID_AA64MMFR0_PARANGE_SHIFT 0
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN64_NI 0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
#define ID_AA64MMFR0_ASID_8 0x0
#define ID_AA64MMFR0_ASID_16 0x2
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN64_NI 0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
#define ID_AA64MMFR0_PARANGE_32 0x0
#define ID_AA64MMFR0_PARANGE_36 0x1
#define ID_AA64MMFR0_PARANGE_40 0x2
#define ID_AA64MMFR0_PARANGE_42 0x3
#define ID_AA64MMFR0_PARANGE_44 0x4
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
#define ARM64_MIN_PARANGE_BITS 32
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
@ -900,6 +913,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
#define ID_AA64DFR0_MTPMU_SHIFT 48
#define ID_AA64DFR0_TRBE_SHIFT 44
#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
@ -1028,16 +1042,19 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT
#endif
#define MVFR2_FPMISC_SHIFT 4
@ -1153,6 +1170,11 @@
#define ICH_VTR_A3V_SHIFT 21
#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
#define ARM64_FEATURE_FIELD_BITS 4
/* Create a mask for the feature bits of the specified feature. */
#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30

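An illustrative use of the new mask macro (assumed kernel context; pfr0_el1_field() is invented, FIELD_GET() comes from <linux/bitfield.h>):

#include <linux/bitfield.h>

/* Extract the 4-bit EL1 field from a sampled ID_AA64PFR0_EL1 value. */
static u64 pfr0_el1_field(u64 pfr0)
{
        return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), pfr0);
}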
View file

@ -239,8 +239,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
ARM64_FTR_END,
};
@ -1956,7 +1956,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
},
#ifdef CONFIG_KVM
{
@ -1967,7 +1967,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL1_SHIFT,
.min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
},
{
.desc = "Protected KVM",

View file

@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check)
EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
stp x29, x30, [sp, #-16]!
mov x29, sp
alternative_if ARM64_SVE
bl __arm_smccc_sve_check
alternative_else_nop_endif
\instr #0
ldr x4, [sp]
ldr x4, [sp, #16]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
ldr x4, [sp, #8]
ldr x4, [sp, #24]
cbz x4, 1f /* no quirk structure */
ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
b.ne 1f
str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1: ret
1: ldp x29, x30, [sp], #16
ret
.endm
/*

View file

@ -181,6 +181,8 @@ SECTIONS
/* everything from this point to __init_begin will be marked RO NX */
RO_DATA(PAGE_SIZE)
HYPERVISOR_DATA_SECTIONS
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
idmap_pg_end = .;
@ -260,8 +262,6 @@ SECTIONS
_sdata = .;
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
HYPERVISOR_DATA_SECTIONS
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback

View file

@ -26,6 +26,7 @@ menuconfig KVM
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_XFER_TO_GUEST_WORK
select SRCU
select KVM_VFIO
select HAVE_KVM_EVENTFD
@ -46,6 +47,15 @@ if KVM
source "virt/kvm/Kconfig"
config NVHE_EL2_DEBUG
bool "Debug mode for non-VHE EL2 object"
help
Say Y here to enable the debug mode for the non-VHE KVM EL2 object.
Failure reports will BUG() in the hypervisor. This is intended for
local EL2 hypervisor development.
If unsure, say N.
endif # KVM
endif # VIRTUALIZATION

View file

@ -6,6 +6,7 @@
#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@ -15,6 +16,7 @@
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
@ -42,10 +44,6 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
@ -94,10 +92,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm->arch.return_nisv_io_abort_to_user = true;
break;
case KVM_CAP_ARM_MTE:
if (!system_supports_mte() || kvm->created_vcpus)
return -EINVAL;
r = 0;
kvm->arch.mte_enabled = true;
mutex_lock(&kvm->lock);
if (!system_supports_mte() || kvm->created_vcpus) {
r = -EINVAL;
} else {
r = 0;
kvm->arch.mte_enabled = true;
}
mutex_unlock(&kvm->lock);
break;
default:
r = -EINVAL;
@ -571,7 +573,7 @@ static void update_vmid(struct kvm_vmid *vmid)
kvm_call_hyp(__kvm_flush_vm_context);
}
vmid->vmid = kvm_next_vmid;
WRITE_ONCE(vmid->vmid, kvm_next_vmid);
kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
@ -714,6 +716,45 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
static_branch_unlikely(&arm64_mismatched_32bit_el0);
}
/**
* kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
* @vcpu: The VCPU pointer
* @ret: Pointer to write optional return code
*
* Returns: true if the VCPU needs to return to a preemptible + interruptible
* kernel context and skip guest entry.
*
* This function disambiguates between two different types of exits: exits to a
* preemptible + interruptible kernel context and exits to userspace. For an
* exit to userspace, this function will write the return code to ret and return
* true. For an exit to preemptible + interruptible kernel context (i.e. check
* for pending work and re-enter), return true without writing to ret.
*/
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
struct kvm_run *run = vcpu->run;
/*
* If we're using a userspace irqchip, then check if we need
* to tell a userspace irqchip about timer or PMU level
* changes and if so, exit to userspace (the actual level
* state gets updated in kvm_timer_update_run and
* kvm_pmu_update_run below).
*/
if (static_branch_unlikely(&userspace_irqchip_in_use)) {
if (kvm_timer_should_notify_user(vcpu) ||
kvm_pmu_should_notify_user(vcpu)) {
*ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
return true;
}
}
return kvm_request_pending(vcpu) ||
need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
xfer_to_guest_mode_work_pending();
}
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@ -757,7 +798,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/*
* Check conditions before entering the guest
*/
cond_resched();
ret = xfer_to_guest_mode_handle_work(vcpu);
if (!ret)
ret = 1;
update_vmid(&vcpu->arch.hw_mmu->vmid);
@ -776,30 +819,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_vgic_flush_hwstate(vcpu);
/*
* Exit if we have a signal pending so that we can deliver the
* signal to user space.
*/
if (signal_pending(current)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
/*
* If we're using a userspace irqchip, then check if we need
* to tell a userspace irqchip about timer or PMU level
* changes and if so, exit to userspace (the actual level
* state gets updated in kvm_timer_update_run and
* kvm_pmu_update_run below).
*/
if (static_branch_unlikely(&userspace_irqchip_in_use)) {
if (kvm_timer_should_notify_user(vcpu) ||
kvm_pmu_should_notify_user(vcpu)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
}
/*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
@ -808,8 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
kvm_request_pending(vcpu)) {
if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
isb(); /* Ensure work in x_flush_hwstate is committed */
kvm_pmu_sync_hwstate(vcpu);
@ -1035,7 +1053,7 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
unsigned int i, ret;
int phys_target = kvm_target_cpu();
u32 phys_target = kvm_target_cpu();
if (init->target != phys_target)
return -EINVAL;
@ -1104,6 +1122,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
/*
* Handle the "start in power-off" case.
@ -1215,6 +1234,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
/*
* We could owe a reset due to PSCI. Handle the pending reset
* here to ensure userspace register accesses are ordered after
* the reset.
*/
if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
kvm_reset_vcpu(vcpu);
if (ioctl == KVM_SET_ONE_REG)
r = kvm_arm_set_reg(vcpu, &reg);
else
@ -1696,11 +1723,6 @@ static bool init_psci_relay(void)
return true;
}
static int init_common_resources(void)
{
return kvm_set_ipa_limit();
}
static int init_subsystems(void)
{
int err = 0;
@ -1954,56 +1976,17 @@ static void _kvm_host_prot_finalize(void *discard)
WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
}
static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
{
return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
}
#define pkvm_mark_hyp_section(__section) \
pkvm_mark_hyp(__pa_symbol(__section##_start), \
__pa_symbol(__section##_end))
static int finalize_hyp_mode(void)
{
int cpu, ret;
if (!is_protected_kvm_enabled())
return 0;
ret = pkvm_mark_hyp_section(__hyp_idmap_text);
if (ret)
return ret;
ret = pkvm_mark_hyp_section(__hyp_text);
if (ret)
return ret;
ret = pkvm_mark_hyp_section(__hyp_rodata);
if (ret)
return ret;
ret = pkvm_mark_hyp_section(__hyp_bss);
if (ret)
return ret;
ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size);
if (ret)
return ret;
for_each_possible_cpu(cpu) {
phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
ret = pkvm_mark_hyp(start, end);
if (ret)
return ret;
start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
end = start + PAGE_SIZE;
ret = pkvm_mark_hyp(start, end);
if (ret)
return ret;
}
/*
* Exclude HYP BSS from kmemleak so that it doesn't get peeked
* at, which would end badly once the section is inaccessible.
* None of other sections should ever be introspected.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
/*
* Flip the static key upfront as that may no longer be possible
@ -2015,11 +1998,6 @@ static int finalize_hyp_mode(void)
return 0;
}
static void check_kvm_target_cpu(void *ret)
{
*(int *)ret = kvm_target_cpu();
}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
struct kvm_vcpu *vcpu;
@ -2079,7 +2057,6 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
int kvm_arch_init(void *opaque)
{
int err;
int ret, cpu;
bool in_hyp_mode;
if (!is_hyp_mode_available()) {
@ -2094,15 +2071,7 @@ int kvm_arch_init(void *opaque)
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
"Only trusted guests should be used on this system.\n");
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {
kvm_err("Error, CPU %d not supported!\n", cpu);
return -ENODEV;
}
}
err = init_common_resources();
err = kvm_set_ipa_limit();
if (err)
return err;

View file

@ -21,7 +21,7 @@
DBG_MDSCR_KDE | \
DBG_MDSCR_MDE)
static DEFINE_PER_CPU(u32, mdcr_el2);
static DEFINE_PER_CPU(u64, mdcr_el2);
/**
* save/restore_guest_debug_regs

View file

@ -48,6 +48,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
STATS_DESC_COUNTER(VCPU, mmio_exit_user),
STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
STATS_DESC_COUNTER(VCPU, signal_exits),
STATS_DESC_COUNTER(VCPU, exits)
};
@ -838,7 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
return 0;
}
int __attribute_const__ kvm_target_cpu(void)
u32 __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
unsigned long part_number = read_cpuid_part_number();
@ -870,7 +871,7 @@ int __attribute_const__ kvm_target_cpu(void)
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
u32 target = kvm_target_cpu();
if (target < 0)
return -ENODEV;

View file

@ -113,34 +113,20 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
* guest and host are using the same debug facilities it will be up to
* userspace to re-inject the correct exception for guest delivery.
*
* @return: 0 (while setting vcpu->run->exit_reason), -1 for error
* @return: 0 (while setting vcpu->run->exit_reason)
*/
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 esr = kvm_vcpu_get_esr(vcpu);
int ret = 0;
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.hsr = esr;
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
run->debug.arch.far = vcpu->arch.fault.far_el2;
fallthrough;
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_BKPT32:
case ESR_ELx_EC_BRK64:
break;
default:
kvm_err("%s: un-handled case esr: %#08x\n",
__func__, (unsigned int) esr);
ret = -1;
break;
}
return ret;
return 0;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
@ -292,11 +278,12 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
u64 elr_virt, u64 elr_phys,
u64 par, uintptr_t vcpu,
u64 far, u64 hpfar) {
u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
u64 elr_in_kimg = __phys_to_kimg(elr_phys);
u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
u64 mode = spsr & PSR_MODE_MASK;
/*
@ -309,20 +296,24 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
kvm_err("Invalid host exception to nVHE hyp!\n");
} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
(esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
struct bug_entry *bug = find_bug(elr_in_kimg);
const char *file = NULL;
unsigned int line = 0;
/* All hyp bugs, including warnings, are treated as fatal. */
if (bug)
bug_get_file_line(bug, &file, &line);
if (!is_protected_kvm_enabled() ||
IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
struct bug_entry *bug = find_bug(elr_in_kimg);
if (bug)
bug_get_file_line(bug, &file, &line);
}
if (file)
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
else
kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset);
kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
} else {
kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset);
kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
}
/*
@ -334,5 +325,5 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
spsr, elr, esr, far, hpfar, par, vcpu);
spsr, elr_virt, esr, far, hpfar, par, vcpu);
}

View file

@ -92,11 +92,15 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(0, pmselr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
}
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static inline void __deactivate_traps_common(void)
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3())
write_sysreg(0, pmuserenr_el0);

View file

@ -12,6 +12,32 @@
#include <asm/virt.h>
#include <nvhe/spinlock.h>
/*
* SW bits 0-1 are reserved to track the memory ownership state of each page:
* 00: The page is owned exclusively by the page-table owner.
* 01: The page is owned by the page-table owner, but is shared
* with another entity.
* 10: The page is shared with, but not owned by the page-table owner.
* 11: Reserved for future use (lending).
*/
enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0,
PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1,
};
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
enum pkvm_page_state state)
{
return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}
static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
return prot & PKVM_PAGE_STATE_PROT_MASK;
}
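/*
 * A minimal usage sketch, assuming only the helpers defined above:
 * pkvm_mkstate() clears any state already encoded in @prot before
 * applying the new one, so the annotation round-trips cleanly:
 *
 *	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
 *	WARN_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);
 */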
struct host_kvm {
struct kvm_arch arch;
struct kvm_pgtable pgt;
@ -20,16 +46,21 @@ struct host_kvm {
};
extern struct host_kvm host_kvm;
int __pkvm_prot_finalize(void);
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
extern const u8 pkvm_hyp_id;
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
static __always_inline void __load_host_stage2(void)
{
if (static_branch_likely(&kvm_protected_mode_initialized))
__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
else
write_sysreg(0, vttbr_el2);
}

View file

@ -23,8 +23,7 @@ int hyp_map_vectors(void);
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot);
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot);

View file

@ -15,6 +15,7 @@
#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>
typedef union hyp_spinlock {
u32 __val;
@ -89,4 +90,28 @@ static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
: "memory");
}
static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
hyp_spinlock_t lockval = READ_ONCE(*lock);
return lockval.owner != lockval.next;
}
#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
/*
* The __pkvm_init() path accesses protected data-structures without
* holding locks as the other CPUs are guaranteed not to enter EL2
* concurrently at this point in time. The point at which EL2 is
* initialized on all CPUs is reflected in the pkvm static key, so
* wait until it is set before checking the lock state.
*/
if (static_branch_likely(&kvm_protected_mode_initialized))
BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif
#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */

View file

@ -109,7 +109,7 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu)
__debug_switch_to_host_common(vcpu);
}
u32 __kvm_get_mdcr_el2(void)
u64 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}

View file

@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
@ -85,12 +86,24 @@ SYM_FUNC_START(__hyp_do_panic)
mov x29, x0
#ifdef CONFIG_NVHE_EL2_DEBUG
/* Ensure host stage-2 is disabled */
mrs x0, hcr_el2
bic x0, x0, #HCR_VM
msr hcr_el2, x0
isb
tlbi vmalls12e1
dsb nsh
#endif
/* Load the panic arguments into x0-7 */
mrs x0, esr_el2
get_vcpu_ptr x4, x5
mrs x5, far_el2
mrs x6, hpfar_el2
mov x7, xzr // Unused argument
mov x4, x3
mov x3, x2
hyp_pa x3, x6
get_vcpu_ptr x5, x6
mrs x6, far_el2
mrs x7, hpfar_el2
/* Enter the host, conditionally restoring the host context. */
cbz x29, __host_enter_without_restoring

View file

@ -140,14 +140,11 @@ static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}
static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt)
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, start, host_ctxt, 1);
DECLARE_REG(unsigned long, size, host_ctxt, 2);
DECLARE_REG(unsigned long, phys, host_ctxt, 3);
DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
DECLARE_REG(u64, pfn, host_ctxt, 1);
cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot);
cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}
static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
@ -163,14 +160,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}
static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
}
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@ -193,10 +182,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_restore_aprs),
HANDLE_FUNC(__pkvm_init),
HANDLE_FUNC(__pkvm_cpu_set_vector),
HANDLE_FUNC(__pkvm_create_mappings),
HANDLE_FUNC(__pkvm_host_share_hyp),
HANDLE_FUNC(__pkvm_create_private_mapping),
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_mark_hyp),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)

View file

@ -31,7 +31,7 @@ static struct hyp_pool host_s2_pool;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
static const u8 pkvm_hyp_id = 1;
const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
@ -89,6 +89,8 @@ static void prepare_host_vtcr(void)
id_aa64mmfr1_el1_sys_val, phys_shift);
}
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
@ -101,16 +103,17 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
if (ret)
return ret;
ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch,
&host_kvm.mm_ops, KVM_HOST_S2_FLAGS);
ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb);
if (ret)
return ret;
mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
mmu->arch = &host_kvm.arch;
mmu->pgt = &host_kvm.pgt;
mmu->vmid.vmid_gen = 0;
mmu->vmid.vmid = 0;
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
WRITE_ONCE(mmu->vmid.vmid, 0);
return 0;
}
@ -126,7 +129,7 @@ int __pkvm_prot_finalize(void)
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg(params->hcr_el2, hcr_el2);
__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
/*
* Make sure to have an ISB before the TLB maintenance below but only
@ -159,6 +162,11 @@ static int host_stage2_unmap_dev_all(void)
return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}
struct kvm_mem_range {
u64 start;
u64 end;
};
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
int cur, left = 0, right = hyp_memblock_nr;
@ -189,16 +197,26 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
return false;
}
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
return find_mem_range(phys, &range);
}
static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
return range->start <= addr && addr < range->end;
}
static bool range_is_memory(u64 start, u64 end)
{
struct kvm_mem_range r1, r2;
struct kvm_mem_range r;
if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
return false;
if (r1.start != r2.start)
if (!find_mem_range(start, &r))
return false;
return true;
return is_in_mem_range(end - 1, &r);
}
static inline int __host_stage2_idmap(u64 start, u64 end,
@ -208,60 +226,208 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
prot, &host_s2_pool);
}
static int host_stage2_idmap(u64 addr)
/*
* The pool has been provided with enough pages to cover all of memory with
* page granularity, but it is difficult to know how much of the MMIO range
* we will need to cover upfront, so we may need to 'recycle' the pages if we
* run out.
*/
#define host_stage2_try(fn, ...) \
({ \
int __ret; \
hyp_assert_lock_held(&host_kvm.lock); \
__ret = fn(__VA_ARGS__); \
if (__ret == -ENOMEM) { \
__ret = host_stage2_unmap_dev_all(); \
if (!__ret) \
__ret = fn(__VA_ARGS__); \
} \
__ret; \
})
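/*
 * A usage sketch, mirroring the locked helpers below:
 *
 *	ret = host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 *
 * On -ENOMEM the device mappings are torn down once to recycle their
 * pages and the call is retried; any other error, or a second -ENOMEM,
 * is returned unchanged.
 */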
static inline bool range_included(struct kvm_mem_range *child,
struct kvm_mem_range *parent)
{
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
struct kvm_mem_range range;
bool is_memory = find_mem_range(addr, &range);
return parent->start <= child->start && child->end <= parent->end;
}
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
struct kvm_mem_range cur;
kvm_pte_t pte;
u32 level;
int ret;
if (is_memory)
prot |= KVM_PGTABLE_PROT_X;
hyp_assert_lock_held(&host_kvm.lock);
ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
if (ret)
return ret;
if (kvm_pte_valid(pte))
return -EAGAIN;
if (pte)
return -EPERM;
do {
u64 granule = kvm_granule_size(level);
cur.start = ALIGN_DOWN(addr, granule);
cur.end = cur.start + granule;
level++;
} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
!(kvm_level_supports_block_mapping(level) &&
range_included(&cur, range)));
*range = cur;
return 0;
}
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
enum kvm_pgtable_prot prot)
{
hyp_assert_lock_held(&host_kvm.lock);
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
hyp_assert_lock_held(&host_kvm.lock);
return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
addr, size, &host_s2_pool, owner_id);
}
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
/*
* Block mappings must be used with care in the host stage-2 as a
* kvm_pgtable_stage2_map() operation targeting a page in the range of
* an existing block will delete the block under the assumption that
* mappings in the rest of the block range can always be rebuilt lazily.
* That assumption is correct for the host stage-2 with RWX mappings
* targeting memory or RW mappings targeting MMIO ranges (see
* host_stage2_idmap() below which implements some of the host memory
* abort logic). However, this is not safe for any other mappings where
* the host stage-2 page-table is in fact the only place where this
* state is stored. In all those cases, it is safer to use page-level
* mappings, hence avoiding losing the state to side-effects in
* kvm_pgtable_stage2_map().
*/
if (range_is_memory(addr, end))
return prot != PKVM_HOST_MEM_PROT;
else
return prot != PKVM_HOST_MMIO_PROT;
}
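/*
 * Concretely: the default idmap prot for a region (RWX for memory, RW
 * for MMIO) may use block mappings, while any other prot (e.g. one
 * annotated with a PKVM_PAGE_* state) is assumed to carry state and is
 * forced down to page granularity, so block-splitting in
 * kvm_pgtable_stage2_map() cannot discard it.
 */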
static int host_stage2_idmap(u64 addr)
{
struct kvm_mem_range range;
bool is_memory = find_mem_range(addr, &range);
enum kvm_pgtable_prot prot;
int ret;
prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
hyp_spin_lock(&host_kvm.lock);
ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range);
ret = host_stage2_adjust_range(addr, &range);
if (ret)
goto unlock;
ret = __host_stage2_idmap(range.start, range.end, prot);
if (ret != -ENOMEM)
goto unlock;
/*
* The pool has been provided with enough pages to cover all of memory
* with page granularity, but it is difficult to know how much of the
* MMIO range we will need to cover upfront, so we may need to 'recycle'
* the pages if we run out.
*/
ret = host_stage2_unmap_dev_all();
if (ret)
goto unlock;
ret = __host_stage2_idmap(range.start, range.end, prot);
ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
hyp_spin_unlock(&host_kvm.lock);
return ret;
}
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
static inline bool check_prot(enum kvm_pgtable_prot prot,
enum kvm_pgtable_prot required,
enum kvm_pgtable_prot denied)
{
return (prot & (required | denied)) == required;
}
int __pkvm_host_share_hyp(u64 pfn)
{
phys_addr_t addr = hyp_pfn_to_phys(pfn);
enum kvm_pgtable_prot prot, cur;
void *virt = __hyp_va(addr);
enum pkvm_page_state state;
kvm_pte_t pte;
int ret;
/*
* host_stage2_unmap_dev_all() currently relies on MMIO mappings being
* non-persistent, so don't allow changing page ownership in MMIO range.
*/
if (!range_is_memory(start, end))
if (!addr_is_memory(addr))
return -EINVAL;
hyp_spin_lock(&host_kvm.lock);
ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
&host_s2_pool, pkvm_hyp_id);
hyp_spin_lock(&pkvm_pgd_lock);
ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
if (ret)
goto unlock;
if (!pte)
goto map_shared;
/*
* Check attributes in the host stage-2 PTE. We need the page to be:
* - mapped RWX as we're sharing memory;
* - not borrowed, as that implies absence of ownership.
* Otherwise, we can't let it go through.
*/
cur = kvm_pgtable_stage2_pte_prot(pte);
prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) {
ret = -EPERM;
goto unlock;
}
state = pkvm_getstate(cur);
if (state == PKVM_PAGE_OWNED)
goto map_shared;
/*
* Tolerate double-sharing the same page, but this requires
* cross-checking the hypervisor stage-1.
*/
if (state != PKVM_PAGE_SHARED_OWNED) {
ret = -EPERM;
goto unlock;
}
ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL);
if (ret)
goto unlock;
/*
* If the page has been shared with the hypervisor, it must already
* be mapped as SHARED_BORROWED in its stage-1.
*/
cur = kvm_pgtable_hyp_pte_prot(pte);
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
if (!check_prot(cur, prot, ~prot))
ret = -EPERM;
goto unlock;
map_shared:
/*
* If the page is not yet shared, adjust mappings in both page-tables
* while both locks are held.
*/
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
BUG_ON(ret);
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot);
BUG_ON(ret);
unlock:
hyp_spin_unlock(&pkvm_pgd_lock);
hyp_spin_unlock(&host_kvm.lock);
return ret != -EAGAIN ? ret : 0;
return ret;
}
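/*
 * A summary of the host stage-2 states accepted above for a page backed
 * by memory:
 *
 *	empty PTE		-> map as shared in both page-tables
 *	valid, PAGE_OWNED	-> map as shared in both page-tables
 *	valid, SHARED_OWNED	-> verify the hyp stage-1 already maps it
 *				   SHARED_BORROWED, then succeed without
 *				   touching either page-table
 *	anything else		-> -EPERM
 */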
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)

View file

@ -23,8 +23,8 @@ u64 __io_map_base;
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
int err;
@ -67,13 +67,15 @@ unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
return addr;
}
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
unsigned long start = (unsigned long)from;
unsigned long end = (unsigned long)to;
unsigned long virt_addr;
phys_addr_t phys;
hyp_assert_lock_held(&pkvm_pgd_lock);
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
@ -81,7 +83,8 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
int err;
phys = hyp_virt_to_phys((void *)virt_addr);
err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot);
err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
phys, prot);
if (err)
return err;
}
@ -89,6 +92,17 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return 0;
}
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
int ret;
hyp_spin_lock(&pkvm_pgd_lock);
ret = pkvm_create_mappings_locked(from, to, prot);
hyp_spin_unlock(&pkvm_pgd_lock);
return ret;
}
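/*
 * The split above follows the usual _locked pattern:
 * pkvm_create_mappings_locked() asserts pkvm_pgd_lock is held, so
 * __pkvm_host_share_hyp(), which updates the hyp stage-1 and the host
 * stage-2 under both locks, can compose it safely, while
 * pkvm_create_mappings() keeps the previous self-locking behaviour.
 */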
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
unsigned long start, end;

View file

@ -58,6 +58,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
{
void *start, *end, *virt = hyp_phys_to_virt(phys);
unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
enum kvm_pgtable_prot prot;
int ret, i;
/* Recreate the hyp page-table using the early page allocator */
@ -83,10 +84,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO);
if (ret)
return ret;
ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
if (ret)
return ret;
@ -95,10 +92,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO);
if (ret)
return ret;
ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
if (ret)
return ret;
@ -117,6 +110,24 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
return ret;
}
/*
* Map the host's .bss and .rodata sections RO in the hypervisor, but
* transfer the ownership from the host to the hypervisor itself to
* make sure it can't be donated or shared with another entity.
*
* The ownership transition requires matching changes in the host
* stage-2. This will be done later (see finalize_host_mappings()) once
* the hyp_vmemmap is addressable.
*/
prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
if (ret)
return ret;
ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
if (ret)
return ret;
return 0;
}
@ -148,6 +159,57 @@ static void hpool_put_page(void *addr)
hyp_put_page(&hpool, addr);
}
static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
kvm_pte_t pte = *ptep;
phys_addr_t phys;
if (!kvm_pte_valid(pte))
return 0;
if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
phys = kvm_pte_to_phys(pte);
if (!addr_is_memory(phys))
return 0;
/*
* Adjust the host stage-2 mappings to match the ownership attributes
* configured in the hypervisor stage-1.
*/
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
switch (state) {
case PKVM_PAGE_OWNED:
return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
case PKVM_PAGE_SHARED_OWNED:
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
break;
default:
return -EINVAL;
}
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}
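/*
 * Note how the host view is derived by mirroring the hyp annotation: a
 * page the hypervisor owns disappears from the host (its ownership
 * moves to pkvm_hyp_id), a page the hypervisor shares as owner becomes
 * borrowed from the host's point of view, and a page the hypervisor
 * merely borrowed remains owned, and shared, by the host.
 */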
static int finalize_host_mappings(void)
{
struct kvm_pgtable_walker walker = {
.cb = finalize_host_mappings_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
};
return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker);
}
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@ -167,6 +229,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
ret = finalize_host_mappings();
if (ret)
goto out;
pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt,

View file

@ -41,7 +41,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
___activate_traps(vcpu);
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@ -69,12 +69,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char __kvm_hyp_host_vector[];
u64 mdcr_el2, cptr;
u64 cptr;
___deactivate_traps(vcpu);
mdcr_el2 = read_sysreg(mdcr_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
@ -92,13 +90,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
isb();
}
__deactivate_traps_common();
__deactivate_traps_common(vcpu);
mdcr_el2 &= MDCR_EL2_HPMN_MASK;
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
cptr = CPTR_EL2_DEFAULT;
@ -170,6 +163,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
struct kvm_s2_mmu *mmu;
bool pmu_switch_needed;
u64 exit_code;
@ -213,7 +207,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg32_restore_state(vcpu);
__sysreg_restore_state_nvhe(guest_ctxt);
__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
mmu = kern_hyp_va(vcpu->arch.hw_mmu);
__load_stage2(mmu, kern_hyp_va(mmu->arch));
__activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);

View file

@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
}
/*
* __load_guest_stage2() includes an ISB only when the AT
* __load_stage2() includes an ISB only when the AT
* workaround is applied. Take care of the opposite condition,
* ensuring that we always have an ISB, but not two ISBs back
* to back.
*/
__load_guest_stage2(mmu);
__load_stage2(mmu, kern_hyp_va(mmu->arch));
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

View file

@ -11,16 +11,12 @@
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
#define KVM_PTE_VALID BIT(0)
#define KVM_PTE_TYPE BIT(1)
#define KVM_PTE_TYPE_BLOCK 0
#define KVM_PTE_TYPE_PAGE 1
#define KVM_PTE_TYPE_TABLE 1
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
@ -40,6 +36,8 @@
#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 51)
#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
@ -48,9 +46,7 @@
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
KVM_PTE_LEAF_ATTR_HI_S2_XN)
#define KVM_PTE_LEAF_ATTR_S2_IGNORED GENMASK(58, 55)
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(63, 56)
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
#define KVM_MAX_OWNER_ID 1
struct kvm_pgtable_walk_data {
@ -61,17 +57,6 @@ struct kvm_pgtable_walk_data {
u64 end;
};
static u64 kvm_granule_shift(u32 level)
{
/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}
static u64 kvm_granule_size(u32 level)
{
return BIT(kvm_granule_shift(level));
}
#define KVM_PHYS_INVALID (-1ULL)
static bool kvm_phys_is_valid(u64 phys)
@ -79,15 +64,6 @@ static bool kvm_phys_is_valid(u64 phys)
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}
static bool kvm_level_supports_block_mapping(u32 level)
{
/*
* Reject invalid block mappings and don't bother with 4TB mappings for
* 52-bit PAs.
*/
return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}
static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
u64 granule = kvm_granule_size(level);
@ -135,11 +111,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
static bool kvm_pte_valid(kvm_pte_t pte)
{
return pte & KVM_PTE_VALID;
}
static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
if (level == KVM_PGTABLE_MAX_LEVELS - 1)
@ -151,16 +122,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}
static u64 kvm_pte_to_phys(kvm_pte_t pte)
{
u64 pa = pte & KVM_PTE_ADDR_MASK;
if (PAGE_SHIFT == 16)
pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
return pa;
}
static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
@ -326,6 +287,45 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
return _kvm_pgtable_walk(&walk_data);
}
struct leaf_walk_data {
kvm_pte_t pte;
u32 level;
};
static int leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
struct leaf_walk_data *data = arg;
data->pte = *ptep;
data->level = level;
return 0;
}
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
kvm_pte_t *ptep, u32 *level)
{
struct leaf_walk_data data;
struct kvm_pgtable_walker walker = {
.cb = leaf_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
.arg = &data,
};
int ret;
ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
PAGE_SIZE, &walker);
if (!ret) {
if (ptep)
*ptep = data.pte;
if (level)
*level = data.level;
}
return ret;
}
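/*
 * kvm_pgtable_get_leaf() walks a single page-sized window, so the
 * callback fires at most once; callers such as host_stage2_adjust_range()
 * and get_user_mapping_size() use it to retrieve both the PTE and the
 * level (hence the granule size) covering @addr.
 */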
struct hyp_map_data {
u64 phys;
kvm_pte_t attr;
@ -357,11 +357,47 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
*ptep = attr;
return 0;
}
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
u32 ap;
if (!kvm_pte_valid(pte))
return prot;
if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
prot |= KVM_PGTABLE_PROT_X;
ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
prot |= KVM_PGTABLE_PROT_R;
else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
prot |= KVM_PGTABLE_PROT_RW;
return prot;
}
static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
/*
* Tolerate KVM recreating the exact same mapping, or changing software
* bits if the existing mapping was valid.
*/
if (old == new)
return false;
if (!kvm_pte_valid(old))
return true;
return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW);
}
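/*
 * In effect, re-annotating a valid hyp mapping (e.g. flipping the
 * PKVM_PAGE_* state bits via pkvm_mkstate() when a page is shared) goes
 * through silently, while any other change to a valid mapping trips the
 * WARN_ON() and is dropped.
 */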
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep, struct hyp_map_data *data)
{
@ -371,9 +407,8 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, phys, level))
return false;
/* Tolerate KVM recreating the exact same mapping */
new = kvm_init_valid_leaf_pte(phys, data->attr, level);
if (old != new && !WARN_ON(kvm_pte_valid(old)))
if (hyp_pte_needs_update(old, new))
smp_store_release(ptep, new);
data->phys += granule;
@ -438,6 +473,8 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels;
pgt->mm_ops = mm_ops;
pgt->mmu = NULL;
pgt->force_pte_cb = NULL;
return 0;
}
@ -475,6 +512,9 @@ struct stage2_map_data {
void *memcache;
struct kvm_pgtable_mm_ops *mm_ops;
/* Force mappings to page granularity */
bool force_pte;
};
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
@ -539,11 +579,29 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
*ptep = attr;
return 0;
}
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
if (!kvm_pte_valid(pte))
return prot;
if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
prot |= KVM_PGTABLE_PROT_R;
if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
prot |= KVM_PGTABLE_PROT_W;
if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
prot |= KVM_PGTABLE_PROT_X;
return prot;
}
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
@ -588,6 +646,15 @@ static bool stage2_pte_executable(kvm_pte_t pte)
return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
struct stage2_map_data *data)
{
if (data->force_pte && (level < (KVM_PGTABLE_MAX_LEVELS - 1)))
return false;
return kvm_block_mapping_supported(addr, end, data->phys, level);
}
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
@ -597,7 +664,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
struct kvm_pgtable *pgt = data->mmu->pgt;
struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_block_mapping_supported(addr, end, phys, level))
if (!stage2_leaf_mapping_allowed(addr, end, level, data))
return -E2BIG;
if (kvm_phys_is_valid(phys))
@ -641,7 +708,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
if (data->anchor)
return 0;
if (!kvm_block_mapping_supported(addr, end, data->phys, level))
if (!stage2_leaf_mapping_allowed(addr, end, level, data))
return 0;
data->childp = kvm_pte_follow(*ptep, data->mm_ops);
@ -771,6 +838,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
.mmu = pgt->mmu,
.memcache = mc,
.mm_ops = pgt->mm_ops,
.force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
@ -802,6 +870,7 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
.memcache = mc,
.mm_ops = pgt->mm_ops,
.owner_id = owner_id,
.force_pte = true,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
@ -995,6 +1064,9 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
u32 level;
kvm_pte_t set = 0, clr = 0;
if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)
set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
@ -1043,9 +1115,11 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags)
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb)
{
size_t pgd_sz;
u64 vtcr = arch->vtcr;
@ -1063,6 +1137,7 @@ int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch
pgt->mm_ops = mm_ops;
pgt->mmu = &arch->mmu;
pgt->flags = flags;
pgt->force_pte_cb = force_pte_cb;
/* Ensure zeroed PGD pages are visible to the hardware walker */
dsb(ishst);
@ -1102,77 +1177,3 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
pgt->pgd = NULL;
}
#define KVM_PTE_LEAF_S2_COMPAT_MASK (KVM_PTE_LEAF_ATTR_S2_PERMS | \
KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
KVM_PTE_LEAF_ATTR_S2_IGNORED)
static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;
/*
* Compatible mappings are either invalid and owned by the page-table
* owner (whose id is 0), or valid with matching permission attributes.
*/
if (kvm_pte_valid(pte)) {
old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
if (old_attr != *new_attr)
return -EEXIST;
} else if (pte) {
return -EEXIST;
}
return 0;
}
int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
enum kvm_pgtable_prot prot,
struct kvm_mem_range *range)
{
kvm_pte_t attr;
struct kvm_pgtable_walker check_perm_walker = {
.cb = stage2_check_permission_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
.arg = &attr,
};
u64 granule, start, end;
u32 level;
int ret;
ret = stage2_set_prot_attr(pgt, prot, &attr);
if (ret)
return ret;
attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;
for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
granule = kvm_granule_size(level);
start = ALIGN_DOWN(addr, granule);
end = start + granule;
if (!kvm_level_supports_block_mapping(level))
continue;
if (start < range->start || range->end < end)
continue;
/*
* Check the presence of existing mappings with incompatible
* permissions within the current block range, and try one level
* deeper if one is found.
*/
ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
if (ret != -EEXIST)
break;
}
if (!ret) {
range->start = start;
range->end = end;
}
return ret;
}

View file

@ -20,7 +20,7 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu)
__debug_switch_to_host_common(vcpu);
}
u32 __kvm_get_mdcr_el2(void)
u64 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}

View file

@ -91,17 +91,9 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
}
void deactivate_traps_vhe_put(void)
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
mdcr_el2 &= MDCR_EL2_HPMN_MASK |
MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
MDCR_EL2_TPMS;
write_sysreg(mdcr_el2, mdcr_el2);
__deactivate_traps_common();
__deactivate_traps_common(vcpu);
}
/* Switch to the guest for VHE systems running in EL2 */
@ -124,11 +116,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
*
* We have already configured the guest's stage 1 translation in
* kvm_vcpu_load_sysregs_vhe above. We must now call
* __load_guest_stage2 before __activate_traps, because
* __load_guest_stage2 configures stage 2 translation, and
* __load_stage2 before __activate_traps, because
* __load_stage2 configures stage 2 translation, and
* __activate_traps clears HCR_EL2.TGE (among other things).
*/
__load_guest_stage2(vcpu->arch.hw_mmu);
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
__activate_traps(vcpu);
__kvm_adjust_pc(vcpu);

View file

@ -101,7 +101,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *host_ctxt;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
deactivate_traps_vhe_put(vcpu);
__sysreg_save_el1_state(guest_ctxt);
__sysreg_save_user_state(guest_ctxt);

View file

@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
*
* ARM erratum 1165522 requires some special handling (again),
* as we need to make sure both stages of translation are in
* place before clearing TGE. __load_guest_stage2() already
* place before clearing TGE. __load_stage2() already
* has an ISB in order to deal with this.
*/
__load_guest_stage2(mmu);
__load_stage2(mmu, mmu->arch);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);

View file

@ -260,10 +260,8 @@ static int __create_hyp_mappings(unsigned long start, unsigned long size,
{
int err;
if (!kvm_host_owns_hyp_mappings()) {
return kvm_call_hyp_nvhe(__pkvm_create_mappings,
start, size, phys, prot);
}
if (WARN_ON(!kvm_host_owns_hyp_mappings()))
return -EINVAL;
mutex_lock(&kvm_hyp_pgd_mutex);
err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
@ -283,6 +281,21 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
}
}
static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
{
phys_addr_t addr;
int ret;
for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
__phys_to_pfn(addr));
if (ret)
return ret;
}
return 0;
}
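/*
 * With the hunk below, this becomes the only path by which the kernel
 * can hand memory to EL2 in protected mode: create_hyp_mappings()
 * refuses anything but PAGE_HYP and funnels the range here, so each
 * page is shared through the __pkvm_host_share_hyp hypercall rather
 * than mapped directly by the host.
 */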
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
@ -303,6 +316,13 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
if (is_kernel_in_hyp_mode())
return 0;
if (!kvm_host_owns_hyp_mappings()) {
if (WARN_ON(prot != PAGE_HYP))
return -EPERM;
return pkvm_share_hyp(kvm_kaddr_to_phys(from),
kvm_kaddr_to_phys(to));
}
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
@ -434,6 +454,32 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
return 0;
}
static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
/* We shouldn't need any other callback to walk the PT */
.phys_to_virt = kvm_host_va,
};
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
struct kvm_pgtable pgt = {
.pgd = (kvm_pte_t *)kvm->mm->pgd,
.ia_bits = VA_BITS,
.start_level = (KVM_PGTABLE_MAX_LEVELS -
CONFIG_PGTABLE_LEVELS),
.mm_ops = &kvm_user_mm_ops,
};
kvm_pte_t pte = 0; /* Keep GCC quiet... */
u32 level = ~0;
int ret;
ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
VM_BUG_ON(ret);
VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
VM_BUG_ON(!(pte & PTE_VALID));
return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}
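/*
 * This reuses the generic page-table walker to inspect the userspace
 * stage-1 tables (kvm->mm->pgd): the size of the mapping backing @addr
 * in the host process tells us directly whether a block mapping is safe
 * at stage-2, replacing the kvm_is_transparent_hugepage() check below.
 */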
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.zalloc_page = stage2_memcache_zalloc_page,
.zalloc_pages_exact = kvm_host_zalloc_pages_exact,
@ -486,7 +532,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
mmu->arch = &kvm->arch;
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
mmu->vmid.vmid_gen = 0;
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
return 0;
out_destroy_pgtable:
@ -781,7 +827,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
* Returns the size of the mapping.
*/
static unsigned long
transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long hva, kvm_pfn_t *pfnp,
phys_addr_t *ipap)
{
@ -792,8 +838,8 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
* sure that the HVA and IPA are sufficiently aligned and that the
* block map is contained within the memslot.
*/
if (kvm_is_transparent_hugepage(pfn) &&
fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
/*
* The address we faulted on is backed by a transparent huge
* page. However, because we map the compound huge page and
@ -815,7 +861,7 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
*ipap &= PMD_MASK;
kvm_release_pfn_clean(pfn);
pfn &= ~(PTRS_PER_PMD - 1);
kvm_get_pfn(pfn);
get_page(pfn_to_page(pfn));
*pfnp = pfn;
return PMD_SIZE;
@ -1051,9 +1097,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
&pfn, &fault_ipa);
if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
vma_pagesize = fault_granule;
else
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
hva, &pfn,
&fault_ipa);
}
if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
/* Check the VMM hasn't introduced a new VM_SHARED VMA */

View file

@ -50,7 +50,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
int kvm_perf_init(void)
{
if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
static_branch_enable(&kvm_arm_pmu_available);
return perf_register_guest_info_callbacks(&kvm_guest_cbs);

View file

@ -373,7 +373,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
reg &= kvm_pmu_valid_counter_mask(vcpu);
}
return reg;
@ -564,20 +563,21 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
*/
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
int i;
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
} else {
kvm_pmu_disable_counter_mask(vcpu, mask);
kvm_pmu_disable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
}
if (val & ARMV8_PMU_PMCR_C)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
@ -745,7 +745,7 @@ int kvm_pmu_probe_pmuver(void)
struct perf_event_attr attr = { };
struct perf_event *event;
struct arm_pmu *pmu;
int pmuver = 0xf;
int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;
/*
* Create a dummy event that only counts user cycles. As we'll never
@ -770,7 +770,7 @@ int kvm_pmu_probe_pmuver(void)
if (IS_ERR(event)) {
pr_err_once("kvm: pmu event creation failed %ld\n",
PTR_ERR(event));
return 0xf;
return ID_AA64DFR0_PMUVER_IMP_DEF;
}
if (event->pmu) {
@ -923,7 +923,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
if (!vcpu->kvm->arch.pmuver)
vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
if (vcpu->kvm->arch.pmuver == 0xf)
if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
return -ENODEV;
switch (attr->attr) {

View file

@ -59,6 +59,12 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
kvm_vcpu_kick(vcpu);
}
static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
unsigned long affinity)
{
return !(affinity & ~MPIDR_HWID_BITMASK);
}
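/*
 * MPIDR_HWID_BITMASK covers the Aff3..Aff0 fields, so a CPU_ON or
 * AFFINITY_INFO argument with any other bit set is now rejected with
 * INVALID_PARAMS rather than silently truncated, as the old masking in
 * kvm_psci_vcpu_on() used to do.
 */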
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct vcpu_reset_state *reset_state;
@ -66,9 +72,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
struct kvm_vcpu *vcpu = NULL;
unsigned long cpu_id;
cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
cpu_id = smccc_get_arg1(source_vcpu);
if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
return PSCI_RET_INVALID_PARAMS;
vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
@ -126,6 +132,9 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
target_affinity = smccc_get_arg1(vcpu);
lowest_affinity_level = smccc_get_arg2(vcpu);
if (!kvm_psci_valid_affinity(vcpu, target_affinity))
return PSCI_RET_INVALID_PARAMS;
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
if (!target_affinity_mask)

View file

@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_reset_state reset_state;
int ret;
bool loaded;
u32 pstate;
mutex_lock(&vcpu->kvm->lock);
reset_state = vcpu->arch.reset_state;
WRITE_ONCE(vcpu->arch.reset_state.reset, false);
mutex_unlock(&vcpu->kvm->lock);
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);
@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
* Additional reset state handling that PSCI may have imposed on us.
* Must be done after all the sys_reg reset.
*/
if (vcpu->arch.reset_state.reset) {
unsigned long target_pc = vcpu->arch.reset_state.pc;
if (reset_state.reset) {
unsigned long target_pc = reset_state.pc;
/* Gracefully handle Thumb2 entry point */
if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
/* Propagate caller endianness */
if (vcpu->arch.reset_state.be)
if (reset_state.be)
kvm_vcpu_set_be(vcpu);
*vcpu_pc(vcpu) = target_pc;
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
vcpu->arch.reset_state.reset = false;
vcpu_set_reg(vcpu, 0, reset_state.r0);
}
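/*
 * Because reset_state is snapshotted under kvm->lock at the top of the
 * function and the pending flag cleared with WRITE_ONCE(), a concurrent
 * PSCI CPU_ON cannot update the target PC/r0 while this reset consumes
 * them.
 */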
/* Reset timer */
@ -311,31 +315,26 @@ u32 get_kvm_ipa_limit(void)
int kvm_set_ipa_limit(void)
{
unsigned int parange, tgran_2;
unsigned int parange;
u64 mmfr0;
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT);
/*
* IPA sizes beyond 48 bits cannot be supported on
* either 4K or 16K page size, so cap the limit at
* 48 bits in case the system reports a larger value.
*/
if (PAGE_SIZE != SZ_64K)
parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
/*
* Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
* Stage-2. If not, things will stop very quickly.
*/
switch (PAGE_SIZE) {
default:
case SZ_4K:
tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
break;
case SZ_16K:
tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
break;
case SZ_64K:
tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
break;
}
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
@ -369,7 +368,7 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
if (phys_shift) {
if (phys_shift > kvm_ipa_limit ||
phys_shift < 32)
phys_shift < ARM64_MIN_PARANGE_BITS)
return -EINVAL;
} else {
phys_shift = KVM_PHYS_SHIFT;

View file

@ -44,10 +44,6 @@
* 64bit interface.
*/
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
@ -318,14 +314,14 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
/*
* We want to avoid world-switching all the DBG registers all the
* time:
*
*
* - If we've touched any debug register, it is likely that we're
* going to touch more of them. It then makes sense to disable the
* traps and start doing the save/restore dance
* - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
* then mandatory to save/restore the registers, as the guest
* depends on them.
*
*
* For this, we use a DIRTY bit, indicating the guest has modified the
* debug registers, used as follows:
*
@ -603,6 +599,41 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
/* No PMU available, any PMU reg may UNDEF... */
if (!kvm_arm_support_pmu_v3())
return;
n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
n &= ARMV8_PMU_PMCR_N_MASK;
if (n)
mask |= GENMASK(n - 1, 0);
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= mask;
}
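/*
 * Worked example, assuming a host PMU with six event counters:
 * PMCR_EL0.N = 6, so mask = BIT(31) | GENMASK(5, 0), and only the cycle
 * counter bit and the six implemented counter bits can survive the
 * UNKNOWN value written by reset_unknown() in registers such as
 * PMCNTENSET_EL0.
 */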
static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}
static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}
static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
reset_unknown(vcpu, r);
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
@ -845,7 +876,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
kvm_pmu_disable_counter_mask(vcpu, val);
}
} else {
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
}
return true;
@ -869,7 +900,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* accessing PMINTENCLR_EL1 */
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
} else {
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
}
return true;
@ -891,7 +922,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* accessing PMOVSCLR_EL0 */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
} else {
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
}
return true;
@ -944,16 +975,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
#define PMU_SYS_REG(r) \
SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
.reset = reset_pmevcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
.reset = reset_pmevtyper, \
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@ -1026,8 +1059,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
@ -1038,40 +1069,40 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
switch (id) {
case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
val &= ~FEATURE(ID_AA64PFR0_SVE);
val &= ~FEATURE(ID_AA64PFR0_AMU);
val &= ~FEATURE(ID_AA64PFR0_CSV2);
val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~FEATURE(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
break;
case SYS_ID_AA64PFR1_EL1:
val &= ~FEATURE(ID_AA64PFR1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
if (kvm_has_mte(vcpu->kvm)) {
u64 pfr, mte;
pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
}
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(FEATURE(ID_AA64ISAR1_APA) |
FEATURE(ID_AA64ISAR1_API) |
FEATURE(ID_AA64ISAR1_GPA) |
FEATURE(ID_AA64ISAR1_GPI));
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_PMUVER_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
/* Hide SPE from guests */
val &= ~FEATURE(ID_AA64DFR0_PMSVER);
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
@ -1249,6 +1280,20 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return __set_id_reg(vcpu, rd, uaddr, true);
}
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
int err;
u64 val;
/* Perform the access even if we are going to ignore the value */
err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
if (err)
return err;
return 0;
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@ -1592,16 +1637,21 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.access = access_pmcnten, .reg = PMCNTENSET_EL0 },
{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
.access = access_pmovs, .reg = PMOVSSET_EL0 },
/*
* PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
* previously (and pointlessly) advertised...
*/
{ PMU_SYS_REG(SYS_PMSWINC_EL0),
.access = access_pmswinc, .reg = PMSWINC_EL0 },
.get_user = get_raz_id_reg, .set_user = set_wi_reg,
.access = access_pmswinc, .reset = NULL },
{ PMU_SYS_REG(SYS_PMSELR_EL0),
.access = access_pmselr, .reg = PMSELR_EL0 },
.access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
{ PMU_SYS_REG(SYS_PMCEID0_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCEID1_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
.access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
.access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
@@ -2106,23 +2156,6 @@ static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
return 0;
}
static int match_sys_reg(const void *key, const void *elt)
{
const unsigned long pval = (unsigned long)key;
const struct sys_reg_desc *r = elt;
return pval - reg_to_encoding(r);
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num)
{
unsigned long pval = reg_to_encoding(params);
return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
kvm_inject_undefined(vcpu);
@@ -2365,13 +2398,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
trace_kvm_handle_sys_reg(esr);
params.Op0 = (esr >> 20) & 3;
params.Op1 = (esr >> 14) & 0x7;
params.CRn = (esr >> 10) & 0xf;
params.CRm = (esr >> 1) & 0xf;
params.Op2 = (esr >> 17) & 0x7;
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = !(esr & 1);
ret = emulate_sys_reg(vcpu, &params);


@@ -11,6 +11,12 @@
#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__
#include <linux/bsearch.h>
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
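For illustration only: PMSELR_EL0 is Op0=3, Op1=3, CRn=9, CRm=12, Op2=5, so applying reg_to_encoding() to a desc or params describing it yields the same value as:

	u32 enc = sys_reg(3, 3, 9, 12, 5);	/* == reg_to_encoding() for PMSELR_EL0 */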
struct sys_reg_params {
u8 Op0;
u8 Op1;
@@ -21,6 +27,14 @@ struct sys_reg_params {
bool is_write;
};
#define esr_sys64_to_params(esr) \
((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3, \
.Op1 = ((esr) >> 14) & 0x7, \
.CRn = ((esr) >> 10) & 0xf, \
.CRm = ((esr) >> 1) & 0xf, \
.Op2 = ((esr) >> 17) & 0x7, \
.is_write = !((esr) & 1) })
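A minimal usage sketch, mirroring the kvm_handle_sys_reg() hunk above (kvm_vcpu_get_esr() and kvm_vcpu_sys_get_rt() are existing helpers, named here by way of example):

	u32 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params = esr_sys64_to_params(esr);

	/* regval is only meaningful for a write */
	params.regval = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));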
struct sys_reg_desc {
/* Sysreg string for debug */
const char *name;
@@ -152,6 +166,23 @@ static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
return i1->Op2 - i2->Op2;
}
static inline int match_sys_reg(const void *key, const void *elt)
{
const unsigned long pval = (unsigned long)key;
const struct sys_reg_desc *r = elt;
return pval - reg_to_encoding(r);
}
static inline const struct sys_reg_desc *
find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
unsigned int num)
{
unsigned long pval = reg_to_encoding(params);
return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
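A usage sketch, assuming the table is sorted by encoding (the order defined by cmp_sys_reg() above, which the binary search depends on):

	const struct sys_reg_desc *r = find_reg(&params, table, num);

	if (!r) {
		/* Encoding not described: the usual fallback is an UNDEF */
		kvm_inject_undefined(vcpu);
		return 1;
	}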
const struct sys_reg_desc *find_reg_by_id(u64 id,
struct sys_reg_params *params,
const struct sys_reg_desc table[],


@@ -78,13 +78,17 @@ TRACE_EVENT(kvm_arm_clear_debug,
TP_printk("flags: 0x%08x", __entry->guest_debug)
);
/*
* The dreg32 name is a leftover from a distant past. This will really
* output a 64bit value...
*/
TRACE_EVENT(kvm_arm_set_dreg32,
TP_PROTO(const char *name, __u32 value),
TP_PROTO(const char *name, __u64 value),
TP_ARGS(name, value),
TP_STRUCT__entry(
__field(const char *, name)
__field(__u32, value)
__field(__u64, value)
),
TP_fast_assign(
@@ -92,7 +96,7 @@ TRACE_EVENT(kvm_arm_set_dreg32,
__entry->value = value;
),
TP_printk("%s: 0x%08x", __entry->name, __entry->value)
TP_printk("%s: 0x%llx", __entry->name, __entry->value)
);
TRACE_DEFINE_SIZEOF(__u64);
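Call sites are unaffected by the widening; only the traced value grows to 64 bits. A hypothetical example (register choice illustrative):

	trace_kvm_arm_set_dreg32("TTBR0_EL1",
				 vcpu_read_sys_reg(vcpu, TTBR0_EL1));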


@@ -282,7 +282,7 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
case GIC_CPU_PRIMASK:
/*
* Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
* the PMR field as GICH_VMCR.VMPriMask rather than
* PMR field as GICH_VMCR.VMPriMask rather than
* GICC_PMR.Priority, so we expose the upper five bits of
* priority mask to userspace using the lower bits in the
* unsigned long.
@@ -329,7 +329,7 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
case GIC_CPU_PRIMASK:
/*
* Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
* the PMR field as GICH_VMCR.VMPriMask rather than
* PMR field as GICH_VMCR.VMPriMask rather than
* GICC_PMR.Priority, so we expose the upper five bits of
* priority mask to userspace using the lower bits in the
* unsigned long.
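An illustrative conversion, assuming the layout the comment describes (GICH_VMCR.VMPriMask holds the top five bits of the 8-bit GICC_PMR.Priority; variable names are hypothetical):

	u32 to_user = vmcr_pmr >> 3;	/* VMPriMask -> lower bits for userspace */
	u32 from_user = user_val << 3;	/* userspace value -> VMPriMask */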


@@ -60,6 +60,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
u32 val = cpuif->vgic_lr[lr];
u32 cpuid, intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
bool deactivated;
/* Extract the source vCPU id from the LR */
cpuid = val & GICH_LR_PHYSID_CPUID;
@@ -75,7 +76,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
/* Always preserve the active bit, note deactivation */
deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
if (irq->active && vgic_irq_is_sgi(intid))
@@ -96,36 +98,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
irq->pending_latch = false;
/*
* Level-triggered mapped IRQs are special because we only
* observe rising edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample
* the physical line and set the line level, because the
* device state could have changed or we simply need to
* process the still pending interrupt later.
*
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
*
* Another case is when the interrupt requires a helping hand
* on deactivation (no HW deactivation, for example).
*/
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (val & GICH_LR_PENDING_BIT) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
} else if (vgic_irq_needs_resampling(irq) &&
!(irq->active || irq->pending_latch)) {
resample = true;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
/* Handle resampling for mapped interrupts if required */
vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);


@@ -46,6 +46,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
u32 intid, cpuid;
struct vgic_irq *irq;
bool is_v2_sgi = false;
bool deactivated;
cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
@@ -68,7 +69,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
/* Always preserve the active bit, note deactivation */
deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
if (irq->active && is_v2_sgi)
@@ -89,36 +91,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
irq->pending_latch = false;
/*
* Level-triggered mapped IRQs are special because we only
* observe rising edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample
* the physical line and set the line level, because the
* device state could have changed or we simply need to
* process the still pending interrupt later.
*
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
*
* Another case is when the interrupt requires a helping hand
* on deactivation (no HW deactivation, for example).
*/
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (val & ICH_LR_PENDING_BIT) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
} else if (vgic_irq_needs_resampling(irq) &&
!(irq->active || irq->pending_latch)) {
resample = true;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
/* Handle resampling for mapped interrupts if required */
vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);


@@ -106,7 +106,6 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
if (intid >= VGIC_MIN_LPI)
return vgic_get_lpi(kvm, intid);
WARN(1, "Looking up struct vgic_irq for reserved INTID");
return NULL;
}
@@ -1022,3 +1021,41 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
return map_is_active;
}
/*
* Level-triggered mapped IRQs are special because we only observe rising
* edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample the physical
* line and set the line level, because the device state could have changed
* or we simply need to process the still pending interrupt later.
*
* We could also have entered the guest with the interrupt active+pending.
* On the next exit, we need to re-evaluate the pending state, as it could
* otherwise result in a spurious interrupt by injecting a now potentially
* stale pending state.
*
* If this causes us to lower the level, we have to also clear the physical
* active state, since we will otherwise never be told when the interrupt
* becomes asserted again.
*
* Another case is when the interrupt requires a helping hand on
* deactivation (no HW deactivation, for example).
*/
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending)
{
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (unlikely(vgic_irq_needs_resampling(irq))) {
resample = !(irq->active || irq->pending_latch);
} else if (lr_pending || (lr_deactivated && irq->line_level)) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
}


@@ -169,6 +169,8 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending);
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
phys_addr_t addr, phys_addr_t alignment);


@@ -1339,7 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
#if CONFIG_PGTABLE_LEVELS > 3
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@@ -1354,16 +1353,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
return 1;
}
int pud_clear_huge(pud_t *pudp)
{
if (!pud_sect(READ_ONCE(*pudp)))
return 0;
pud_clear(pudp);
return 1;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@@ -1378,6 +1367,14 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
return 1;
}
int pud_clear_huge(pud_t *pudp)
{
if (!pud_sect(READ_ONCE(*pudp)))
return 0;
pud_clear(pudp);
return 1;
}
int pmd_clear_huge(pmd_t *pmdp)
{
if (!pmd_sect(READ_ONCE(*pmdp)))
@@ -1385,7 +1382,6 @@ int pmd_clear_huge(pmd_t *pmdp)
pmd_clear(pmdp);
return 1;
}
#endif
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{


@@ -823,6 +823,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
return ret;
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
/*
* Nothing required here.
*
* In case of arm64, we rely on the firmware mitigation of
* Speculative Store Bypass as controlled via the ssbd kernel
* parameter. Whenever the mitigation is enabled, it works
* for all of the kernel code with no need to provide any
* additional instructions.
*/
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:


@@ -44,7 +44,6 @@ config H8300_H8MAX
bool "H8MAX"
select H83069
select RAMKERNEL
select HAVE_IDE
help
H8MAX Evaluation Board Support
More Information. (Japanese Only)


@@ -25,7 +25,6 @@ config IA64
select HAVE_ASM_MODVERSIONS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
select HAVE_IDE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD


@@ -23,7 +23,6 @@ config M68K
select HAVE_DEBUG_BUGVERBOSE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_UID16
select MMU_GATHER_NO_RANGE if MMU


@@ -33,6 +33,7 @@ config MAC
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
select HAVE_PATA_PLATFORM
select LEGACY_TIMER_TICK
help
This option enables support for the Apple Macintosh series of


@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
static struct clk_lookup m525x_clk_lookup[] = {
CLKDEV_INIT(NULL, "pll.0", &pll),
CLKDEV_INIT(NULL, "pll.0", &clk_pll),
CLKDEV_INIT(NULL, "sys.0", &clk_sys),
CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),


@@ -71,7 +71,6 @@ config MIPS
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING


@@ -1355,6 +1355,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
}
break;
case BPF_ST | BPF_NOSPEC: /* speculation barrier */
break;
case BPF_ST | BPF_B | BPF_MEM:
case BPF_ST | BPF_H | BPF_MEM:
case BPF_ST | BPF_W | BPF_MEM:


@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}


@@ -3,7 +3,6 @@ config PARISC
def_bool y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS


@@ -220,7 +220,6 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING


@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
# generation is minimal, it will just use r29 instead.
ccflags-y += $(call cc-option, -ffixed-r30)
asflags-y := -D__VDSO64__ -s
targets += vdso64.lds


@@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
