Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

net/core/dev.c
  9f30831390 ("net: add rcu safety to rtnl_prop_list_size()")
  723de3ebef ("net: free altname using an RCU callback")

net/unix/garbage.c
  11498715f2 ("af_unix: Remove io_uring code for GC.")
  25236c91b5 ("af_unix: Fix task hung while purging oob_skb in GC.")

drivers/net/ethernet/renesas/ravb_main.c
  ed4adc0720 ("net: ravb: Count packets instead of descriptors in GbEth RX path")
  c2da940857 ("ravb: Add Rx checksum offload support for GbEth")

net/mptcp/protocol.c
  bdd70eb689 ("mptcp: drop the push_pending field")
  28e5c13805 ("mptcp: annotate lockless accesses around read-mostly fields")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 73be9a3aab
Jakub Kicinski <kuba@kernel.org>  2024-02-15 14:01:43 -08:00
396 changed files with 2943 additions and 1872 deletions

@ -191,10 +191,11 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Geliang Tang <geliang@kernel.org> <geliang.tang@linux.dev>
Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@ -289,6 +290,7 @@ Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
John Keeping <john@keeping.me.uk> <john@metanate.com>
John Moon <john@jmoon.dev> <quic_johmoo@quicinc.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
@ -344,6 +346,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>


@ -1,4 +1,4 @@
What: /sys/class/<iface>/statistics/collisions
What: /sys/class/net/<iface>/statistics/collisions
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -6,7 +6,7 @@ Description:
Indicates the number of collisions seen by this network device.
This value might not be relevant with all MAC layers.
What: /sys/class/<iface>/statistics/multicast
What: /sys/class/net/<iface>/statistics/multicast
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -14,7 +14,7 @@ Description:
Indicates the number of multicast packets received by this
network device.
What: /sys/class/<iface>/statistics/rx_bytes
What: /sys/class/net/<iface>/statistics/rx_bytes
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -23,7 +23,7 @@ Description:
See the network driver for the exact meaning of when this
value is incremented.
What: /sys/class/<iface>/statistics/rx_compressed
What: /sys/class/net/<iface>/statistics/rx_compressed
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -32,7 +32,7 @@ Description:
network device. This value might only be relevant for interfaces
that support packet compression (e.g: PPP).
What: /sys/class/<iface>/statistics/rx_crc_errors
What: /sys/class/net/<iface>/statistics/rx_crc_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -41,7 +41,7 @@ Description:
by this network device. Note that the specific meaning might
depend on the MAC layer used by the interface.
What: /sys/class/<iface>/statistics/rx_dropped
What: /sys/class/net/<iface>/statistics/rx_dropped
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -51,7 +51,7 @@ Description:
packet processing. See the network driver for the exact
meaning of this value.
What: /sys/class/<iface>/statistics/rx_errors
What: /sys/class/net/<iface>/statistics/rx_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -59,7 +59,7 @@ Description:
Indicates the number of receive errors on this network device.
See the network driver for the exact meaning of this value.
What: /sys/class/<iface>/statistics/rx_fifo_errors
What: /sys/class/net/<iface>/statistics/rx_fifo_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -68,7 +68,7 @@ Description:
network device. See the network driver for the exact
meaning of this value.
What: /sys/class/<iface>/statistics/rx_frame_errors
What: /sys/class/net/<iface>/statistics/rx_frame_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -78,7 +78,7 @@ Description:
on the MAC layer protocol used. See the network driver for
the exact meaning of this value.
What: /sys/class/<iface>/statistics/rx_length_errors
What: /sys/class/net/<iface>/statistics/rx_length_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -87,7 +87,7 @@ Description:
error, oversized or undersized. See the network driver for the
exact meaning of this value.
What: /sys/class/<iface>/statistics/rx_missed_errors
What: /sys/class/net/<iface>/statistics/rx_missed_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -96,7 +96,7 @@ Description:
due to lack of capacity in the receive side. See the network
driver for the exact meaning of this value.
What: /sys/class/<iface>/statistics/rx_nohandler
What: /sys/class/net/<iface>/statistics/rx_nohandler
Date: February 2016
KernelVersion: 4.6
Contact: netdev@vger.kernel.org
@ -104,7 +104,7 @@ Description:
Indicates the number of received packets that were dropped on
an inactive device by the network core.
What: /sys/class/<iface>/statistics/rx_over_errors
What: /sys/class/net/<iface>/statistics/rx_over_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -114,7 +114,7 @@ Description:
(e.g: larger than MTU). See the network driver for the exact
meaning of this value.
What: /sys/class/<iface>/statistics/rx_packets
What: /sys/class/net/<iface>/statistics/rx_packets
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -122,7 +122,7 @@ Description:
Indicates the total number of good packets received by this
network device.
What: /sys/class/<iface>/statistics/tx_aborted_errors
What: /sys/class/net/<iface>/statistics/tx_aborted_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -132,7 +132,7 @@ Description:
a medium collision). See the network driver for the exact
meaning of this value.
What: /sys/class/<iface>/statistics/tx_bytes
What: /sys/class/net/<iface>/statistics/tx_bytes
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -143,7 +143,7 @@ Description:
transmitted packets or all packets that have been queued for
transmission.
What: /sys/class/<iface>/statistics/tx_carrier_errors
What: /sys/class/net/<iface>/statistics/tx_carrier_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -152,7 +152,7 @@ Description:
because of carrier errors (e.g: physical link down). See the
network driver for the exact meaning of this value.
What: /sys/class/<iface>/statistics/tx_compressed
What: /sys/class/net/<iface>/statistics/tx_compressed
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -161,7 +161,7 @@ Description:
this might only be relevant for devices that support
compression (e.g: PPP).
What: /sys/class/<iface>/statistics/tx_dropped
What: /sys/class/net/<iface>/statistics/tx_dropped
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -170,7 +170,7 @@ Description:
See the driver for the exact reasons as to why the packets were
dropped.
What: /sys/class/<iface>/statistics/tx_errors
What: /sys/class/net/<iface>/statistics/tx_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -179,7 +179,7 @@ Description:
a network device. See the driver for the exact reasons as to
why the packets were dropped.
What: /sys/class/<iface>/statistics/tx_fifo_errors
What: /sys/class/net/<iface>/statistics/tx_fifo_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -188,7 +188,7 @@ Description:
FIFO error. See the driver for the exact reasons as to why the
packets were dropped.
What: /sys/class/<iface>/statistics/tx_heartbeat_errors
What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -197,7 +197,7 @@ Description:
reported as heartbeat errors. See the driver for the exact
reasons as to why the packets were dropped.
What: /sys/class/<iface>/statistics/tx_packets
What: /sys/class/net/<iface>/statistics/tx_packets
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@ -206,7 +206,7 @@ Description:
device. See the driver for whether this reports the number of all
attempted or successful transmissions.
What: /sys/class/<iface>/statistics/tx_window_errors
What: /sys/class/net/<iface>/statistics/tx_window_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org


@ -28,7 +28,10 @@ $(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \
-name 'processed-schema*' \)
find_cmd = $(find_all_cmd) | sed 's|^$(srctree)/$(src)/||' | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | sed 's|^|$(srctree)/$(src)/|'
find_cmd = $(find_all_cmd) | \
sed 's|^$(srctree)/||' | \
grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | \
sed 's|^|$(srctree)/|'
CHK_DT_DOCS := $(shell $(find_cmd))
quiet_cmd_yamllint = LINT $(src)


@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ceva AHCI SATA Controller
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
The Ceva SATA controller mostly conforms to the AHCI interface with some


@ -29,19 +29,22 @@ properties:
audio-ports:
description:
Array of 8-bit values, 2 values per DAI (Documentation/sound/soc/dai.rst).
Array of 2 values per DAI (Documentation/sound/soc/dai.rst).
The implementation allows one or two DAIs.
If two DAIs are defined, they must be of different type.
$ref: /schemas/types.yaml#/definitions/uint32-matrix
minItems: 1
maxItems: 2
items:
minItems: 1
items:
- description: |
The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S
(see include/dt-bindings/display/tda998x.h).
enum: [ 1, 2 ]
- description:
The second value defines the tda998x AP_ENA reg content when the
DAI in question is used.
maximum: 0xff
'#sound-dai-cells':
enum: [ 0, 1 ]


@ -12,7 +12,8 @@ description:
PS_MODE). Every pin can be configured as input/output.
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:


@ -78,8 +78,8 @@ examples:
pcie@0 {
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x0 0x0 0x0>;
reg = <0x0 0x0 0x0 0x0 0x0 0x0>;
ranges = <0x02000000 0x0 0x100000 0x10000000 0x0 0x0>;
reg = <0x0 0x1000>;
device_type = "pci";
switch@0,0 {


@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Zynq UltraScale+ MPSoC and Versal reset
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
The Zynq UltraScale+ MPSoC and Versal has several different resets.


@ -42,7 +42,7 @@ properties:
resets:
description: Reset controller to reset the TPM
$ref: /schemas/types.yaml#/definitions/phandle
maxItems: 1
reset-gpios:
description: Output GPIO pin to reset the TPM


@ -55,9 +55,12 @@ properties:
samsung,sysreg:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: Should be phandle/offset pair. The phandle to the syscon node
which indicates the FSYSx sysreg interface and the offset of
the control register for UFS io coherency setting.
items:
- items:
- description: phandle to FSYSx sysreg node
- description: offset of the control register for UFS io coherency setting
description:
Phandle and offset to the FSYSx sysreg for UFS io coherency setting.
dma-coherent: true


@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SuperSpeed DWC3 USB SoC controller
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:


@ -16,8 +16,9 @@ description:
USB 2.0 traffic.
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Michal Simek <michal.simek@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:


@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx udc controller
maintainers:
- Piyush Mehta <piyush.mehta@amd.com>
- Mubin Sayyed <mubin.sayyed@amd.com>
- Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:


@ -423,8 +423,6 @@ operations:
- type
dump:
pre: dpll-lock-dumpit
post: dpll-unlock-dumpit
reply: *dev-attrs
-
@ -512,8 +510,6 @@ operations:
- fractional-frequency-offset
dump:
pre: dpll-lock-dumpit
post: dpll-unlock-dumpit
request:
attributes:
- id


@ -126,7 +126,7 @@ Users may also set the RoCE capability of the function using
`devlink port function set roce` command.
Users may also set the function as migratable using
'devlink port function set migratable' command.
`devlink port function set migratable` command.
Users may also set the IPsec crypto capability of the function using
`devlink port function set ipsec_crypto` command.


@ -136,8 +136,8 @@ struct_netpoll_info* npinfo -
possible_net_t nd_net - read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish
void* ml_priv
enum_netdev_ml_priv_type ml_priv_type
struct_pcpu_lstats__percpu* lstats
struct_pcpu_sw_netstats__percpu* tstats
struct_pcpu_lstats__percpu* lstats read_mostly dev_lstats_add()
struct_pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add()
struct_pcpu_dstats__percpu* dstats
struct_garp_port* garp_port
struct_mrp_port* mrp_port


@ -38,13 +38,13 @@ u32 max_window read_mostly -
u32 mss_cache read_mostly read_mostly tcp_rate_check_app_limited,tcp_current_mss,tcp_sync_mss,tcp_sndbuf_expand,tcp_tso_should_defer(tx);tcp_update_pacing_rate,tcp_clean_rtx_queue(rx)
u32 window_clamp read_mostly read_write tcp_rcv_space_adjust,__tcp_select_window
u32 rcv_ssthresh read_mostly - __tcp_select_window
u82 scaling_ratio
u8 scaling_ratio read_mostly read_mostly tcp_win_from_space
struct tcp_rack
u16 advmss - read_mostly tcp_rcv_space_adjust
u8 compressed_ack
u8:2 dup_ack_counter
u8:1 tlp_retrans
u8:1 tcp_usec_ts
u8:1 tcp_usec_ts read_mostly read_mostly
u32 chrono_start read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
u32[3] chrono_stat read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
u8:2 chrono_type read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)


@ -109,7 +109,7 @@ class KernelFeat(Directive):
else:
out_lines += line + "\n"
nodeList = self.nestedParse(out_lines, fname)
nodeList = self.nestedParse(out_lines, self.arguments[0])
return nodeList
def nestedParse(self, lines, fname):


@ -10811,11 +10811,11 @@ F: drivers/gpio/gpio-tangier.h
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
M: Zhi Wang <zhi.wang.linux@gmail.com>
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
S: Supported
W: https://01.org/igvt-g
W: https://github.com/intel/gvt-linux/wiki
T: git https://github.com/intel/gvt-linux.git
F: drivers/gpu/drm/i915/gvt/
@ -15344,7 +15344,7 @@ K: \bmdo_
NETWORKING [MPTCP]
M: Matthieu Baerts <matttbe@kernel.org>
M: Mat Martineau <martineau@kernel.org>
R: Geliang Tang <geliang.tang@linux.dev>
R: Geliang Tang <geliang@kernel.org>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
S: Maintained
@ -17202,7 +17202,7 @@ R: John Garry <john.g.garry@oracle.com>
R: Will Deacon <will@kernel.org>
R: James Clark <james.clark@arm.com>
R: Mike Leach <mike.leach@linaro.org>
R: Leo Yan <leo.yan@linaro.org>
R: Leo Yan <leo.yan@linux.dev>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: tools/build/feature/test-libopencsd.c


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -31,7 +31,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"nop \n"
".pushsection __jump_table, \"aw\" \n"
@ -47,7 +47,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"b %l[l_yes] \n"
".pushsection __jump_table, \"aw\" \n"


@ -11,7 +11,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
WASM(nop) "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
@ -25,7 +25,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"


@ -298,6 +298,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
if (fault & VM_FAULT_MAJOR)
flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {


@ -229,7 +229,7 @@ alternative_has_cap_likely(const unsigned long cpucap)
if (!cpucap_is_possible(cpucap))
return false;
asm_volatile_goto(
asm goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
:
: [cpucap] "i" (cpucap)
@ -247,7 +247,7 @@ alternative_has_cap_unlikely(const unsigned long cpucap)
if (!cpucap_is_possible(cpucap))
return false;
asm_volatile_goto(
asm goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
:
: [cpucap] "i" (cpucap)


@ -18,7 +18,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
asm goto(
"1: nop \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
asm goto(
"1: b %l[l_yes] \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"


@ -12,7 +12,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto(
asm goto(
"1: nop32 \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"
@ -29,7 +29,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
asm_volatile_goto(
asm goto(
"1: bsr32 %l[label] \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"


@ -22,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto(
asm goto(
"1: nop \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);
@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto(
asm goto(
"1: b %l[l_yes] \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);


@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" .set pop"
: "=&r" (sum), "=&r" (tmp)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
"0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
: "memory");
return csum_fold(sum);
}


@ -39,7 +39,7 @@ extern void jump_label_apply_nops(struct module *mod);
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
asm goto("1:\t" B_INSN " 2f\n\t"
"2:\t.insn\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
@ -53,7 +53,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
asm goto("1:\t" J_INSN " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"


@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->cp0_epc = val;
regs->cp0_cause &= ~CAUSEF_BD;
}
/* Query offset/name of register from its name/offset */
@ -154,6 +155,8 @@ static inline long regs_return_value(struct pt_regs *regs)
}
#define instruction_pointer(regs) ((regs)->cp0_epc)
extern unsigned long exception_ip(struct pt_regs *regs);
#define exception_ip(regs) exception_ip(regs)
#define profile_pc(regs) instruction_pointer(regs)
extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);


@ -31,6 +31,7 @@
#include <linux/seccomp.h>
#include <linux/ftrace.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
@ -48,6 +49,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
unsigned long exception_ip(struct pt_regs *regs)
{
return exception_epc(regs);
}
EXPORT_SYMBOL(exception_ip);
/*
* Called by kernel/ptrace.c when detaching..
*


@ -12,7 +12,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
@ -29,7 +29,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"


@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
@ -32,7 +32,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"


@ -74,7 +74,7 @@ __pu_failed: \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
asm goto( \
"1: " op " %0,0(%1) # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@ -83,7 +83,7 @@ __pu_failed: \
: label)
#else
#define __put_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
asm goto( \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@ -97,7 +97,7 @@ __pu_failed: \
__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm_volatile_goto( \
asm goto( \
"1: stw%X1 %0, %1\n" \
"2: stw%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
@ -146,7 +146,7 @@ do { \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
asm_goto_output( \
"1: "op" %0,0(%1) # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@ -155,7 +155,7 @@ do { \
: label)
#else
#define __get_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
asm_goto_output( \
"1: "op"%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@ -169,7 +169,7 @@ do { \
__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_volatile_goto( \
asm_goto_output( \
"1: lwz%X1 %0, %1\n" \
"2: lwz%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \


@ -230,7 +230,7 @@ again:
* This allows interrupts to be unmasked without hard disabling, and
* also without new hard interrupts coming in ahead of pending ones.
*/
asm_volatile_goto(
asm goto(
"1: \n"
" lbz 9,%0(13) \n"
" cmpwi 9,0 \n"


@ -20,7 +20,7 @@
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
#ifdef CONFIG_RISCV_ISA_ZBB
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@ -51,7 +51,7 @@ static inline unsigned int __arch_hweight8(unsigned int w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
# ifdef CONFIG_RISCV_ISA_ZBB
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);


@ -39,7 +39,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
{
int num;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@ -95,7 +95,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
{
int num;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@ -154,7 +154,7 @@ static __always_inline int variable_ffs(int x)
if (!x)
return 0;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@ -209,7 +209,7 @@ static __always_inline int variable_fls(unsigned int x)
if (!x)
return 0;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);


@ -53,7 +53,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
unsigned long fold_temp;
asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
:
:


@ -80,7 +80,7 @@ riscv_has_extension_likely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
asm goto(
ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
:
: [ext] "i" (ext)
@ -103,7 +103,7 @@ riscv_has_extension_unlikely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
asm goto(
ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
:
: [ext] "i" (ext)


@ -11,6 +11,9 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
bool arch_hugetlb_migration_supported(struct hstate *h);
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define __HAVE_ARCH_HUGE_PTE_CLEAR
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,


@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"


@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
#endif /* CONFIG_VMAP_STACK */
#endif /* _ASM_RISCV_STACKTRACE_H */


@ -16,7 +16,7 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
#ifdef CONFIG_MMU
if (tlb->fullmm || tlb->need_flush_all)
if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
flush_tlb_mm(tlb->mm);
else
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,


@ -75,6 +75,7 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */


@ -53,7 +53,7 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* support, so nop when Zbb is available and jump when Zbb is
* not available.
*/
asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
:
:
@ -170,7 +170,7 @@ do_csum_with_alignment(const unsigned char *buff, int len)
* support, so nop when Zbb is available and jump when Zbb is
* not available.
*/
asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
:
:
@ -178,7 +178,7 @@ do_csum_with_alignment(const unsigned char *buff, int len)
: no_zbb);
#ifdef CONFIG_32BIT
asm_volatile_goto(".option push \n\
asm_goto_output(".option push \n\
.option arch,+zbb \n\
rori %[fold_temp], %[csum], 16 \n\
andi %[offset], %[offset], 1 \n\
@ -193,7 +193,7 @@ do_csum_with_alignment(const unsigned char *buff, int len)
return (unsigned short)csum;
#else /* !CONFIG_32BIT */
asm_volatile_goto(".option push \n\
asm_goto_output(".option push \n\
.option arch,+zbb \n\
rori %[fold_temp], %[csum], 32 \n\
add %[csum], %[fold_temp], %[csum] \n\
@ -257,7 +257,7 @@ do_csum_no_alignment(const unsigned char *buff, int len)
* support, so nop when Zbb is available and jump when Zbb is
* not available.
*/
asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
:
:


@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte;
}
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
unsigned long hp_size = huge_page_size(h);
switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
return P4D_SIZE - PUD_SIZE;
#endif
case PMD_SIZE:
return PUD_SIZE - PMD_SIZE;
case napot_cont_size(NAPOT_CONT64KB_ORDER):
return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
default:
break;
}
return 0UL;
}
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
return entry;
}
static void clear_flush(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
ptep_get_and_clear(mm, addr, ptep);
flush_tlb_range(&vma, saddr, addr);
}
/*
* When dealing with NAPOT mappings, the privileged specification indicates that
* "if an update needs to be made, the OS generally should first mark all of the
* PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
* within the range, [...] then update the PTE(s), as described in Section
* 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
* arm64.
*/
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
pte_t pte,
unsigned long sz)
{
unsigned long hugepage_shift;
unsigned long hugepage_shift, pgsize;
int i, pte_num;
if (sz >= PGDIR_SIZE)
@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
hugepage_shift = PAGE_SHIFT;
pte_num = sz >> hugepage_shift;
for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
pgsize = 1 << hugepage_shift;
if (!pte_present(pte)) {
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_ptes(mm, addr, ptep, pte, 1);
return;
}
if (!pte_napot(pte)) {
set_ptes(mm, addr, ptep, pte, 1);
return;
}
clear_flush(mm, addr, ptep, pgsize, pte_num);
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
}
@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_clear(mm, addr, ptep);
}
static __init bool is_napot_size(unsigned long size)
static bool is_napot_size(unsigned long size)
{
unsigned long order;
@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
#else
static __init bool is_napot_size(unsigned long size)
static bool is_napot_size(unsigned long size)
{
return false;
}
@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
return pmd_leaf(pmd);
}
bool __init arch_hugetlb_valid_size(unsigned long size)
static bool __hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
return true;
@ -363,6 +421,16 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
return false;
}
bool __init arch_hugetlb_valid_size(unsigned long size)
{
return __hugetlb_valid_size(size);
}
bool arch_hugetlb_migration_supported(struct hstate *h)
{
return __hugetlb_valid_size(huge_page_size(h));
}
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{


@ -1385,6 +1385,10 @@ void __init misc_mem_init(void)
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* The entire VMEMMAP region has been populated. Flush TLB for this region */
local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
#endif
zone_sizes_init();
arch_reserve_crashkernel();
memblock_dump_all();


@ -66,6 +66,7 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
@ -233,4 +234,5 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
cpumask_clear(&batch->cpumask);
}


@ -25,7 +25,7 @@
*/
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("0: brcl 0,%l[label]\n"
asm goto("0: brcl 0,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("0: brcl 15,%l[label]\n"
asm goto("0: brcl 15,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"


@ -10,7 +10,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
@ -26,7 +26,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"b %l[l_yes]\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"


@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
asm_volatile_goto("1: jmp 6f\n"
asm goto("1: jmp 6f\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"


@ -379,7 +379,7 @@ config X86_CMOV
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
default "5" if X86_32 && X86_CMPXCHG64
default "4"


@ -106,8 +106,7 @@ extra_header_fields:
.word 0 # MinorSubsystemVersion
.long 0 # Win32VersionValue
.long setup_size + ZO__end + pecompat_vsize
# SizeOfImage
.long setup_size + ZO__end # SizeOfImage
.long salign # SizeOfHeaders
.long 0 # CheckSum
@ -143,7 +142,7 @@ section_table:
.ascii ".setup"
.byte 0
.byte 0
.long setup_size - salign # VirtualSize
.long pecompat_fstart - salign # VirtualSize
.long salign # VirtualAddress
.long pecompat_fstart - salign # SizeOfRawData
.long salign # PointerToRawData
@ -156,8 +155,8 @@ section_table:
#ifdef CONFIG_EFI_MIXED
.asciz ".compat"
.long 8 # VirtualSize
.long setup_size + ZO__end # VirtualAddress
.long pecompat_fsize # VirtualSize
.long pecompat_fstart # VirtualAddress
.long pecompat_fsize # SizeOfRawData
.long pecompat_fstart # PointerToRawData
@ -172,17 +171,16 @@ section_table:
* modes this image supports.
*/
.pushsection ".pecompat", "a", @progbits
.balign falign
.set pecompat_vsize, salign
.balign salign
.globl pecompat_fstart
pecompat_fstart:
.byte 0x1 # Version
.byte 8 # Size
.word IMAGE_FILE_MACHINE_I386 # PE machine type
.long setup_size + ZO_efi32_pe_entry # Entrypoint
.byte 0x0 # Sentinel
.popsection
#else
.set pecompat_vsize, 0
.set pecompat_fstart, setup_size
#endif
.ascii ".text"


@ -24,6 +24,9 @@ SECTIONS
.text : { *(.text .text.*) }
.text32 : { *(.text32) }
.pecompat : { *(.pecompat) }
PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
. = ALIGN(16);
.rodata : { *(.rodata*) }
@ -36,9 +39,6 @@ SECTIONS
. = ALIGN(16);
.data : { *(.data*) }
.pecompat : { *(.pecompat) }
PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
.signature : {
setup_sig = .;
LONG(0x5a5aaa55)


@ -168,7 +168,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
asm_volatile_goto(
asm goto(
ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
".pushsection .altinstr_aux,\"ax\"\n"
"6:\n"


@ -24,7 +24,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
asm goto("1:"
"jmp %l[l_yes] # objtool NOPs this \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (2 | branch) : : l_yes);
@ -38,7 +38,7 @@ l_yes:
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
asm goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
@ -52,7 +52,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
asm goto("1:"
"jmp %l[l_yes]\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);


@ -13,7 +13,7 @@
#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
({ \
bool c = false; \
asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
asm goto (fullop "; j" #cc " %l[cc_label]" \
: : [var] "m" (_var), ## __VA_ARGS__ \
: clobbers : cc_label); \
if (0) { \


@ -205,7 +205,7 @@ static inline void clwb(volatile void *__p)
#ifdef CONFIG_X86_USER_SHADOW_STACK
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
asm goto("1: wrussq %[val], (%[addr])\n"
_ASM_EXTABLE(1b, %l[fail])
:: [addr] "r" (addr), [val] "r" (val)
:: fail);


@ -133,7 +133,7 @@ extern int __get_user_bad(void);
#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label) \
asm_volatile_goto("\n" \
asm goto("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
_ASM_EXTABLE_UA(1b, %l2) \
@ -295,7 +295,7 @@ do { \
} while (0)
#define __get_user_asm(x, addr, itype, ltype, label) \
asm_volatile_goto("\n" \
asm_goto_output("\n" \
"1: mov"itype" %[umem],%[output]\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: [output] ltype(x) \
@ -375,7 +375,7 @@ do { \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_volatile_goto("\n" \
asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
@ -394,7 +394,7 @@ do { \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_volatile_goto("\n" \
asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
@ -477,7 +477,7 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_goto(x, addr, itype, ltype, label) \
asm_volatile_goto("\n" \
asm goto("\n" \
"1: mov"itype" %0,%1\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: : ltype(x), "m" (__m(addr)) \


@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
* Attempt to restore the FPU registers directly from user memory.
* Pagefaults are handled and any errors returned are fatal.
*/
static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
bool fx_only, unsigned int size)
static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
{
struct fpu *fpu = &current->thread.fpu;
int ret;
/* Restore enabled features only. */
xrestore &= fpu->fpstate->user_xfeatures;
retry:
fpregs_lock();
/* Ensure that XFD is up to date */
@ -309,7 +310,7 @@ retry:
if (ret != X86_TRAP_PF)
return false;
if (!fault_in_readable(buf, size))
if (!fault_in_readable(buf, fpu->fpstate->user_size))
goto retry;
return false;
}
@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
struct user_i387_ia32_struct env;
bool success, fx_only = false;
union fpregs_state *fpregs;
unsigned int state_size;
u64 user_xfeatures = 0;
if (use_xsave()) {
@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
return false;
fx_only = !fx_sw_user.magic1;
state_size = fx_sw_user.xstate_size;
user_xfeatures = fx_sw_user.xfeatures;
} else {
user_xfeatures = XFEATURE_MASK_FPSSE;
state_size = fpu->fpstate->user_size;
}
if (likely(!ia32_fxstate)) {
/* Restore the FPU registers directly from user memory. */
return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
state_size);
return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
}
/*


@ -8,7 +8,7 @@
#define svm_asm(insn, clobber...) \
do { \
asm_volatile_goto("1: " __stringify(insn) "\n\t" \
asm goto("1: " __stringify(insn) "\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
::: clobber : fault); \
return; \
@ -18,7 +18,7 @@ fault: \
#define svm_asm1(insn, op1, clobber...) \
do { \
asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
asm goto("1: " __stringify(insn) " %0\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
:: op1 : clobber : fault); \
return; \
@ -28,7 +28,7 @@ fault: \
#define svm_asm2(insn, op1, op2, clobber...) \
do { \
asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
asm goto("1: " __stringify(insn) " %1, %0\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
:: op1, op2 : clobber : fault); \
return; \


@ -738,7 +738,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
*/
static int kvm_cpu_vmxoff(void)
{
asm_volatile_goto("1: vmxoff\n\t"
asm goto("1: vmxoff\n\t"
_ASM_EXTABLE(1b, %l[fault])
::: "cc", "memory" : fault);
@ -2784,7 +2784,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
cr4_set_bits(X86_CR4_VMXE);
asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
asm goto("1: vmxon %[vmxon_pointer]\n\t"
_ASM_EXTABLE(1b, %l[fault])
: : [vmxon_pointer] "m"(vmxon_pointer)
: : fault);


@ -94,7 +94,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
asm_volatile_goto("1: vmread %[field], %[output]\n\t"
asm_goto_output("1: vmread %[field], %[output]\n\t"
"jna %l[do_fail]\n\t"
_ASM_EXTABLE(1b, %l[do_exception])
@ -188,7 +188,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field)
#define vmx_asm1(insn, op1, error_args...) \
do { \
asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
asm goto("1: " __stringify(insn) " %0\n\t" \
".byte 0x2e\n\t" /* branch not taken hint */ \
"jna %l[error]\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
@ -205,7 +205,7 @@ fault: \
#define vmx_asm2(insn, op1, op2, error_args...) \
do { \
asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
asm goto("1: " __stringify(insn) " %1, %0\n\t" \
".byte 0x2e\n\t" /* branch not taken hint */ \
"jna %l[error]\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \


@ -163,23 +163,23 @@ SYM_CODE_END(__get_user_8_handle_exception)
#endif
/* get_user */
_ASM_EXTABLE(1b, __get_user_handle_exception)
_ASM_EXTABLE(2b, __get_user_handle_exception)
_ASM_EXTABLE(3b, __get_user_handle_exception)
_ASM_EXTABLE_UA(1b, __get_user_handle_exception)
_ASM_EXTABLE_UA(2b, __get_user_handle_exception)
_ASM_EXTABLE_UA(3b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(4b, __get_user_handle_exception)
_ASM_EXTABLE_UA(4b, __get_user_handle_exception)
#else
_ASM_EXTABLE(4b, __get_user_8_handle_exception)
_ASM_EXTABLE(5b, __get_user_8_handle_exception)
_ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
_ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
#endif
/* __get_user */
_ASM_EXTABLE(6b, __get_user_handle_exception)
_ASM_EXTABLE(7b, __get_user_handle_exception)
_ASM_EXTABLE(8b, __get_user_handle_exception)
_ASM_EXTABLE_UA(6b, __get_user_handle_exception)
_ASM_EXTABLE_UA(7b, __get_user_handle_exception)
_ASM_EXTABLE_UA(8b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(9b, __get_user_handle_exception)
_ASM_EXTABLE_UA(9b, __get_user_handle_exception)
#else
_ASM_EXTABLE(9b, __get_user_8_handle_exception)
_ASM_EXTABLE(10b, __get_user_8_handle_exception)
_ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
_ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
#endif


@ -133,15 +133,15 @@ SYM_CODE_START_LOCAL(__put_user_handle_exception)
RET
SYM_CODE_END(__put_user_handle_exception)
_ASM_EXTABLE(1b, __put_user_handle_exception)
_ASM_EXTABLE(2b, __put_user_handle_exception)
_ASM_EXTABLE(3b, __put_user_handle_exception)
_ASM_EXTABLE(4b, __put_user_handle_exception)
_ASM_EXTABLE(5b, __put_user_handle_exception)
_ASM_EXTABLE(6b, __put_user_handle_exception)
_ASM_EXTABLE(7b, __put_user_handle_exception)
_ASM_EXTABLE(9b, __put_user_handle_exception)
_ASM_EXTABLE_UA(1b, __put_user_handle_exception)
_ASM_EXTABLE_UA(2b, __put_user_handle_exception)
_ASM_EXTABLE_UA(3b, __put_user_handle_exception)
_ASM_EXTABLE_UA(4b, __put_user_handle_exception)
_ASM_EXTABLE_UA(5b, __put_user_handle_exception)
_ASM_EXTABLE_UA(6b, __put_user_handle_exception)
_ASM_EXTABLE_UA(7b, __put_user_handle_exception)
_ASM_EXTABLE_UA(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
_ASM_EXTABLE(8b, __put_user_handle_exception)
_ASM_EXTABLE(10b, __put_user_handle_exception)
_ASM_EXTABLE_UA(8b, __put_user_handle_exception)
_ASM_EXTABLE_UA(10b, __put_user_handle_exception)
#endif


@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
if (!resched_name)
goto fail_mem;
per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
if (!callfunc_name)
goto fail_mem;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
if (!debug_name)
goto fail_mem;
per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
if (!callfunc_name)
goto fail_mem;
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
return 0;
fail_mem:
rc = -ENOMEM;
fail:
xen_smp_intr_free(cpu);
return rc;


@ -13,7 +13,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
"_nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
* make it reachable and wrap both into a no-transform block
* to avoid any assembler interference with this.
*/
asm_volatile_goto("1:\n\t"
asm goto("1:\n\t"
".begin no-transform\n\t"
"_j %l[l_yes]\n\t"
"2:\n\t"


@ -1353,6 +1353,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
lockdep_assert_held(&iocg->waitq.lock);
/*
* If the delay is set by another CPU, we may be in the past. No need to
* change anything if so. This avoids decay calculation underflow.
*/
if (time_before64(now->now, iocg->delay_at))
return false;
/* calculate the current delay in effect - 1/2 every second */
tdelta = now->now - iocg->delay_at;
if (iocg->delay)


@ -163,9 +163,9 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
return time_before(jiffies, wb->dirty_sleep + HZ);
return time_before(jiffies, bdi->last_bdp_sleep + HZ);
}
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,


@ -480,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU 37XX does not require 10m D3hot delay */
if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
/* NPU does not require 10m D3hot delay */
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
if (ret) {


@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
(ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
(ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
vdev->wa.disable_d0i3_msg = true;


@ -525,7 +525,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);


@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
tile_enable = (~tile_disable) & TILE_MAX_MASK;
hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

View File

@ -294,7 +294,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
return -ENOENT;
if (job->file_priv->has_mmu_faults)
job_status = VPU_JSM_STATUS_ABORTED;
job_status = DRM_IVPU_JOB_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
@ -315,7 +315,7 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
static int ivpu_job_submit(struct ivpu_job *job)


@ -72,10 +72,10 @@
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT)
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@ -475,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
return 0;
}
static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
{
return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
}
static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
{
return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
}
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
u64 *queue_buffer = q->base;
int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
u64 *queue_buffer = cmdq->base;
int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
if (ivpu_mmu_queue_is_full(cmdq)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
@ -560,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
@ -874,14 +885,10 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
if (ivpu_mmu_queue_is_empty(evtq))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
@ -902,6 +909,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
}
ivpu_mmu_user_context_mark_invalid(vdev, ssid);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
}


@ -680,32 +680,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
static DECLARE_RWSEM(cxl_cper_rw_sem);
static cxl_cper_callback cper_callback;
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
/*
* General Media Event Record
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
#define CPER_SEC_CXL_GEN_MEDIA_GUID \
GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
/*
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
#define CPER_SEC_CXL_DRAM_GUID \
GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
/*
* Memory Module Event Record
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
#define CPER_SEC_CXL_MEM_MODULE_GUID \
GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
static void cxl_cper_post_event(enum cxl_event_type event_type,
struct cxl_cper_event_rec *rec)
{


@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
char *n = new;
int i;
get_random_bytes(new, size);
/*
* This could be nicer and more efficient but we shouldn't
* super care.
*/
for (i = 0; i < size; i++)
while (n[i] == o[i])
get_random_bytes(&n[i], 1);
}
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, val_test, val_last;
unsigned int val;
u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config;
@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 val[2];
u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
if (config.val_format_endian == REGMAP_ENDIAN_BIG)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
be16_to_cpu(val[0])));
else
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
le16_to_cpu(val[0])));
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
sizeof(u16) * 2));
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2]));
be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2]));
le16_to_cpu(val[i - 2]));
}
break;
case 4:
KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
/*
* The value written via _write() was translated by the core,
* translate the original copy for comparison purposes.
*/
if (config.val_format_endian == REGMAP_ENDIAN_BIG)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
regmap_exit(map);
}
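For illustration only (not taken from this commit): a tiny standalone sketch of the byte-order translation the comment above refers to, assuming a little-endian host. to_be16() below is a stand-in for the kernel's cpu_to_be16(); a value pushed through the formatted write path ends up in bus byte order, so the reference copy has to be translated before a byte-wise comparison succeeds.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte swap; on a little-endian host this matches cpu_to_be16(). */
static uint16_t to_be16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t val = 0x1234;			/* value handed to the formatted write */
	uint8_t hw[2] = { 0x12, 0x34 };		/* what a big-endian bus would carry   */

	/* Raw comparison fails until the reference copy is converted to
	 * bus byte order, which is what the updated test now does. */
	printf("untranslated match: %d\n", memcmp(&val, hw, 2) == 0);	/* 0 */
	val = to_be16(val);
	printf("translated match:   %d\n", memcmp(&val, hw, 2) == 0);	/* 1 */
	return 0;
}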


@ -1593,14 +1593,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
/* Ensure no requests in virtqueues before deleting vqs. */
blk_mq_freeze_queue(vblk->disk->queue);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
blk_mq_quiesce_queue(vblk->disk->queue);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@ -1618,7 +1619,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
blk_mq_unquiesce_queue(vblk->disk->queue);
blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif


@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0;
}
if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
cn_filter, (void *)filter_data) == -ESRCH)
atomic_set(&proc_event_num_listeners, 0);
cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
cn_filter, (void *)filter_data);
local_unlock(&local_event.lock);
}


@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t));
__entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t));
__entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t));
__entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;


@ -1206,6 +1206,7 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
if (!dpll_pin_available(pin))
@ -1225,6 +1226,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
mutex_unlock(&dpll_lock);
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@ -1380,6 +1383,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
ctx->idx) {
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
@ -1396,6 +1400,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
mutex_unlock(&dpll_lock);
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@ -1446,20 +1452,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
mutex_unlock(&dpll_lock);
}
int dpll_lock_dumpit(struct netlink_callback *cb)
{
mutex_lock(&dpll_lock);
return 0;
}
int dpll_unlock_dumpit(struct netlink_callback *cb)
{
mutex_unlock(&dpll_lock);
return 0;
}
int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{


@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_DEVICE_GET,
.start = dpll_lock_dumpit,
.dumpit = dpll_nl_device_get_dumpit,
.done = dpll_unlock_dumpit,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
},
{
@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_PIN_GET,
.start = dpll_lock_dumpit,
.dumpit = dpll_nl_pin_get_dumpit,
.done = dpll_unlock_dumpit,
.policy = dpll_pin_get_dump_nl_policy,
.maxattr = DPLL_A_PIN_ID,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,


@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
void
dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
int dpll_lock_dumpit(struct netlink_callback *cb);
int dpll_unlock_dumpit(struct netlink_callback *cb);
int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);


@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
if (root_device == NULL) {
if (card->gap_count == 0) {
/*
* If self IDs have inconsistent gap counts, do a
* bus reset ASAP. The config rom read might never
* complete, so don't wait for it. However, still
* send a PHY configuration packet prior to the
* bus reset. The PHY configuration packet might
* fail, but 1394-2008 8.4.5.2 explicitly permits
* it in this case, so it should be safe to try.
*/
new_root_id = local_id;
/*
* We must always send a bus reset if the gap count
* is inconsistent, so bypass the 5-reset limit.
*/
card->bm_retries = 0;
} else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.


@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
int md_size = md->num_pages << EFI_PAGE_SHIFT;
u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))


@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
}
}
struct ignore_section {
guid_t guid;
const char *name;
};
static const struct ignore_section ignore_sections[] = {
{ .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
{ .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
{ .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
};
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
if (guid_equal(sec_type, &ignore_sections[i].guid)) {
printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
return;
}
}
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);


@ -143,15 +143,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
* Special purpose memory is 'soft reserved', which means it
* is set aside initially, but can be hotplugged back in or
* be assigned to the dax driver after boot.
*/
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return false;
/*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
@ -196,6 +187,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;
if (is_memory(md)) {
/*
* Special purpose memory is 'soft reserved', which
* means it is set aside initially. Don't add a memblock
* for it now so that it can be hotplugged back in or
* be assigned to the dax driver after boot.
*/
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
continue;
early_init_dt_add_memory_arch(paddr, size);
if (!is_usable_memory(md))


@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
# For LoongArch, keep all the symbols in .init section and make sure that no
# absolute symbols references exist.


@ -14,6 +14,7 @@
* @max: the address that the last allocated memory page shall not
* exceed
* @align: minimum alignment of the base of the allocation
* @memory_type: the type of memory to allocate
*
* Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
* to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will


@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed,
int memory_type, unsigned long alloc_limit);
int memory_type, unsigned long alloc_min,
unsigned long alloc_max);
efi_status_t efi_random_get_seed(void);


@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {


@ -17,7 +17,7 @@
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
unsigned long align_shift,
u64 alloc_limit)
u64 alloc_min, u64 alloc_max)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
alloc_limit);
alloc_max);
if (region_end < size)
return 0;
first_slot = round_up(md->phys_addr, align);
first_slot = round_up(max(md->phys_addr, alloc_min), align);
last_slot = round_down(region_end - size + 1, align);
if (first_slot > last_slot)
@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed,
int memory_type,
unsigned long alloc_limit)
unsigned long alloc_min,
unsigned long alloc_max)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
alloc_max);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
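For illustration only (not taken from this commit): a small self-contained sketch of the slot-counting arithmetic that the reworked get_entry_num_slots() performs once an alloc_min/alloc_max window is applied. num_slots() below is a simplified stand-in, not the EFI stub code, and the figures in main() are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

/*
 * Number of aligned placements of 'size' bytes inside a region once it is
 * clamped to [alloc_min, alloc_max]; 0 if nothing fits.
 */
static uint64_t num_slots(uint64_t region_start, uint64_t region_len,
			  uint64_t size, uint64_t align,
			  uint64_t alloc_min, uint64_t alloc_max)
{
	uint64_t region_end = region_start + region_len - 1;
	uint64_t first, last;

	if (region_end > alloc_max)
		region_end = alloc_max;
	if (region_end < size)
		return 0;

	first = ROUND_UP(region_start > alloc_min ? region_start : alloc_min, align);
	last  = ROUND_DOWN(region_end - size + 1, align);

	return first > last ? 0 : (last - first) / align + 1;
}

int main(void)
{
	/*
	 * A 1 MiB region at 16 MiB, allocating 64 KiB with 64 KiB alignment,
	 * but nothing below 16.5 MiB: 8 candidate slots remain.
	 */
	printf("%llu\n", (unsigned long long)num_slots(0x1000000, 0x100000,
						       0x10000, 0x10000,
						       0x1080000, UINT64_MAX));
	return 0;
}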


@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
void efi_adjust_memory_range_protection(unsigned long start,
unsigned long size)
efi_status_t efi_adjust_memory_range_protection(unsigned long start,
unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
efi_call_proto(memattr, clear_memory_attributes, rounded_start,
rounded_end - rounded_start, EFI_MEMORY_XP);
return;
status = efi_call_proto(memattr, clear_memory_attributes,
rounded_start,
rounded_end - rounded_start,
EFI_MEMORY_XP);
if (status != EFI_SUCCESS)
efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
return status;
}
if (efi_dxe_table == NULL)
return;
return EFI_SUCCESS;
/*
* Don't modify memory region attributes, they are
@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
if (status != EFI_SUCCESS)
return;
break;
next = desc.base_address + desc.length;
@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
unprotect_start,
unprotect_start + unprotect_size,
status);
break;
}
}
return EFI_SUCCESS;
}
static void setup_unaccepted_memory(void)
@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
seed[0], EFI_LOADER_CODE,
LOAD_PHYSICAL_ADDR,
EFI_X86_KERNEL_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
return status;
@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
efi_adjust_memory_range_protection(addr, kernel_total_size);
return EFI_SUCCESS;
return efi_adjust_memory_range_protection(addr, kernel_total_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,


@ -5,8 +5,8 @@
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
void efi_adjust_memory_range_protection(unsigned long start,
unsigned long size);
efi_status_t efi_adjust_memory_range_protection(unsigned long start,
unsigned long size);
#ifdef CONFIG_X86_64
efi_status_t efi_setup_5level_paging(void);


@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
}
status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
goto free_cmdline;


@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
int md_size = md->num_pages << EFI_PAGE_SHIFT;
u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))


@ -1005,15 +1005,15 @@ err_remove_of_chip:
err_free_gpiochip_mask:
gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
goto err_print_message;
}
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
kfree_const(gdev->label);
err_free_descs:


@ -1078,6 +1078,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
/* indicate amdgpu suspension status */
bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;


@ -2476,6 +2476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@ -2490,6 +2491,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);


@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
}
}
if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:

Some files were not shown because too many files have changed in this diff.