Linux 6.0-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmMVBhkeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGZ4QH/13A3r0SLRUzVn0X
 fsw7V7algKkA4GosU+UDekolGLU8CF3v4wn9dnZN6FNfgBeS+fBOZBriFLytZK/q
 EUXyEtKIpjWQT0f9JwLAq1vMfFWqMIYoEJbzKVtJSsBkaZfhmOINiSbgmR5mCxvv
 s0XibQPs8POYbUEYdTosbs7PNUPBXrx1kBhMNLFCctoM38J7GB2woVBGl8s81v1a
 evT0+fGuXLZskIRpgFUlHOYAuiuGPAXmYnt92fiRuSSDQo40Q7LDLp2V3RrOizuz
 RdzZQdsTBZ1vM6jk2GPSoQMith6L71XuXA91q7NvRF4kxT5QI17o2q6tgnkeEfPJ
 Xgw4MJU=
 =Lr3i
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmMcr88ACgkQJNaLcl1U
 h9AaLQf+L8mUb5E6YvghO1aP/jSfd8XDxfAjELpsq+lGUF1Rvc0J0J9DXvuI2Hmh
 8HX6kARgEI0yL3wcjDpaH64yOuA8OZOLTiiQiDvcuOdSm5PwkjeNL51bbL0OIEUE
 6d83LKnWDY3bCyT43zLy87qll7/fMBkgseKfT7/GJJk1R/u8dA8Lb48ERk8vL0fO
 32TJe0r/3q1FNH1vcC8ueqUdF8VSBltdhCaFhej4JQBLuMuyFBdW0rXhI4MoyiuQ
 2ld1KcVzIngcOFRodHruBswFdtrZxR8A/H2KY8TFJKVcsH3b3sjW0KWqdyZ+VORe
 /NKGAuW0I+unn6+WlyaIXl9EeRzvaQ==
 =Ms7J
 -----END PGP SIGNATURE-----

spi: Merge tag 'v6.0-rc4' into spi-6.1

Linux 6.0-rc4 so we can test on BeagleBone again.
Mark Brown 2022-09-10 16:39:51 +01:00
commit 824723ccf5
327 changed files with 3080 additions and 1763 deletions


@ -1,9 +1,9 @@
.. _readme:
Linux kernel release 5.x <http://kernel.org/>
Linux kernel release 6.x <http://kernel.org/>
=============================================
These are the release notes for Linux version 5. Read them carefully,
These are the release notes for Linux version 6. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@ -63,7 +63,7 @@ Installing the kernel source
directory where you have permissions (e.g. your home directory) and
unpack it::
xz -cd linux-5.x.tar.xz | tar xvf -
xz -cd linux-6.x.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@ -72,12 +72,12 @@ Installing the kernel source
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- You can also upgrade between 5.x releases by patching. Patches are
- You can also upgrade between 6.x releases by patching. Patches are
distributed in the xz format. To install by patching, get all the
newer patch files, enter the top level directory of the kernel source
(linux-5.x) and execute::
(linux-6.x) and execute::
xz -cd ../patch-5.x.xz | patch -p1
xz -cd ../patch-6.x.xz | patch -p1
Replace "x" for all versions bigger than the version "x" of your current
source tree, **in_order**, and you should be ok. You may want to remove
@ -85,13 +85,13 @@ Installing the kernel source
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
Unlike patches for the 6.x kernels, patches for the 6.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
directly to the base 5.x kernel. For example, if your base kernel is 5.0
and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
patch -R) **before** applying the 5.0.3 patch. You can read more on this in
directly to the base 6.x kernel. For example, if your base kernel is 6.0
and you want to apply the 6.0.3 patch, you must not first apply the 6.0.1
and 6.0.2 patches. Similarly, if you are running kernel version 6.0.2 and
want to jump to 6.0.3, you must first reverse the 6.0.2 patch (that is,
patch -R) **before** applying the 6.0.3 patch. You can read more on this in
:ref:`Documentation/process/applying-patches.rst <applying_patches>`.
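For instance, moving a 6.0.2 tree to 6.0.3 looks like this (a minimal sketch, assuming the stable patches have already been downloaded into the parent directory, as in the earlier examples)::
   xz -cd ../patch-6.0.2.xz | patch -R -p1
   xz -cd ../patch-6.0.3.xz | patch -p1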
Alternatively, the script patch-kernel can be used to automate this
@ -114,7 +114,7 @@ Installing the kernel source
Software requirements
---------------------
Compiling and running the 5.x kernels requires up-to-date
Compiling and running the 6.x kernels requires up-to-date
versions of various software packages. Consult
:ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
required and how to get updates for these packages. Beware that using
@ -132,12 +132,12 @@ Build directory for the kernel
place for the output files (including .config).
Example::
kernel source code: /usr/src/linux-5.x
kernel source code: /usr/src/linux-6.x
build directory: /home/name/build/kernel
To configure and build the kernel, use::
cd /usr/src/linux-5.x
cd /usr/src/linux-6.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install


@ -50,10 +50,10 @@ For a short example, users can monitor the virtual address space of a given
workload as below. ::
# cd /sys/kernel/mm/damon/admin/
# echo 1 > kdamonds/nr && echo 1 > kdamonds/0/contexts/nr
# echo 1 > kdamonds/nr_kdamonds && echo 1 > kdamonds/0/contexts/nr_contexts
# echo vaddr > kdamonds/0/contexts/0/operations
# echo 1 > kdamonds/0/contexts/0/targets/nr
# echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid
# echo 1 > kdamonds/0/contexts/0/targets/nr_targets
# echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid_target
# echo on > kdamonds/0/state
Files Hierarchy
@ -366,12 +366,12 @@ memory rate becomes larger than 60%, or lower than 30%". ::
# echo 1 > kdamonds/0/contexts/0/schemes/nr_schemes
# cd kdamonds/0/contexts/0/schemes/0
# # set the basic access pattern and the action
# echo 4096 > access_patterns/sz/min
# echo 8192 > access_patterns/sz/max
# echo 0 > access_patterns/nr_accesses/min
# echo 5 > access_patterns/nr_accesses/max
# echo 10 > access_patterns/age/min
# echo 20 > access_patterns/age/max
# echo 4096 > access_pattern/sz/min
# echo 8192 > access_pattern/sz/max
# echo 0 > access_pattern/nr_accesses/min
# echo 5 > access_pattern/nr_accesses/max
# echo 10 > access_pattern/age/min
# echo 20 > access_pattern/age/max
# echo pageout > action
# # set quotas
# echo 10 > quotas/ms


@ -23,3 +23,4 @@ Block
stat
switching-sched
writeback_cache_control
ublk


@ -0,0 +1,253 @@
.. SPDX-License-Identifier: GPL-2.0
===========================================
Userspace block device driver (ublk driver)
===========================================
Overview
========
ublk is a generic framework for implementing block device logic from userspace.
The motivation behind it is that moving virtual block drivers such as loop,
nbd and similar into userspace can be very helpful. It can help to implement
new virtual block devices such as ublk-qcow2 (there have been several attempts
at implementing a qcow2 driver in the kernel).
Userspace block devices are attractive because:
- They can be written in many programming languages.
- They can use libraries that are not available in the kernel.
- They can be debugged with tools familiar to application developers.
- Crashes do not kernel panic the machine.
- Bugs are likely to have a lower security impact than bugs in kernel
code.
- They can be installed and updated independently of the kernel.
- They can be used to easily simulate block devices with user-specified
parameters/settings for test/debug purposes.
The ublk block device (``/dev/ublkb*``) is added by the ublk driver. Any IO
request on the device is forwarded to the ublk userspace program. For
convenience, in this document ``ublk server`` refers to a generic ublk
userspace program. ``ublksrv`` [#userspace]_ is one such implementation. It
provides the ``libublksrv`` [#userspace_lib]_ library for conveniently
developing specific userspace block devices, and also includes generic block
device types such as loop and null. Richard W.M. Jones wrote the userspace nbd
device ``nbdublk`` [#userspace_nbdublk]_ based on ``libublksrv`` [#userspace_lib]_.
After the IO is handled by userspace, the result is committed back to the
driver, thus completing the request cycle. This way, any specific IO handling
logic is totally done by userspace, such as loop's IO handling, NBD's IO
communication, or qcow2's IO mapping.
``/dev/ublkb*`` is driven by a blk-mq request-based driver. Each request is
assigned a queue-wide unique tag. The ublk server assigns a unique tag to each
IO too, which is 1:1 mapped with the IO of ``/dev/ublkb*``.
Both forwarding the IO request and committing the IO handling result are done
via ``io_uring`` passthrough commands; that is why ublk is also an io_uring
based block driver. It has been observed that io_uring passthrough commands
can give better IOPS than regular block IO, which is one reason ublk is a
high performance implementation of a userspace block device: not only is the
IO request communication done by io_uring, but the preferred IO handling in
the ublk server is io_uring based too.
ublk provides a control interface to set/get ublk block device parameters.
The interface is extendable and kabi compatible: basically any ublk request
queue parameter or ublk generic feature parameter can be set/get via the
interface. Thus, ublk is a generic userspace block device framework.
For example, it is easy to set up a ublk device with specified block
parameters from userspace.
Using ublk
==========
ublk requires a userspace ublk server to handle the real block device logic.
Below is an example of using ``ublksrv`` to provide a ublk-based loop device.
- add a device::
ublk add -t loop -f ublk-loop.img
- format with xfs, then use it::
mkfs.xfs /dev/ublkb0
mount /dev/ublkb0 /mnt
# do anything. all IOs are handled by io_uring
...
umount /mnt
- list the devices with their info::
ublk list
- delete the device::
ublk del -a
ublk del -n $ublk_dev_id
See usage details in README of ``ublksrv`` [#userspace_readme]_.
Design
======
Control plane
-------------
The ublk driver provides a global misc device node (``/dev/ublk-control``) for
managing and controlling ublk devices with the help of several control commands:
- ``UBLK_CMD_ADD_DEV``
Add a ublk char device (``/dev/ublkc*``) which the ublk server talks to for
IO command communication. Basic device info is sent together with this
command. It sets the UAPI structure ``ublksrv_ctrl_dev_info``,
such as ``nr_hw_queues``, ``queue_depth``, and the max IO request buffer size;
this info is negotiated with the driver and sent back to the server (see the
example after this command list). When this command is completed, the basic
device info is immutable.
- ``UBLK_CMD_SET_PARAMS`` / ``UBLK_CMD_GET_PARAMS``
Set or get parameters of the device, which can be either generic feature
related, or request queue limit related, but can't be IO logic specific,
because the driver does not handle any IO logic. This command has to be
sent before sending ``UBLK_CMD_START_DEV``.
- ``UBLK_CMD_START_DEV``
After the server prepares userspace resources (such as creating per-queue
pthread & io_uring for handling ublk IO), this command is sent to the
driver for allocating & exposing ``/dev/ublkb*``. Parameters set via
``UBLK_CMD_SET_PARAMS`` are applied for creating the device.
- ``UBLK_CMD_STOP_DEV``
Halt IO on ``/dev/ublkb*`` and remove the device. When this command returns,
ublk server will release resources (such as destroying per-queue pthread &
io_uring).
- ``UBLK_CMD_DEL_DEV``
Remove ``/dev/ublkc*``. When this command returns, the allocated ublk device
number can be reused.
- ``UBLK_CMD_GET_QUEUE_AFFINITY``
When ``/dev/ublkc`` is added, the driver creates block layer tagset, so
that each queue's affinity info is available. The server sends
``UBLK_CMD_GET_QUEUE_AFFINITY`` to retrieve queue affinity info. It can
set up the per-queue context efficiently, such as bind affine CPUs with IO
pthread and try to allocate buffers in IO thread context.
- ``UBLK_CMD_GET_DEV_INFO``
For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's
responsibility to save IO target specific info in userspace.
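As a rough illustration of how the negotiated parameters surface in practice, a ublk server such as ``ublksrv`` exposes them on its command line when adding a device. This is a sketch only; the ``-q``/``-d`` option names are assumptions about ublksrv's tooling, not part of the kernel control interface::
   # two hardware queues, queue depth 128, loop target backed by a file
   ublk add -t loop -q 2 -d 128 -f ublk-loop.img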
Data plane
----------
The ublk server needs to create a per-queue IO pthread & io_uring for handling
IO commands via io_uring passthrough. The per-queue IO pthread
focuses on IO handling and shouldn't handle any control & management
tasks.
The server's IO is assigned a unique tag, which is 1:1 mapped with the IO
request of ``/dev/ublkb*``.
The UAPI structure ``ublksrv_io_desc`` is defined for describing each IO from
the driver. A fixed mmapped area (array) on ``/dev/ublkc*`` is provided for
exporting IO info to the server, such as IO offset, length, OP/flags and
buffer address. Each ``ublksrv_io_desc`` instance can be indexed via queue id
and IO tag directly.
The following IO commands are communicated via io_uring passthrough command,
and each command is only for forwarding the IO and committing the result
with the specified IO tag in the command data:
- ``UBLK_IO_FETCH_REQ``
Sent from the server IO pthread for fetching future incoming IO requests
destined to ``/dev/ublkb*``. This command is sent only once from the server
IO pthread, for the ublk driver to set up the IO forwarding environment.
- ``UBLK_IO_COMMIT_AND_FETCH_REQ``
When an IO request is destined to ``/dev/ublkb*``, the driver stores
the IO's ``ublksrv_io_desc`` in the specified mapped area; the
previously received IO command for this IO tag (either ``UBLK_IO_FETCH_REQ``
or ``UBLK_IO_COMMIT_AND_FETCH_REQ``) is then completed, so the server gets
the IO notification via io_uring.
After the server handles the IO, its result is committed back to the
driver by sending ``UBLK_IO_COMMIT_AND_FETCH_REQ`` back. Once ublkdrv
receives this command, it parses the result and completes the request to
``/dev/ublkb*``, and in the meantime sets up the environment for fetching
future requests with the same IO tag. That is, ``UBLK_IO_COMMIT_AND_FETCH_REQ``
is reused both for fetching a request and for committing back an IO result.
- ``UBLK_IO_NEED_GET_DATA``
With ``UBLK_F_NEED_GET_DATA`` enabled, a WRITE request is first
issued to the ublk server without a data copy. The IO backend of the ublk
server then receives the request, allocates a data buffer and embeds its
address inside this new io command. After the kernel driver gets the command,
the data is copied from the request pages to this backend buffer. Finally, the
backend receives the request again with the data to be written and can
truly handle the request.
``UBLK_IO_NEED_GET_DATA`` adds one additional round-trip and one
io_uring_enter() syscall, so any user who thinks it may lower performance
should not enable UBLK_F_NEED_GET_DATA. The ublk server pre-allocates an IO
buffer for each IO by default, and any new project should try to use this
buffer to communicate with the ublk driver. However, existing projects may
break or be unable to consume the new buffer interface; that's why this
command was added for backwards compatibility, so that existing projects
can still consume their existing buffers.
- data copy between ublk server IO buffer and ublk block IO request
For a WRITE request, the driver needs to copy the block IO request pages into
the server buffer (pages) before notifying the server of the incoming IO, so
that the server can handle the WRITE request.
When the server handles a READ request and sends
``UBLK_IO_COMMIT_AND_FETCH_REQ`` to the driver, ublkdrv needs to copy the data
that was read into the server buffer (pages) back to the IO request pages.
Future development
==================
Container-aware ublk device
----------------------------
The ublk driver doesn't handle any IO logic. Its function is well defined
for now, and only very limited userspace interfaces are needed, which are
also well defined. It is possible to make ublk devices container-aware block
devices in the future, as Stefan Hajnoczi suggested [#stefan]_, by removing
the ADMIN privilege requirement.
Zero copy
---------
Zero copy is a generic requirement for nbd, fuse and similar drivers. A
problem [#xiaoguang]_ Xiaoguang mentioned is that pages mapped to userspace
can't be remapped any more in the kernel with existing mm interfaces. This can
occur when direct IO is issued to ``/dev/ublkb*``. Also, he reported that
big requests (IO size >= 256 KB) may benefit a lot from zero copy.
References
==========
.. [#userspace] https://github.com/ming1/ubdsrv
.. [#userspace_lib] https://github.com/ming1/ubdsrv/tree/master/lib
.. [#userspace_nbdublk] https://gitlab.com/rwmjones/libnbd/-/tree/nbdublk
.. [#userspace_readme] https://github.com/ming1/ubdsrv/blob/master/README
.. [#stefan] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
.. [#xiaoguang] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/


@ -86,6 +86,7 @@ if major >= 3:
"__used",
"__weak",
"noinline",
"__fix_address",
# include/linux/memblock.h:
"__init_memblock",


@ -24,8 +24,10 @@ properties:
interrupts:
minItems: 1
maxItems: 2
description:
Should be configured with type IRQ_TYPE_EDGE_RISING.
If two interrupts are provided, expected order is INT1 and INT2.
required:
- compatible


@ -16,6 +16,7 @@ properties:
compatible:
enum:
- goodix,gt1151
- goodix,gt1158
- goodix,gt5663
- goodix,gt5688
- goodix,gt911


@ -24,6 +24,7 @@ properties:
- mediatek,mt2712-mtu3
- mediatek,mt8173-mtu3
- mediatek,mt8183-mtu3
- mediatek,mt8188-mtu3
- mediatek,mt8192-mtu3
- mediatek,mt8195-mtu3
- const: mediatek,mtu3


@ -33,6 +33,7 @@ properties:
- qcom,sm6115-dwc3
- qcom,sm6125-dwc3
- qcom,sm6350-dwc3
- qcom,sm6375-dwc3
- qcom,sm8150-dwc3
- qcom,sm8250-dwc3
- qcom,sm8350-dwc3
@ -108,12 +109,17 @@ properties:
HS/FS/LS modes are supported.
type: boolean
wakeup-source: true
# Required child node:
patternProperties:
"^usb@[0-9a-f]+$":
$ref: snps,dwc3.yaml#
properties:
wakeup-source: false
required:
- compatible
- reg


@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes:
* AVB Mag Turbo Force
* AVB Top Shot Pegasus
* AVB Top Shot Force Feedback Racing Wheel
* Boeder Force Feedback Wheel
* Logitech WingMan Force
* Logitech WingMan Force Wheel
* Guillemot Race Leader Force Feedback


@ -67,7 +67,7 @@ The ``netdevsim`` driver supports rate objects management, which includes:
- setting tx_share and tx_max rate values for any rate object type;
- setting parent node for any rate object type.
Rate nodes and it's parameters are exposed in ``netdevsim`` debugfs in RO mode.
Rate nodes and their parameters are exposed in ``netdevsim`` debugfs in RO mode.
For example created rate node with name ``some_group``:
.. code:: shell


@ -8,7 +8,7 @@ Transmit path guidelines:
1) The ndo_start_xmit method must not return NETDEV_TX_BUSY under
any normal circumstances. It is considered a hard error unless
there is no way your device can tell ahead of time when it's
there is no way your device can tell ahead of time when its
transmit function will become busy.
Instead it must maintain the queue properly. For example,


@ -1035,7 +1035,10 @@ tcp_limit_output_bytes - INTEGER
tcp_challenge_ack_limit - INTEGER
Limits number of Challenge ACK sent per second, as recommended
in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
Default: 1000
Note that this per netns rate limit can allow some side channel
attacks and probably should not be enabled.
TCP stack implements per TCP socket limits anyway.
Default: INT_MAX (unlimited)
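A quick way to inspect or override this limit at runtime is via sysctl (the knob lives under ``net.ipv4``, per the variable name above)::
   sysctl net.ipv4.tcp_challenge_ack_limit
   sysctl -w net.ipv4.tcp_challenge_ack_limit=1000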
UDP variables
=============


@ -11,7 +11,7 @@ Initial Release:
================
This is conceptually very similar to the macvlan driver with one major
exception of using L3 for mux-ing /demux-ing among slaves. This property makes
the master device share the L2 with it's slave devices. I have developed this
the master device share the L2 with its slave devices. I have developed this
driver in conjunction with network namespaces and not sure if there is use case
outside of it.


@ -530,7 +530,7 @@ its tunnel close actions. For L2TPIP sockets, the socket's close
handler initiates the same tunnel close actions. All sessions are
first closed. Each session drops its tunnel ref. When the tunnel ref
reaches zero, the tunnel puts its socket ref. When the socket is
eventually destroyed, it's sk_destruct finally frees the L2TP tunnel
eventually destroyed, its sk_destruct finally frees the L2TP tunnel
context.
Sessions


@ -159,7 +159,7 @@ tools such as iproute2.
The switchdev driver can know a particular port's position in the topology by
monitoring NETDEV_CHANGEUPPER notifications. For example, a port moved into a
bond will see it's upper master change. If that bond is moved into a bridge,
bond will see its upper master change. If that bond is moved into a bridge,
the bond's upper master will change. And so on. The driver will track such
movements to know what position a port is in in the overall topology by
registering for netdevice events and acting on NETDEV_CHANGEUPPER.


@ -70,8 +70,16 @@
% Translations have Asian (CJK) characters which are only displayed if
% xeCJK is used
\usepackage{ifthen}
\newboolean{enablecjk}
\setboolean{enablecjk}{false}
\IfFontExistsTF{Noto Sans CJK SC}{
% Load xeCJK when CJK font is available
\IfFileExists{xeCJK.sty}{
\setboolean{enablecjk}{true}
}{}
}{}
\ifthenelse{\boolean{enablecjk}}{
% Load xeCJK when both the Noto Sans CJK font and xeCJK.sty are available.
\usepackage{xeCJK}
% Noto CJK fonts don't provide slant shape. [AutoFakeSlant] permits
% its emulation.
@ -196,7 +204,7 @@
% Inactivate CJK after tableofcontents
\apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{}
\xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC
}{ % No CJK font found
}{ % Don't enable CJK
% Custom macros to on/off CJK and switch CJK fonts (Dummy)
\newcommand{\kerneldocCJKon}{}
\newcommand{\kerneldocCJKoff}{}
@ -204,14 +212,16 @@
%% and ignore the argument (#1) in their definitions, whole contents of
%% CJK chapters can be ignored.
\newcommand{\kerneldocBeginSC}[1]{%
%% Put a note on missing CJK fonts in place of zh_CN translation.
\begin{sphinxadmonition}{note}{Note on missing fonts:}
%% Put a note on missing CJK fonts or the xecjk package in place of
%% zh_CN translation.
\begin{sphinxadmonition}{note}{Note on missing fonts and a package:}
Translations of Simplified Chinese (zh\_CN), Traditional Chinese
(zh\_TW), Korean (ko\_KR), and Japanese (ja\_JP) were skipped
due to the lack of suitable font families.
due to the lack of suitable font families and/or the texlive-xecjk
package.
If you want them, please install ``Noto Sans CJK'' font families
by following instructions from
along with the texlive-xecjk package by following instructions from
\sphinxcode{./scripts/sphinx-pre-install}.
Having optional ``Noto Serif CJK'' font families will improve
the looks of those translations.


@ -35,8 +35,7 @@ Linux カーネルに変更を加えたいと思っている個人又は会社
てもらえやすくする提案を集めたものです。
コードを投稿する前に、Documentation/process/submit-checklist.rst の項目リストに目
を通してチェックしてください。もしあなたがドライバーを投稿しようとし
ているなら、Documentation/process/submitting-drivers.rst にも目を通してください。
を通してチェックしてください。
--------------------------------------------
セクション1 パッチの作り方と送り方


@ -10032,6 +10032,7 @@ F: Documentation/devicetree/bindings/input/
F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
F: drivers/input/
F: include/dt-bindings/input/
F: include/linux/input.h
F: include/linux/input/
F: include/uapi/linux/input-event-codes.h
@ -20765,6 +20766,7 @@ UBLK USERSPACE BLOCK DRIVER
M: Ming Lei <ming.lei@redhat.com>
L: linux-block@vger.kernel.org
S: Maintained
F: Documentation/block/ublk.rst
F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
@ -22306,7 +22308,7 @@ M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
R: Srinivas Neeli <srinivas.neeli@xilinx.com>
R: Michal Simek <michal.simek@xilinx.com>
S: Maintained
F: Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
F: Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
F: Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
F: drivers/gpio/gpio-xilinx.c
F: drivers/gpio/gpio-zynq.c


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -64,28 +64,28 @@
#define EARLY_KASLR (0)
#endif
#define EARLY_ENTRIES(vstart, vend, shift) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
#define EARLY_ENTRIES(vstart, vend, shift, add) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
#if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
#else
#define EARLY_PUDS(vstart, vend) (0)
#define EARLY_PUDS(vstart, vend, add) (0)
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
#else
#define EARLY_PMDS(vstart, vend) (0)
#define EARLY_PMDS(vstart, vend, add) (0)
#endif
#define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define EARLY_PAGES(vstart, vend, add) ( 1 /* PGDIR page */ \
+ EARLY_PGDS((vstart), (vend), add) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend), add) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
/* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48
@ -93,7 +93,7 @@
#else
#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
#endif
#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS


@ -371,7 +371,9 @@ SYM_FUNC_END(create_idmap)
SYM_FUNC_START_LOCAL(create_kernel_mapping)
adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR // compile time __va(_text)
#ifdef CONFIG_RELOCATABLE
add x5, x5, x23 // add KASLR displacement
#endif
adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text


@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
u64 i;
phys_addr_t start, end;
nr_ranges = 1; /* for exclusion of crashkernel region */
nr_ranges = 2; /* for exclusion of crashkernel region */
for_each_mem_range(i, &start, &end)
nr_ranges++;


@ -39,6 +39,7 @@ config LOONGARCH
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE
@ -51,6 +52,7 @@ config LOONGARCH
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_NO_INSTR
select BUILDTIME_TABLE_SORT
select COMMON_CLK


@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
extern int acpi_noirq;
#define acpi_os_ioremap acpi_os_ioremap
void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
static inline void disable_acpi(void)
{


@ -48,7 +48,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
early_memunmap(map, size);
}
void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
if (!memblock_is_memory(phys))
return ioremap(phys, size);


@ -529,11 +529,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
signal_setup_done(ret, ksig, 0);
}
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
void arch_do_signal_or_restart(struct pt_regs *regs)
{
struct ksignal ksig;
if (has_signal && get_signal(&ksig)) {
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;


@ -77,6 +77,8 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
.rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) }
.init.bss : {
*(.init.bss)
}


@ -18,11 +18,11 @@ void dump_tlb_regs(void)
{
const int field = 2 * sizeof(unsigned long);
pr_info("Index : %0x\n", read_csr_tlbidx());
pr_info("PageSize : %0x\n", read_csr_pagesize());
pr_info("EntryHi : %0*llx\n", field, read_csr_entryhi());
pr_info("EntryLo0 : %0*llx\n", field, read_csr_entrylo0());
pr_info("EntryLo1 : %0*llx\n", field, read_csr_entrylo1());
pr_info("Index : 0x%0x\n", read_csr_tlbidx());
pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
pr_info("EntryHi : 0x%0*llx\n", field, read_csr_entryhi());
pr_info("EntryLo0 : 0x%0*llx\n", field, read_csr_entrylo0());
pr_info("EntryLo1 : 0x%0*llx\n", field, read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
@ -33,8 +33,8 @@ static void dump_tlb(int first, int last)
unsigned int s_index, s_asid;
unsigned int pagesize, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int pwidth = 11;
int vwidth = 11;
int pwidth = 16;
int vwidth = 16;
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
s_entryhi = read_csr_entryhi();
@ -64,22 +64,22 @@ static void dump_tlb(int first, int last)
/*
* Only print entries in use
*/
pr_info("Index: %2d pgsize=%x ", i, (1 << pagesize));
pr_info("Index: %4d pgsize=0x%x ", i, (1 << pagesize));
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
pr_cont("va=%0*lx asid=%0*lx",
pr_cont("va=0x%0*lx asid=0x%0*lx",
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
pr_cont("ri=%d xi=%d ",
pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
@ -88,10 +88,10 @@ static void dump_tlb(int first, int last)
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
pr_cont("ri=%d xi=%d ",
pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,


@ -131,18 +131,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
return ret;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
int nid;
nid = pa_to_nid(start);
return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
@ -154,6 +142,13 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
page += vmem_altmap_offset(altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif


@ -83,6 +83,8 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_NATIVE_POSSIBLE = 0,
FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@ -92,6 +94,9 @@ enum {
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
#ifdef CONFIG_PPC_HASH_MMU_NATIVE
FW_FEATURE_NATIVE_ALWAYS |
#endif
0,
FW_FEATURE_ALWAYS =
@ -103,6 +108,9 @@ enum {
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
#ifdef CONFIG_PPC_HASH_MMU_NATIVE
FW_FEATURE_NATIVE_ALWAYS &
#endif
FW_FEATURE_POSSIBLE,


@ -113,7 +113,14 @@ static inline void __hard_RI_enable(void)
static inline notrace unsigned long irq_soft_mask_return(void)
{
return READ_ONCE(local_paca->irq_soft_mask);
unsigned long flags;
asm volatile(
"lbz %0,%1(13)"
: "=r" (flags)
: "i" (offsetof(struct paca_struct, irq_soft_mask)));
return flags;
}
/*
@ -140,24 +147,46 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON(mask && !(mask & IRQS_DISABLED));
WRITE_ONCE(local_paca->irq_soft_mask, mask);
barrier();
asm volatile(
"stb %0,%1(13)"
:
: "r" (mask),
"i" (offsetof(struct paca_struct, irq_soft_mask))
: "memory");
}
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
unsigned long flags = irq_soft_mask_return();
unsigned long flags;
irq_soft_mask_set(mask);
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif
asm volatile(
"lbz %0,%1(13); stb %2,%1(13)"
: "=&r" (flags)
: "i" (offsetof(struct paca_struct, irq_soft_mask)),
"r" (mask)
: "memory");
return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
unsigned long flags = irq_soft_mask_return();
unsigned long flags, tmp;
irq_soft_mask_set(flags | mask);
asm volatile(
"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
: "=&r" (flags), "=r" (tmp)
: "i" (offsetof(struct paca_struct, irq_soft_mask)),
"r" (mask)
: "memory");
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif
return flags;
}
@ -282,7 +311,8 @@ static inline bool pmi_irq_pending(void)
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) { \
WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
: "r" (current_stack_pointer)); \
trace_hardirqs_off(); \
} \
} while(0)


@ -245,6 +245,15 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
#ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
/*
* Enable PCI domains in /proc when PCI bus numbers are not unique
* across all PCI domains to prevent conflicts. And keep PCI domain 0
* backward compatible in /proc for video cards.
*/
pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
#endif
if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pci_assign_all_buses = 1;


@ -109,8 +109,12 @@ __enter_rtas:
* its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
* is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
* MSR[S] is set, it will remain when entering RTAS.
* If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV
* from the saved MSR value and insert into the value RTAS will use.
*/
extrdi r0, r6, 1, 63 - MSR_HV_LG
LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
insrdi r6, r0, 1, 63 - MSR_HV_LG
li r0,0
mtmsrd r0,1 /* disable RI before using SRR0/1 */


@ -18,6 +18,7 @@
.p2align 3
#define __SYSCALL(nr, entry) .8byte entry
#else
.p2align 2
#define __SYSCALL(nr, entry) .long entry
#endif


@ -124,9 +124,6 @@ struct papr_scm_priv {
/* The bits which needs to be overridden */
u64 health_bitmap_inject_mask;
/* array to have event_code and stat_id mappings */
u8 *nvdimm_events_map;
};
static int papr_scm_pmem_flush(struct nd_region *nd_region,
@ -350,6 +347,25 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
static const char * const nvdimm_events_map[] = {
[1] = "CtlResCt",
[2] = "CtlResTm",
[3] = "PonSecs ",
[4] = "MemLife ",
[5] = "CritRscU",
[6] = "HostLCnt",
[7] = "HostSCnt",
[8] = "HostSDur",
[9] = "HostLDur",
[10] = "MedRCnt ",
[11] = "MedWCnt ",
[12] = "MedRDur ",
[13] = "MedWDur ",
[14] = "CchRHCnt",
[15] = "CchWHCnt",
[16] = "FastWCnt",
};
static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
struct papr_scm_perf_stat *stat;
@ -357,11 +373,15 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
struct papr_scm_priv *p = dev_get_drvdata(dev);
int rc, size;
/* Invalid eventcode */
if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
return -EINVAL;
/* Allocate request buffer enough to hold single performance stat */
size = sizeof(struct papr_scm_perf_stats) +
sizeof(struct papr_scm_perf_stat);
if (!p || !p->nvdimm_events_map)
if (!p)
return -EINVAL;
stats = kzalloc(size, GFP_KERNEL);
@ -370,7 +390,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id,
&p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
nvdimm_events_map[event->attr.config],
sizeof(stat->stat_id));
stat->stat_val = 0;
@ -458,56 +478,6 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags)
papr_scm_pmu_read(event);
}
static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
u32 available_events;
int index, rc = 0;
if (!p->stat_buffer_len)
return -ENOENT;
available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
/ sizeof(struct papr_scm_perf_stat);
if (available_events == 0)
return -EOPNOTSUPP;
/* Allocate the buffer for phyp where stats are written */
stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
if (!stats) {
rc = -ENOMEM;
return rc;
}
/* Called to get list of events supported */
rc = drc_pmem_query_stats(p, stats, 0);
if (rc)
goto out;
/*
* Allocate memory and populate nvdimm_event_map.
* Allocate an extra element for NULL entry
*/
p->nvdimm_events_map = kcalloc(available_events + 1,
sizeof(stat->stat_id),
GFP_KERNEL);
if (!p->nvdimm_events_map) {
rc = -ENOMEM;
goto out;
}
/* Copy all stat_ids to event map */
for (index = 0, stat = stats->scm_statistic;
index < available_events; index++, ++stat) {
memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
&stat->stat_id, sizeof(stat->stat_id));
}
out:
kfree(stats);
return rc;
}
static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
struct nvdimm_pmu *nd_pmu;
@ -519,9 +489,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
goto pmu_err_print;
}
rc = papr_scm_pmu_check_events(p, nd_pmu);
if (rc)
if (!p->stat_buffer_len) {
rc = -ENOENT;
goto pmu_check_events_err;
}
nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
@ -539,7 +510,7 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
rc = register_nvdimm_pmu(nd_pmu, p->pdev);
if (rc)
goto pmu_register_err;
goto pmu_check_events_err;
/*
* Set archdata.priv value to nvdimm_pmu structure, to handle the
@ -548,8 +519,6 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
p->pdev->archdata.priv = nd_pmu;
return;
pmu_register_err:
kfree(p->nvdimm_events_map);
pmu_check_events_err:
kfree(nd_pmu);
pmu_err_print:
@ -1560,7 +1529,6 @@ static int papr_scm_remove(struct platform_device *pdev)
unregister_nvdimm_pmu(pdev->archdata.priv);
pdev->archdata.priv = NULL;
kfree(p->nvdimm_events_map);
kfree(p->bus_desc.provider_name);
kfree(p);


@ -33,4 +33,16 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
u32 type, u64 flags);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#endif
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
#endif /* __RISCV_KVM_VCPU_SBI_H__ */


@ -32,23 +32,13 @@ static int kvm_linux_err_map_sbi(int err)
};
}
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#else
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
.extid_start = -1UL,
.extid_end = -1UL,
.handler = NULL,
};
#endif
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
&vcpu_sbi_ext_v01,


@ -299,7 +299,6 @@ static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr;
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
kvm_riscv_vcpu_update_timedelta(vcpu);
@ -307,7 +306,6 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
if (!t->sstc_enabled)
return;
csr = &vcpu->arch.guest_csr;
#if defined(CONFIG_32BIT)
csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
@ -324,13 +322,11 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr;
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
if (!t->sstc_enabled)
return;
csr = &vcpu->arch.guest_csr;
t = &vcpu->arch.timer;
#if defined(CONFIG_32BIT)
t->next_cycles = csr_read(CSR_VSTIMECMP);


@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
mmap_read_lock(&init_mm);
mmap_write_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
mmap_read_unlock(&init_mm);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);


@ -40,8 +40,6 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
@ -74,6 +72,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
@ -93,6 +92,10 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@ -102,14 +105,12 @@ CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
CONFIG_GUP_TEST=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@ -167,6 +168,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
@ -493,7 +495,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
@ -509,7 +510,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_WANGXUN is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
@ -518,16 +519,18 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETERION is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
# CONFIG_NET_VENDOR_PENSANDO is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
@ -535,9 +538,9 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@ -570,6 +573,8 @@ CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
# CONFIG_RANDOM_TRUST_CPU is not set
# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
CONFIG_PPS=m
# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
@ -727,18 +732,26 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
@ -746,11 +759,14 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
@ -766,16 +782,6 @@ CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
@ -797,6 +803,7 @@ CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_WX=y
@ -808,8 +815,6 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_PGFLAGS=y


@ -38,8 +38,6 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
@ -69,6 +67,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
@ -88,6 +87,9 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@ -95,13 +97,11 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@ -159,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
@ -484,7 +485,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
@ -500,7 +500,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_WANGXUN is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
@ -509,16 +509,18 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETERION is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
# CONFIG_NET_VENDOR_PENSANDO is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
@ -526,9 +528,9 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@ -561,6 +563,8 @@ CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
# CONFIG_RANDOM_TRUST_CPU is not set
# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
@ -713,18 +717,26 @@ CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
@ -732,11 +744,14 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
@ -752,16 +767,6 @@ CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_SHA3_256_S390=m
CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m


@ -1,4 +1,3 @@
# CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
@ -9,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
@ -28,6 +26,8 @@ CONFIG_CRASH_DUMP=y
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SWAP is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
CONFIG_NET=y
@ -53,10 +53,12 @@ CONFIG_ZFCP=y
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
# CONFIG_HMC_DRV is not set
# CONFIG_S390_UV_UAPI is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set


@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~HPAGE_MASK)
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}


@ -1038,16 +1038,11 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);
#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm);
void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev);
#else
static inline int kvm_s390_pci_register_kvm(struct zpci_dev *dev,
struct kvm *kvm)
{
return -EPERM;
}
static inline void kvm_s390_pci_unregister_kvm(struct zpci_dev *dev) {}
#endif
struct zpci_kvm_hook {
int (*kvm_register)(void *opaque, struct kvm *kvm);
void (*kvm_unregister)(void *opaque);
};
extern struct zpci_kvm_hook zpci_kvm_hook;
#endif


@ -131,6 +131,7 @@ SECTIONS
/*
* Table with the patch locations to undo expolines
*/
. = ALIGN(4);
.nospec_call_table : {
__nospec_call_start = . ;
*(.s390_indirect*)


@ -431,8 +431,9 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
* available, enable them and let userspace indicate whether or not they will
* be used (specify SHM bit to disable).
*/
int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm)
static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
struct zpci_dev *zdev = opaque;
int rc;
if (!zdev)
@ -510,10 +511,10 @@ int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm)
kvm_put_kvm(kvm);
return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_pci_register_kvm);
void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev)
static void kvm_s390_pci_unregister_kvm(void *opaque)
{
struct zpci_dev *zdev = opaque;
struct kvm *kvm;
if (!zdev)
@ -566,7 +567,6 @@ void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev)
kvm_put_kvm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_s390_pci_unregister_kvm);
void kvm_s390_pci_init_list(struct kvm *kvm)
{
@ -678,6 +678,8 @@ int kvm_s390_pci_init(void)
spin_lock_init(&aift->gait_lock);
mutex_init(&aift->aift_lock);
zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
return 0;
}
@ -685,6 +687,8 @@ int kvm_s390_pci_init(void)
void kvm_s390_pci_exit(void)
{
mutex_destroy(&aift->aift_lock);
zpci_kvm_hook.kvm_register = NULL;
zpci_kvm_hook.kvm_unregister = NULL;
kfree(aift);
}


@ -421,8 +421,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo


@ -5,5 +5,5 @@
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o
pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o


@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO ZPCI devices support
*
* Copyright (C) IBM Corp. 2022. All rights reserved.
* Author(s): Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/kvm_host.h>
struct zpci_kvm_hook zpci_kvm_hook;
EXPORT_SYMBOL_GPL(zpci_kvm_hook);
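
The new file above provides the zpci_kvm_hook used in place of the direct kvm_s390_pci_register_kvm()/kvm_s390_pci_unregister_kvm() calls removed earlier in this diff; kvm_s390_pci_init() and kvm_s390_pci_exit() install and clear the callbacks. A hypothetical call-site sketch, not taken from this commit (the function name and the -ENODEV fallback are assumptions):

/* Hypothetical caller: hand a zPCI device to KVM only if KVM installed its hook. */
static int example_zpci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm)
{
	if (!zpci_kvm_hook.kvm_register)
		return -ENODEV;

	/* The opaque pointer is passed back as the first argument of the callback. */
	return zpci_kvm_hook.kvm_register(zdev, kvm);
}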


@ -4052,8 +4052,9 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
/* Disable guest PEBS if host PEBS is enabled. */
arr[pebs_enable].guest = 0;
} else {
/* Disable guest PEBS for cross-mapped PEBS counters. */
/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
arr[global_ctrl].guest |= arr[pebs_enable].guest;
}


@ -5361,19 +5361,6 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
static bool need_remote_flush(u64 old, u64 new)
{
if (!is_shadow_present_pte(old))
return false;
if (!is_shadow_present_pte(new))
return true;
if ((old ^ new) & SPTE_BASE_ADDR_MASK)
return true;
old ^= shadow_nx_mask;
new ^= shadow_nx_mask;
return (old & ~new & SPTE_PERM_MASK) != 0;
}
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
{
@ -5519,7 +5506,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
if (gentry && sp->role.level != PG_LEVEL_4K)
++vcpu->kvm->stat.mmu_pde_zapped;
if (need_remote_flush(entry, *spte))
if (is_shadow_present_pte(entry))
flush = true;
++spte;
}
@ -6085,47 +6072,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
int start_level)
{
bool flush = false;
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
start_level, KVM_MAX_HUGEPAGE_LEVEL,
false);
slot_handle_level(kvm, memslot, slot_rmap_write_protect,
start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
read_unlock(&kvm->mmu_lock);
}
/*
* Flush TLBs if any SPTEs had to be write-protected to ensure that
* guest writes are reflected in the dirty bitmap before the memslot
* update completes, i.e. before enabling dirty logging is visible to
* userspace.
*
* Perform the TLB flush outside the mmu_lock to reduce the amount of
* time the lock is held. However, this does mean that another CPU can
* now grab mmu_lock and encounter a write-protected SPTE while CPUs
* still have a writable mapping for the associated GFN in their TLB.
*
* This is safe but requires KVM to be careful when making decisions
* based on the write-protection status of an SPTE. Specifically, KVM
* also write-protects SPTEs to monitor changes to guest page tables
* during shadow paging, and must guarantee no CPUs can write to those
* page before the lock is dropped. As mentioned in the previous
* paragraph, a write-protected SPTE is no guarantee that CPU cannot
* perform writes. So to determine if a TLB flush is truly required, KVM
* will clear a separate software-only bit (MMU-writable) and skip the
* flush if-and-only-if this bit was already clear.
*
* See is_writable_pte() for more details.
*/
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
@ -6493,32 +6451,30 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
bool flush = false;
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
/*
* Clear dirty bits only on 4k SPTEs since the legacy MMU only
* support dirty logging at a 4k granularity.
*/
flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
read_unlock(&kvm->mmu_lock);
}
/*
* The caller will flush the TLBs after this function returns.
*
* It's also safe to flush TLBs out of mmu lock here as currently this
* function is only used for dirty logging, in which case flushing TLB
* out of mmu lock also guarantees no dirty pages will be lost in
* dirty_bitmap.
*/
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
void kvm_mmu_zap_all(struct kvm *kvm)


@ -343,7 +343,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
}
/*
* An shadow-present leaf SPTE may be non-writable for 3 possible reasons:
* A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
*
* 1. To intercept writes for dirty logging. KVM write-protects huge pages
* so that they can be split down into the dirty logging
@ -361,8 +361,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
* read-only memslot or guest memory backed by a read-only VMA. Writes to
* such pages are disallowed entirely.
*
* To keep track of why a given SPTE is write-protected, KVM uses 2
* software-only bits in the SPTE:
* 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this
* case, the SPTE is access-protected, not just write-protected!
*
* For cases #1 and #4, KVM can safely make such SPTEs writable without taking
* mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
* To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
* in the SPTE:
*
* shadow_mmu_writable_mask, aka MMU-writable -
* Cleared on SPTEs that KVM is currently write-protecting for shadow paging
@ -391,7 +396,8 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
* shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging
* (which does not clear the MMU-writable bit), does not flush TLBs before
* dropping the lock, as it only needs to synchronize guest writes with the
* dirty bitmap.
* dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for
* access-tracking via the clear_young() MMU notifier also does not flush TLBs.
*
* So, there is the problem: clearing the MMU-writable bit can encounter a
* write-protected SPTE while CPUs still have writable mappings for that SPTE


@ -843,8 +843,7 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
return true;
return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
MSR_IA32_SPEC_CTRL);
return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
}
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)


@ -1557,12 +1557,32 @@ static const u32 msr_based_features_all[] = {
static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;
/*
* Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
* does not yet virtualize. These include:
* 10 - MISC_PACKAGE_CTRLS
* 11 - ENERGY_FILTERING_CTL
* 12 - DOITM
* 18 - FB_CLEAR_CTRL
* 21 - XAPIC_DISABLE_STATUS
* 23 - OVERCLOCKING_STATUS
*/
#define KVM_SUPPORTED_ARCH_CAP \
(ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
static u64 kvm_get_arch_capabilities(void)
{
u64 data = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
data &= KVM_SUPPORTED_ARCH_CAP;
}
/*
* If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@ -1610,9 +1630,6 @@ static u64 kvm_get_arch_capabilities(void)
*/
}
/* Guests don't need to know "Fill buffer clear control" exists */
data &= ~ARCH_CAP_FB_CLEAR_CTRL;
return data;
}
@ -10652,7 +10669,8 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
case KVM_MP_STATE_INIT_RECEIVED:
break;
default:
return -EINTR;
WARN_ON_ONCE(1);
break;
}
return 1;
}
@ -11093,9 +11111,22 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu_load(vcpu);
if (!lapic_in_kernel(vcpu) &&
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
switch (mp_state->mp_state) {
case KVM_MP_STATE_UNINITIALIZED:
case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD:
case KVM_MP_STATE_INIT_RECEIVED:
case KVM_MP_STATE_SIPI_RECEIVED:
if (!lapic_in_kernel(vcpu))
goto out;
break;
case KVM_MP_STATE_RUNNABLE:
break;
default:
goto out;
}
/*
* KVM_MP_STATE_INIT_RECEIVED means the processor is in
@ -11563,7 +11594,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
goto fail_free_pio_data;
goto fail_free_mce_banks;
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
@ -11617,7 +11648,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
kfree(vcpu->arch.mci_ctl2_banks);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail_free_lapic:
kvm_free_lapic(vcpu);
@ -12473,6 +12503,50 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
} else {
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
}
/*
* Unconditionally flush the TLBs after enabling dirty logging.
* A flush is almost always going to be necessary (see below),
* and unconditionally flushing allows the helpers to omit
* the subtly complex checks when removing write access.
*
* Do the flush outside of mmu_lock to reduce the amount of
* time mmu_lock is held. Flushing after dropping mmu_lock is
* safe as KVM only needs to guarantee the slot is fully
* write-protected before returning to userspace, i.e. before
* userspace can consume the dirty status.
*
* Flushing outside of mmu_lock requires KVM to be careful when
* making decisions based on writable status of an SPTE, e.g. a
* !writable SPTE doesn't guarantee a CPU can't perform writes.
*
* Specifically, KVM also write-protects guest page tables to
* monitor changes when using shadow paging, and must guarantee
* no CPUs can write to those page before mmu_lock is dropped.
* Because CPUs may have stale TLB entries at this point, a
* !writable SPTE doesn't guarantee CPUs can't perform writes.
*
* KVM also allows making SPTES writable outside of mmu_lock,
* e.g. to allow dirty logging without taking mmu_lock.
*
* To handle these scenarios, KVM uses a separate software-only
* bit (MMU-writable) to track if a SPTE is !writable due to
* a guest page table being write-protected (KVM clears the
* MMU-writable flag when write-protecting for shadow paging).
*
* The use of MMU-writable is also the primary motivation for
* the unconditional flush. Because KVM must guarantee that a
* CPU doesn't contain stale, writable TLB entries for a
* !MMU-writable SPTE, KVM must flush if it encounters any
* MMU-writable SPTE regardless of whether the actual hardware
* writable bit was set. I.e. KVM is almost guaranteed to need
* to flush, while unconditionally flushing allows the "remove
* write access" helpers to ignore MMU-writable entirely.
*
* See is_writable_pte() for more details (the case involving
* access-tracked SPTEs is particularly relevant).
*/
kvm_arch_flush_remote_tlbs_memslot(kvm, new);
}
}


@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
if (ret && ref == new_ref) {
/*
* Cleanup the failed reference here as the target
* could now be dead and have already released its
* references by now. Calling on the new reference
* with strong=0 and a tmp_refs will not decrement
* the node. The new_ref gets kfree'd below.
*/
binder_cleanup_ref_olocked(new_ref);
ref = NULL;
}
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*


@ -322,7 +322,6 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
*/
if (vma) {
vm_start = vma->vm_start;
alloc->vma_vm_mm = vma->vm_mm;
mmap_assert_write_locked(alloc->vma_vm_mm);
} else {
mmap_assert_locked(alloc->vma_vm_mm);
@ -795,7 +794,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);
return 0;
@ -1091,6 +1089,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
alloc->vma_vm_mm = current->mm;
mmgrab(alloc->vma_vm_mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}


@ -735,7 +735,7 @@ void update_siblings_masks(unsigned int cpuid)
int cpu, ret;
ret = detect_cache_attributes(cpuid);
if (ret)
if (ret && ret != -ENOENT)
pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */


@ -274,12 +274,42 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
/**
* driver_deferred_probe_check_state() - Check deferred probe state
* @dev: device to check
*
* Return:
* * -ENODEV if initcalls have completed and modules are disabled.
* * -ETIMEDOUT if the deferred probe timeout was set and has expired
* and modules are enabled.
* * -EPROBE_DEFER in other cases.
*
* Drivers or subsystems can opt-in to calling this function instead of directly
* returning -EPROBE_DEFER.
*/
int driver_deferred_probe_check_state(struct device *dev)
{
if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
return -ENODEV;
}
if (!driver_deferred_probe_timeout && initcalls_done) {
dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
return -ETIMEDOUT;
}
return -EPROBE_DEFER;
}
EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@ -881,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
/*
* Device can't match with a driver right now, so don't attempt
* to match or bind with other drivers on the bus.
*/
return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@ -1120,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
/*
* Driver could not match with device, but may match with
* another device on the bus.
*/
return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
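
Per its kernel-doc above, driver_deferred_probe_check_state() is an opt-in replacement for returning -EPROBE_DEFER directly, and the genpd hunk later in this diff uses it exactly that way. A minimal sketch of such an opt-in probe, not part of this commit (the driver, its clock dependency, and the probe flow are assumptions):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			/* Let the core decide: keep deferring, or give up with
			 * -ENODEV/-ETIMEDOUT once initcalls have completed.
			 */
			return driver_deferred_probe_check_state(&pdev->dev);
		return PTR_ERR(clk);
	}

	return 0;
}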


@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
if (fw_sysfs->fw_upload_priv) {
free_fw_priv(fw_sysfs->fw_priv);
kfree(fw_sysfs->fw_upload_priv);
}
if (fw_sysfs->fw_upload_priv)
fw_upload_free(fw_sysfs);
kfree(fw_sysfs);
}


@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
extern struct device_attribute dev_attr_remaining_size;
int fw_upload_start(struct fw_sysfs *fw_sysfs);
void fw_upload_free(struct fw_sysfs *fw_sysfs);
umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
#else
static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
return 0;
}
static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
{
}
#endif
#endif /* __FIRMWARE_SYSFS_H */


@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
return 0;
}
void fw_upload_free(struct fw_sysfs *fw_sysfs)
{
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
free_fw_priv(fw_sysfs->fw_priv);
kfree(fw_upload_priv->fw_upload);
kfree(fw_upload_priv);
}
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
unregister:
device_unregister(&fw_sysfs->dev);
module_put(fw_upload_priv->module);
module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);


@ -2733,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
return -ENODEV;
return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);


@ -226,6 +226,9 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
/* Connect-time cached feature_persistent parameter value */
unsigned int feature_gnt_persistent_parm:1;
/* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};


@ -907,7 +907,7 @@ static void connect(struct backend_info *be)
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
be->blkif->vbd.feature_gnt_persistent);
be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@ -1085,7 +1085,9 @@ static int connect_ring(struct backend_info *be)
return -ENOSYS;
}
blkif->vbd.feature_gnt_persistent = feature_persistent &&
blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
blkif->vbd.feature_gnt_persistent =
blkif->vbd.feature_gnt_persistent_parm &&
xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;


@ -213,6 +213,9 @@ struct blkfront_info
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
/* Connect-time cached feature_persistent parameter */
unsigned int feature_persistent_parm:1;
/* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
@ -1756,6 +1759,12 @@ static int write_per_ring_nodes(struct xenbus_transaction xbt,
return err;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
"Enables the persistent grants feature");
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@ -1847,8 +1856,9 @@ static int talk_to_blkback(struct xenbus_device *dev,
message = "writing protocol";
goto abort_transaction;
}
info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
info->feature_persistent);
info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
return 0;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
"Enables the persistent grants feature");
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@ -2281,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
if (feature_persistent)
if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);


@ -430,12 +430,25 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
dma_addr_t ptr;
void *dev_rp;
/*
* If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
* and by that time mhi_ctxt would've been freed. So check for the existence of mhi_ctxt
* before handling the IRQs.
*/
if (!mhi_cntrl->mhi_ctxt) {
dev_dbg(&mhi_cntrl->mhi_dev->dev,
"mhi_ctxt has been freed\n");
return IRQ_HANDLED;
}
er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
ptr = le64_to_cpu(er_ctxt->rp);
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");


@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
return 0;
}
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
.uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {


@ -203,7 +203,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
return ret;
return 0;
return val;
}
@ -220,7 +220,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@ -288,7 +288,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@ -344,8 +344,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct rpi_firmware_get_clocks_response *clks;
int ret;
/*
* The firmware doesn't guarantee that the last element of
* RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
* zero element as sentinel.
*/
clks = devm_kcalloc(rpi->dev,
RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
@ -360,7 +365,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct raspberrypi_clk_variant *variant;
if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
clks->id, RPI_FIRMWARE_NUM_CLK_ID);
return -EINVAL;
}
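
The extra zeroed element allocated above acts as a sentinel so a walk over the firmware reply can stop at id == 0 even when every real slot is used. A generic, standalone illustration of the pattern, not part of this commit (names and values are made up):

#include <stdio.h>

struct clk_desc { unsigned int id; unsigned int flags; };

int main(void)
{
	/* Three real entries plus one zeroed sentinel terminating the walk. */
	struct clk_desc descs[] = { { 1, 0 }, { 2, 0 }, { 3, 0 }, { 0, 0 } };

	for (struct clk_desc *d = descs; d->id; d++)
		printf("found clock id %u\n", d->id);

	return 0;
}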


@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
clk_pm_runtime_put(core);
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)


@ -135,6 +135,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
continue;
if (!strncmp(n, tmp, strlen(tmp))) {
of_node_get(np);
found = true;
break;
}


@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage) ||
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);


@ -164,6 +164,7 @@ static void dio48e_irq_mask(struct irq_data *data)
dio48egpio->irq_mask &= ~BIT(0);
else
dio48egpio->irq_mask &= ~BIT(1);
gpiochip_disable_irq(chip, offset);
if (!dio48egpio->irq_mask)
/* disable interrupts */
@ -191,6 +192,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
gpiochip_enable_irq(chip, offset);
if (offset == 19)
dio48egpio->irq_mask |= BIT(0);
else
@ -213,12 +215,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
static struct irq_chip dio48e_irqchip = {
static const struct irq_chip dio48e_irqchip = {
.name = "104-dio-48e",
.irq_ack = dio48e_irq_ack,
.irq_mask = dio48e_irq_mask,
.irq_unmask = dio48e_irq_unmask,
.irq_set_type = dio48e_irq_set_type
.irq_set_type = dio48e_irq_set_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
@ -322,7 +326,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
girq = &dio48egpio->chip.irq;
girq->chip = &dio48e_irqchip;
gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
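
This driver and the other GPIO drivers further down are converted to the immutable-irqchip scheme: the irq_chip becomes const with IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS, mask/unmask call gpiochip_disable_irq()/gpiochip_enable_irq(), and probe uses gpio_irq_chip_set_chip() instead of assigning girq->chip. A condensed sketch of the pattern, not part of this commit (the foo_* names are placeholders; ack/set_type callbacks are omitted):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void foo_irq_mask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	/* ...mask the line in hardware... */
	gpiochip_disable_irq(chip, irqd_to_hwirq(data));
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	gpiochip_enable_irq(chip, irqd_to_hwirq(data));
	/* ...unmask the line in hardware... */
}

static const struct irq_chip foo_irqchip = {
	.name = "foo",
	.irq_mask = foo_irq_mask,
	.irq_unmask = foo_irq_unmask,
	.flags = IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* In probe: gpio_irq_chip_set_chip(&chip->irq, &foo_irqchip); */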


@ -113,6 +113,7 @@ static void idi_48_irq_mask(struct irq_data *data)
spin_lock_irqsave(&idi48gpio->lock, flags);
idi48gpio->irq_mask[boundary] &= ~mask;
gpiochip_disable_irq(chip, offset);
/* Exit early if there are still input lines with IRQ unmasked */
if (idi48gpio->irq_mask[boundary])
@ -140,6 +141,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
prev_irq_mask = idi48gpio->irq_mask[boundary];
gpiochip_enable_irq(chip, offset);
idi48gpio->irq_mask[boundary] |= mask;
/* Exit early if IRQ was already unmasked for this boundary */
@ -164,12 +166,14 @@ static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
static struct irq_chip idi_48_irqchip = {
static const struct irq_chip idi_48_irqchip = {
.name = "104-idi-48",
.irq_ack = idi_48_irq_ack,
.irq_mask = idi_48_irq_mask,
.irq_unmask = idi_48_irq_unmask,
.irq_set_type = idi_48_irq_set_type
.irq_set_type = idi_48_irq_set_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
@ -267,7 +271,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
girq = &idi48gpio->chip.irq;
girq->chip = &idi_48_irqchip;
gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;


@ -174,10 +174,11 @@ static void idio_16_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
const unsigned long mask = BIT(irqd_to_hwirq(data));
const unsigned long offset = irqd_to_hwirq(data);
unsigned long flags;
idio16gpio->irq_mask &= ~mask;
idio16gpio->irq_mask &= ~BIT(offset);
gpiochip_disable_irq(chip, offset);
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@ -192,11 +193,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
const unsigned long mask = BIT(irqd_to_hwirq(data));
const unsigned long offset = irqd_to_hwirq(data);
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
idio16gpio->irq_mask |= mask;
gpiochip_enable_irq(chip, offset);
idio16gpio->irq_mask |= BIT(offset);
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@ -217,12 +219,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
static struct irq_chip idio_16_irqchip = {
static const struct irq_chip idio_16_irqchip = {
.name = "104-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
.irq_set_type = idio_16_irq_set_type
.irq_set_type = idio_16_irq_set_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@ -299,7 +303,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->out_state = 0xFFFF;
girq = &idio16gpio->chip.irq;
girq->chip = &idio_16_irqchip;
gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;


@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
}
}
static void gpio_mockup_debugfs_cleanup(void *data)
{
struct gpio_mockup_chip *chip = data;
debugfs_remove_recursive(chip->dbg_dir);
}
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gpio_mockup_debugfs_setup(dev, chip);
return 0;
return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static const struct of_device_id gpio_mockup_of_match[] = {


@ -1175,7 +1175,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@ -1198,13 +1200,17 @@ static int pca953x_resume(struct device *dev)
}
}
mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
if (ret)
if (ret) {
mutex_unlock(&chip->i2c_lock);
return ret;
}
ret = regcache_sync(chip->regmap);
mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;


@ -661,24 +661,17 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
clk = clk_get(&pdev->dev, NULL);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
clk_put(clk);
return ret;
}
/* Initialize GPIO chips */
ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
if (ret) {
clk_put(clk);
if (ret)
return ret;
}
/* clear all GPIO edge detects */
for_each_gpio_bank(gpio, c, pchip) {


@ -46,10 +46,20 @@
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
* @intr_type: Interrupt type selection
* @bank_read: Read a bank setting as a single 32-bit value
* @bank_write: Write a bank setting as a single 32-bit value
* @imr_line_pos: Bit shift of an IRQ line's IMR value.
*
* The DIR, DATA, and ISR registers consist of four 8-bit port values, packed
* into a single 32-bit register. Use @bank_read (@bank_write) to get (assign)
* a value from (to) these registers. The IMR register consists of four 16-bit
* port values, packed into two 32-bit registers. Use @imr_line_pos to get the
* bit shift of the 2-bit field for a line's IMR settings. Shifts larger than
* 32 overflow into the second register.
*
* Because the interrupt mask register (IMR) combines the function of IRQ type
* selection and masking, two extra values are stored. @intr_mask is used to
* mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
* mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
* the selected interrupt types. The logical AND of these values is written to
* IMR on changes.
*/
@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
raw_spinlock_t lock;
u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
unsigned int (*port_offset_u8)(unsigned int port);
unsigned int (*port_offset_u16)(unsigned int port);
u8 intr_mask[REALTEK_GPIO_MAX];
u8 intr_type[REALTEK_GPIO_MAX];
u32 (*bank_read)(void __iomem *reg);
void (*bank_write)(void __iomem *reg, u32 value);
unsigned int (*line_imr_pos)(unsigned int line);
};
/* Expand with more flags as devices with other quirks are added */
@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
* port. The two interrupt mask registers store two bits per GPIO, so use u16
* values.
*/
static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
{
return port;
return ioread32be(reg);
}
static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
{
return 2 * port;
iowrite32be(value, reg);
}
static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
{
unsigned int port_pin = line % 8;
unsigned int port = line / 8;
return 2 * (8 * (port ^ 1) + port_pin);
}
/*
@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
* per GPIO, so use u16 values. The first register contains ports 1 and 0, the
* second ports 3 and 2.
*/
static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
static u32 realtek_gpio_bank_read(void __iomem *reg)
{
return 3 - port;
return ioread32(reg);
}
static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
{
return 2 * (port ^ 1);
iowrite32(value, reg);
}
static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
unsigned int port, u16 irq_type, u16 irq_mask)
static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
{
iowrite16(irq_type & irq_mask,
ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
return 2 * line;
}
static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
unsigned int port, u8 mask)
static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
{
iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
}
static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
{
return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
}
/* Set the rising and falling edge mask bits for a GPIO port pin */
static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
/* Set the rising and falling edge mask bits for a GPIO pin */
static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
{
return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
unsigned int line_shift = ctrl->line_imr_pos(line);
unsigned int shift = line_shift % 32;
u32 irq_type = ctrl->intr_type[line];
u32 irq_mask = ctrl->intr_mask[line];
u32 reg_val;
reg += 4 * (line_shift / 32);
reg_val = ioread32(reg);
reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
iowrite32(reg_val, reg);
}
static void realtek_gpio_irq_ack(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
irq_hw_number_t line = irqd_to_hwirq(data);
unsigned int port = line / 8;
unsigned int port_pin = line % 8;
realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
realtek_gpio_clear_isr(ctrl, BIT(line));
}
static void realtek_gpio_irq_unmask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
unsigned int port = line / 8;
unsigned int port_pin = line % 8;
unsigned long flags;
u16 m;
gpiochip_enable_irq(&ctrl->gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
m = ctrl->intr_mask[port];
m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
ctrl->intr_mask[port] = m;
realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
unsigned int port = line / 8;
unsigned int port_pin = line % 8;
unsigned long flags;
u16 m;
raw_spin_lock_irqsave(&ctrl->lock, flags);
m = ctrl->intr_mask[port];
m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
ctrl->intr_mask[port] = m;
realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
ctrl->intr_mask[line] = 0;
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
gpiochip_disable_irq(&ctrl->gc, line);
@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
unsigned int port = line / 8;
unsigned int port_pin = line % 8;
unsigned long flags;
u16 type, t;
u8 type;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
irq_set_handler_locked(data, handle_edge_irq);
raw_spin_lock_irqsave(&ctrl->lock, flags);
t = ctrl->intr_type[port];
t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
t |= realtek_gpio_imr_bits(port_pin, type);
ctrl->intr_type[port] = t;
realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
ctrl->intr_type[line] = type;
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
unsigned int lines_done;
unsigned int port_pin_count;
unsigned long status;
int offset;
chained_irq_enter(irq_chip, desc);
for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
status = realtek_gpio_read_isr(ctrl, lines_done / 8);
port_pin_count = min(gc->ngpio - lines_done, 8U);
for_each_set_bit(offset, &status, port_pin_count)
generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
}
status = realtek_gpio_read_isr(ctrl);
for_each_set_bit(offset, &status, gc->ngpio)
generic_handle_domain_irq(gc->irq.domain, offset);
chained_irq_exit(irq_chip, desc);
}
static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
unsigned int port, int cpu)
static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
{
return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
REALTEK_GPIO_PORTS_PER_BANK * cpu;
return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
}
static int realtek_gpio_irq_set_affinity(struct irq_data *data,
@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
unsigned int port = line / 8;
unsigned int port_pin = line % 8;
void __iomem *irq_cpu_mask;
unsigned long flags;
int cpu;
u8 v;
u32 v;
if (!ctrl->cpumask_base)
return -ENXIO;
@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
raw_spin_lock_irqsave(&ctrl->lock, flags);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
v = ioread8(irq_cpu_mask);
irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
v = ctrl->bank_read(irq_cpu_mask);
if (cpumask_test_cpu(cpu, dest))
v |= BIT(port_pin);
v |= BIT(line);
else
v &= ~BIT(port_pin);
v &= ~BIT(line);
iowrite8(v, irq_cpu_mask);
ctrl->bank_write(irq_cpu_mask, v);
}
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
static int realtek_gpio_irq_init(struct gpio_chip *gc)
{
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned int port;
u32 mask_all = GENMASK(gc->ngpio - 1, 0);
unsigned int line;
int cpu;
for (port = 0; (port * 8) < gc->ngpio; port++) {
realtek_gpio_write_imr(ctrl, port, 0, 0);
realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
for (line = 0; line < gc->ngpio; line++)
realtek_gpio_update_line_imr(ctrl, line);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
}
realtek_gpio_clear_isr(ctrl, mask_all);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
return 0;
}
@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
if (dev_flags & GPIO_PORTS_REVERSED) {
bgpio_flags = 0;
ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
ctrl->bank_read = realtek_gpio_bank_read;
ctrl->bank_write = realtek_gpio_bank_write;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
ctrl->bank_read = realtek_gpio_bank_read_swapped;
ctrl->bank_write = realtek_gpio_bank_write_swapped;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
err = bgpio_init(&ctrl->gc, dev, 4,
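
With the reworked helpers above, each GPIO line owns a 2-bit IMR field whose position comes from line_imr_pos(); positions of 32 or more spill into the second 32-bit IMR register. A standalone check of that arithmetic, not part of this commit (it mirrors only the non-swapped helper; the byte-swapped variant differs):

#include <stdio.h>

/* Mirror of realtek_gpio_line_imr_pos() from the diff (non-swapped layout). */
static unsigned int line_imr_pos(unsigned int line)
{
	return 2 * line;
}

int main(void)
{
	unsigned int line = 19;
	unsigned int pos = line_imr_pos(line);   /* 38 */
	unsigned int reg = pos / 32;             /* 1: second 32-bit IMR register */
	unsigned int shift = pos % 32;           /* 6: bit offset of the 2-bit field */

	printf("line %u -> IMR register %u, shift %u\n", line, reg, shift);
	return 0;
}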


@ -265,6 +265,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
gpiochip_disable_irq(chip, offset);
port_state = ws16c48gpio->irq_mask >> (8 * port);
/* Select Register Page 2; Unlock all I/O ports */
@ -295,6 +296,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
gpiochip_enable_irq(chip, offset);
ws16c48gpio->irq_mask |= mask;
port_state = ws16c48gpio->irq_mask >> (8 * port);
@ -356,12 +358,14 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return 0;
}
static struct irq_chip ws16c48_irqchip = {
static const struct irq_chip ws16c48_irqchip = {
.name = "ws16c48",
.irq_ack = ws16c48_irq_ack,
.irq_mask = ws16c48_irq_mask,
.irq_unmask = ws16c48_irq_unmask,
.irq_set_type = ws16c48_irq_set_type
.irq_set_type = ws16c48_irq_set_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
@ -463,7 +467,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
girq = &ws16c48gpio->chip.irq;
girq->chip = &ws16c48_irqchip;
gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;


@ -5524,7 +5524,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance_many(adev->pdev,
&peer_adev->dev, 1, true) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&


@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
sizeof(atom_ctx->vbios_version))) {
if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
return true;
else
return false;
else
return true;
} else {
return false;
}
default:
return false;
}


@ -159,7 +159,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
dma_fence_put(&job->hw_fence);
if (!job->hw_fence.ops)
kfree(job);
else
dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,


@ -2401,7 +2401,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
if (!ucode->fw)
if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&


@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;


@ -183,6 +183,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
mes_add_queue_pkt.trap_en = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),


@ -1094,7 +1094,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];


@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
if (enc->ctx->dce_version >= DCN_VERSION_3_15)
return true;
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&


@ -317,6 +317,7 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);


@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}


@ -454,6 +454,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {


@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
(src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
(src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
(src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
(src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
(src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();


@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
/* read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1
* so set it to 1/2 full = 7 before reset as suggested by hardware team.
*/
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);


@ -295,24 +295,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
// Include cursor size for CAB allocation
if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
switch (stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
cursor_size /= 2;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cursor_size *= 4;
break;
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
struct hubp *hubp = pipe->plane_res.hubp;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cursor_size *= 8;
break;
}
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
if (pipe->stream && pipe->plane_state && hubp)
/* Find the cursor plane and use the exact size instead of
* using the max for calculation
*/
if (hubp->curs_attr.width > 0) {
cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
break;
}
}
switch (stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
cursor_size /= 2;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cursor_size *= 4;
break;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cursor_size *= 8;
break;
}
if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
plane->address.grph.cursor_cache_addr.quad_part);
}
}
@ -325,6 +339,26 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
if (cache_lines_used % lines_per_way > 0)
num_ways++;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
plane = ctx->stream_status[i].plane_states[j];
if (stream->cursor_position.enable && plane &&
!plane->address.grph.cursor_cache_addr.quad_part &&
cursor_size > 16384) {
/* Cursor caching is not supported since it won't be on the same line.
* So we need an extra line to accommodate it. With large cursors and a single 4k monitor
* this case triggers corruption. If we're at the edge, then don't trigger display refresh
* from MALL. We only need to cache the cursor if it's greater than 64x64 at 4 bpp.
*/
num_ways++;
/* We only expect one cursor plane */
break;
}
}
}
return num_ways;
}


@ -144,7 +144,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
continue;
return false;
if (!pipe->plane_state)
return false;


@ -1014,6 +1014,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
// to re-initialize viewport after the pipe merge
for (int i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (!pipe_ctx->plane_state || !pipe_ctx->stream)
continue;
resource_build_scaling_params(pipe_ctx);
}
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {


@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)


@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
uint32_t reserved : 22;
uint32_t trap_en : 1;
uint32_t reserved : 21;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;


@ -25,7 +25,7 @@
#define SMU13_DRIVER_IF_V13_0_0_H
//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x22
#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8


@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@ -291,5 +291,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
void **table,
uint32_t *size,
uint32_t pptable_id);
#endif
#endif

Some files were not shown because too many files have changed in this diff.