Merge branch 'for-next' into for-linus

Pull the 6.5-devel branch for upstreaming.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Takashi Iwai 2023-06-26 15:23:15 +02:00
commit a15b513756
302 changed files with 14640 additions and 5133 deletions

View File

@ -18,7 +18,6 @@ Block
kyber-iosched
null_blk
pr
request
stat
switching-sched
writeback_cache_control

View File

@ -1,99 +0,0 @@
============================
struct request documentation
============================
Jens Axboe <jens.axboe@oracle.com> 27/05/02
.. FIXME:
No idea what this means - seems to be just noise, so comment it out
1.0
Index
2.0 Struct request members classification
2.1 struct request members explanation
3.0
2.0
Short explanation of request members
====================================
Classification flags:
= ====================
D driver member
B block layer member
I I/O scheduler member
= ====================
Unless an entry contains a D classification, a device driver must not access
this member. Some members may contain D classifications, but should only be
accessed through certain macros or functions (e.g. ->flags).
<linux/blkdev.h>
=============================== ======= =======================================
Member Flag Comment
=============================== ======= =======================================
struct list_head queuelist BI Organization on various internal
queues
``void *elevator_private`` I I/O scheduler private data
unsigned char cmd[16] D Driver can use this for setting up
a cdb before execution, see
blk_queue_prep_rq
unsigned long flags DBI Contains info about data direction,
request type, etc.
int rq_status D Request status bits
kdev_t rq_dev DBI Target device
int errors DB Error counts
sector_t sector DBI Target location
unsigned long hard_nr_sectors B Used to keep sector sane
unsigned long nr_sectors DBI Total number of sectors in request
unsigned long hard_nr_sectors B Used to keep nr_sectors sane
unsigned short nr_phys_segments DB Number of physical scatter gather
segments in a request
unsigned short nr_hw_segments DB Number of hardware scatter gather
segments in a request
unsigned int current_nr_sectors DB Number of sectors in first segment
of request
unsigned int hard_cur_sectors B Used to keep current_nr_sectors sane
int tag DB TCQ tag, if assigned
``void *special`` D Free to be used by driver
``char *buffer`` D Map of first segment, also see
section on bouncing SECTION
``struct completion *waiting`` D Can be used by driver to get signalled
on request completion
``struct bio *bio`` DBI First bio in request
``struct bio *biotail`` DBI Last bio in request
``struct request_queue *q`` DB Request queue this request belongs to
``struct request_list *rl`` B Request list this request came from
=============================== ======= =======================================

View File

@ -49,6 +49,7 @@ properties:
properties:
data-lanes:
minItems: 1
maxItems: 2
required:

View File

@ -17,20 +17,11 @@ description:
properties:
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
maxItems: 4
clock-names:
minItems: 3
items:
- const: pcie
- const: pcie_bus
- enum: [ pcie_phy, pcie_aux ]
- enum: [ pcie_inbound_axi, pcie_aux ]
maxItems: 4
num-lanes:
const: 1

View File

@ -31,6 +31,19 @@ properties:
- const: dbi
- const: addr_space
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
clock-names:
minItems: 3
maxItems: 4
interrupts:
items:
- description: builtin eDMA interrupter.
@ -49,6 +62,31 @@ required:
allOf:
- $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
- $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml#
- if:
properties:
compatible:
enum:
- fsl,imx8mq-pcie-ep
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_aux
else:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_aux
unevaluatedProperties: false

View File

@ -40,6 +40,19 @@ properties:
- const: dbi
- const: config
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
clock-names:
minItems: 3
maxItems: 4
interrupts:
items:
- description: builtin MSI controller.
@ -77,6 +90,70 @@ required:
allOf:
- $ref: /schemas/pci/snps,dw-pcie.yaml#
- $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml#
- if:
properties:
compatible:
enum:
- fsl,imx6sx-pcie
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_inbound_axi
- if:
properties:
compatible:
enum:
- fsl,imx8mq-pcie
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_aux
- if:
properties:
compatible:
enum:
- fsl,imx6q-pcie
- fsl,imx6qp-pcie
- fsl,imx7d-pcie
then:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- if:
properties:
compatible:
enum:
- fsl,imx8mm-pcie
- fsl,imx8mp-pcie
then:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_aux
unevaluatedProperties: false

View File

@ -776,10 +776,11 @@ peer_notif_delay
Specify the delay, in milliseconds, between each peer
notification (gratuitous ARP and unsolicited IPv6 Neighbor
Advertisement) when they are issued after a failover event.
This delay should be a multiple of the link monitor interval
(arp_interval or miimon, whichever is active). The default
value is 0 which means to match the value of the link monitor
interval.
This delay should be a multiple of the MII link monitor interval
(miimon).
The valid range is 0 - 300000. The default value is 0, which means
to match the value of the MII link monitor interval.
prio
Slave priority. A higher number means higher priority.

View File

@ -116,8 +116,8 @@ Contents:
udplite
vrf
vxlan
x25-iface
x25
x25-iface
xfrm_device
xfrm_proc
xfrm_sync

View File

@ -1,8 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
============================-
X.25 Device Driver Interface
============================-
============================
Version 1.1

View File

@ -227,7 +227,7 @@ PCM stream related controls
name='EMU10K1 PCM Volume',index 0-31
------------------------------------
Channel volume attenuation in range 0-0xffff. The maximum value (no
Channel volume attenuation in range 0-0x1fffd. The middle value (no
attenuation) is default. The channel mapping for three values is
as follows:
@ -240,30 +240,30 @@ name='EMU10K1 PCM Send Routing',index 0-31
This control specifies the destination - FX-bus accumulators. There are 24
values in this mapping:
* 0 - mono, A destination (FX-bus 0-63), default 0
* 1 - mono, B destination (FX-bus 0-63), default 1
* 2 - mono, C destination (FX-bus 0-63), default 2
* 3 - mono, D destination (FX-bus 0-63), default 3
* 4 - mono, E destination (FX-bus 0-63), default 0
* 5 - mono, F destination (FX-bus 0-63), default 0
* 6 - mono, G destination (FX-bus 0-63), default 0
* 7 - mono, H destination (FX-bus 0-63), default 0
* 8 - left, A destination (FX-bus 0-63), default 0
* 9 - left, B destination (FX-bus 0-63), default 1
* 0 - mono, A destination (FX-bus 0-63), default 0
* 1 - mono, B destination (FX-bus 0-63), default 1
* 2 - mono, C destination (FX-bus 0-63), default 2
* 3 - mono, D destination (FX-bus 0-63), default 3
* 4 - mono, E destination (FX-bus 0-63), default 4
* 5 - mono, F destination (FX-bus 0-63), default 5
* 6 - mono, G destination (FX-bus 0-63), default 6
* 7 - mono, H destination (FX-bus 0-63), default 7
* 8 - left, A destination (FX-bus 0-63), default 0
* 9 - left, B destination (FX-bus 0-63), default 1
* 10 - left, C destination (FX-bus 0-63), default 2
* 11 - left, D destination (FX-bus 0-63), default 3
* 12 - left, E destination (FX-bus 0-63), default 0
* 13 - left, F destination (FX-bus 0-63), default 0
* 14 - left, G destination (FX-bus 0-63), default 0
* 15 - left, H destination (FX-bus 0-63), default 0
* 12 - left, E destination (FX-bus 0-63), default 4
* 13 - left, F destination (FX-bus 0-63), default 5
* 14 - left, G destination (FX-bus 0-63), default 6
* 15 - left, H destination (FX-bus 0-63), default 7
* 16 - right, A destination (FX-bus 0-63), default 0
* 17 - right, B destination (FX-bus 0-63), default 1
* 18 - right, C destination (FX-bus 0-63), default 2
* 19 - right, D destination (FX-bus 0-63), default 3
* 20 - right, E destination (FX-bus 0-63), default 0
* 21 - right, F destination (FX-bus 0-63), default 0
* 22 - right, G destination (FX-bus 0-63), default 0
* 23 - right, H destination (FX-bus 0-63), default 0
* 20 - right, E destination (FX-bus 0-63), default 4
* 21 - right, F destination (FX-bus 0-63), default 5
* 22 - right, G destination (FX-bus 0-63), default 6
* 23 - right, H destination (FX-bus 0-63), default 7
Don't forget that it's illegal to assign a channel to the same FX-bus accumulator
more than once (it means 0=0 && 1=0 is an invalid combination).

View File

@ -17,3 +17,4 @@ Card-Specific Information
hdspm
serial-u16550
img-spdif-in
pcmtest

View File

@ -0,0 +1,120 @@
.. SPDX-License-Identifier: GPL-2.0
The Virtual PCM Test Driver
===========================
The Virtual PCM Test Driver emulates a generic PCM device, and can be used for
testing/fuzzing of userspace ALSA applications, as well as for testing/fuzzing of
the PCM middle layer. Additionally, it can be used for simulating hard-to-reproduce
problems with PCM devices.
What can this driver do?
~~~~~~~~~~~~~~~~~~~~~~~~
At this moment the driver can do the following things:
* Simulate both capture and playback processes
* Generate random or pattern-based capturing data
* Inject delays into the playback and capturing processes
* Inject errors during the PCM callbacks
It supports up to 8 substreams and 4 channels. Also it supports both interleaved and
non-interleaved access modes.
Also, this driver can check whether the playback stream contains the predefined pattern,
which is used in the corresponding selftest (alsa/pcmtest-test.sh) to check the PCM middle
layer data transfer functionality. Additionally, this driver redefines the default
RESET ioctl, and the selftest covers this PCM API functionality as well.
Configuration
-------------
The driver has several parameters besides the common ALSA module parameters:
* fill_mode (bool) - Buffer fill mode (see below)
* inject_delay (int)
* inject_hwpars_err (bool)
* inject_prepare_err (bool)
* inject_trigger_err (bool)
Capture Data Generation
-----------------------
The driver has two modes of data generation: the first (0 in the fill_mode parameter)
means random data generation; the second (1 in fill_mode) means pattern-based
data generation. Let's look at the second mode.
First of all, you may want to specify the pattern for data generation. You can do it
by writing the pattern to the debugfs file. There are pattern buffer debugfs entries
for each channel, as well as entries which contain the pattern buffer length.
* /sys/kernel/debug/pcmtest/fill_pattern[0-3]
* /sys/kernel/debug/pcmtest/fill_pattern[0-3]_len
To set the pattern for the channel 0 you can execute the following command:
.. code-block:: bash
echo -n mycoolpattern > /sys/kernel/debug/pcmtest/fill_pattern0
Then, after every capture action performed on the 'pcmtest' device the buffer for the
channel 0 will contain 'mycoolpatternmycoolpatternmycoolpatternmy...'.
The pattern itself can be up to 4096 bytes long.
Delay injection
---------------
The driver has an 'inject_delay' parameter, whose name is self-descriptive; it
can be used for time delay/speedup simulations. The parameter is an integer, and
it specifies the delay added between the module's internal timer ticks.
If the 'inject_delay' value is positive, the buffer will be filled more slowly; if it is
negative, faster. You can try it yourself by starting a recording in any
audio recording application (such as Audacity) and selecting the 'pcmtest' device as a
source.
This parameter can also be used for generating a huge amount of sound data in a very
short period of time (with a negative 'inject_delay' value).
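For instance, a minimal userspace sketch of setting this delay from C; this is a hedged illustration only - the `/sys/module/snd_pcmtest/parameters/inject_delay` path is inferred from the parameter list above and from the error-injection example below:

.. code-block:: c

   #include <stdio.h>

   /* Hypothetical helper: write the inject_delay module parameter of
    * snd-pcmtest; assumes the module is loaded and the parameter is
    * writable by the current user. */
   static int pcmtest_set_delay(int ticks)
   {
       FILE *f = fopen("/sys/module/snd_pcmtest/parameters/inject_delay", "w");

       if (!f)
           return -1;
       fprintf(f, "%d\n", ticks);
       return fclose(f);
   }

   int main(void)
   {
       /* Slow the virtual stream down by 5 internal timer ticks. */
       return pcmtest_set_delay(5) ? 1 : 0;
   }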
Error injection
---------------
This module can be used for injecting errors into the PCM communication process. This
can help you figure out how a userspace ALSA program behaves under unusual
circumstances.
For example, you can make all 'hw_params' PCM callback calls return EBUSY error by
writing '1' to the 'inject_hwpars_err' module parameter:
.. code-block:: bash
echo 1 > /sys/module/snd_pcmtest/parameters/inject_hwpars_err
Errors can be injected into the following PCM callbacks:
* hw_params (EBUSY)
* prepare (EINVAL)
* trigger (EINVAL)
Playback test
-------------
This driver can also be used for testing the playback functionality - every time you
write playback data to the 'pcmtest' PCM device and close it, the driver checks whether
the buffer contains the looped pattern (which is specified in the fill_pattern
debugfs file for each channel). If the playback buffer content matches the looped
pattern, the 'pc_test' debugfs entry is set to '1'. Otherwise, the driver sets it to '0'.
ioctl redefinition test
-----------------------
The driver redefines the 'reset' ioctl, which is the default for all PCM devices. To test
this functionality, we can trigger the reset ioctl and check the 'ioctl_test' debugfs
entry:
.. code-block:: bash
cat /sys/kernel/debug/pcmtest/ioctl_test
If the ioctl is triggered successfully, this file will contain '1', and '0' otherwise.

View File

@ -258,7 +258,7 @@ PCM stream related controls
``name='EMU10K1 PCM Volume',index 0-31``
----------------------------------------
Channel volume attenuation in range 0-0xffff. The maximum value (no
Channel volume attenuation in range 0-0x1fffd. The middle value (no
attenuation) is default. The channel mapping for three values is
as follows:

View File

@ -268,11 +268,12 @@ with setting of meta_data and signalling for next track ::
| |
| V
| +----------+
| | |
| |NEXT_TRACK|
| | |
| +----------+
| |
| compr_set_params() | |
| +-----------|NEXT_TRACK|
| | | |
| | +--+-------+
| | | |
| +--------------+ |
| |
| | compr_partial_drain()
| |

View File

@ -15,3 +15,4 @@ Designs and Implementations
oss-emulation
seq-oss
jack-injection
midi-2.0

View File

@ -0,0 +1,378 @@
=================
MIDI 2.0 on Linux
=================
General
=======
MIDI 2.0 is an extended protocol for providing higher resolutions and
more fine controls over the legacy MIDI 1.0. The fundamental changes
introduced for supporting MIDI 2.0 are:
- Support of Universal MIDI Packet (UMP)
- Support of MIDI 2.0 protocol messages
- Transparent conversions between UMP and legacy MIDI 1.0 byte stream
- MIDI-CI for property and profile configurations
UMP is a new container format to hold all MIDI protocol 1.0 and MIDI
2.0 protocol messages. Unlike the former byte stream, it's 32bit
aligned, and each message can be put in a single packet. UMP can send
events to up to 16 "UMP Groups", where each UMP Group contains up to
16 MIDI channels.
MIDI 2.0 protocol is an extended protocol to achieve the higher
resolution and more controls over the old MIDI 1.0 protocol.
MIDI-CI is a high-level protocol that can talk with the MIDI device
for the flexible profiles and configurations. It's represented in the
form of special SysEx.
For Linux implementations, the kernel supports the UMP transport and
the encoding/decoding of MIDI protocols on UMP, while MIDI-CI is
supported in user-space over the standard SysEx.
As of this writing, only the USB MIDI device supports UMP and MIDI
2.0 natively. The UMP support itself is pretty generic, hence it
could be used by other transport layers, although it could be
implemented differently (e.g. as an ALSA sequencer client), too.
The access to UMP devices is provided in two ways: access via the
rawmidi device and access via the ALSA sequencer API.
ALSA sequencer API was extended to allow the payload of UMP packets.
It's allowed to connect freely between MIDI 1.0 and MIDI 2.0 sequencer
clients, and the events are converted transparently.
Kernel Configuration
====================
The following new configs are added for supporting MIDI 2.0:
`CONFIG_SND_UMP`, `CONFIG_SND_UMP_LEGACY_RAWMIDI`,
`CONFIG_SND_SEQ_UMP`, `CONFIG_SND_SEQ_UMP_CLIENT`, and
`CONFIG_SND_USB_AUDIO_MIDI_V2`. The first visible one is
`CONFIG_SND_USB_AUDIO_MIDI_V2`, and when you choose it (to set `=y`),
the core support for UMP (`CONFIG_SND_UMP`) and the sequencer binding
(`CONFIG_SND_SEQ_UMP_CLIENT`) will be automatically selected.
Additionally, `CONFIG_SND_UMP_LEGACY_RAWMIDI=y` will enable the
support for the legacy raw MIDI device for UMP Endpoints.
Rawmidi Device with USB MIDI 2.0
================================
When a device supports MIDI 2.0, the USB-audio driver probes and uses
the MIDI 2.0 interface (which is always found at altset 1) by
default instead of the MIDI 1.0 interface (at altset 0). You can
switch back to the binding with the old MIDI 1.0 interface by passing
the `midi2_enable=0` option to the snd-usb-audio driver module.
The USB audio driver tries to query the UMP Endpoint and UMP Function
Block information that is provided since UMP v1.1, and builds up the
topology based on that information. When the device is older and
doesn't respond to the new UMP inquiries, the driver falls back and
builds the topology based on Group Terminal Block (GTB) information
from the USB descriptor. Some devices might be screwed up by the
unexpected UMP commands; in such a case, pass the `midi2_probe=0` option to
the snd-usb-audio driver to skip the UMP v1.1 inquiries.
When the MIDI 2.0 device is probed, the kernel creates a rawmidi
device for each UMP Endpoint of the device. Its device name is
`/dev/snd/umpC*D*` and different from the standard rawmidi device name
`/dev/snd/midiC*D*` for MIDI 1.0, in order to avoid legacy
applications mistakenly accessing UMP devices.
You can read and write UMP packet data directly from/to this UMP
rawmidi device. For example, reading via `hexdump` like below will
show the incoming UMP packets of the card 0 device 0 in the hex
format::
% hexdump -C /dev/snd/umpC0D0
00000000 01 07 b0 20 00 07 b0 20 64 3c 90 20 64 3c 80 20 |... ... d<. d<. |
Unlike the MIDI 1.0 byte stream, UMP is a 32bit packet, and the size
for reading or writing the device is also aligned to 32bit (which is 4
bytes).
The 32-bit words in the UMP packet payload are always in CPU native
endianness. Transport drivers are responsible for converting UMP words
between the system endianness and the endianness/byte order required
by the transport.
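As a hedged illustration of the word-oriented access described above, a standalone userspace sketch (not taken from the kernel sources; card 0, device 0 is assumed):

.. code-block:: c

   #include <fcntl.h>
   #include <stdint.h>
   #include <stdio.h>
   #include <unistd.h>

   int main(void)
   {
       /* UMP rawmidi device as described above (card 0, device 0). */
       int fd = open("/dev/snd/umpC0D0", O_RDONLY);
       uint32_t word;

       if (fd < 0)
           return 1;
       /* Reads are 32-bit aligned; each word is already in CPU-native
        * endianness, so no byte swapping is needed here. */
       while (read(fd, &word, sizeof(word)) == sizeof(word))
           printf("%08x\n", (unsigned int)word);
       close(fd);
       return 0;
   }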
When `CONFIG_SND_UMP_LEGACY_RAWMIDI` is set, the driver creates
another standard raw MIDI device additionally as `/dev/snd/midiC*D*`.
This contains 16 substreams, and each substream corresponds to a
(0-based) UMP Group. Legacy applications can access the specified
group via each substream in MIDI 1.0 byte stream format. With the
ALSA rawmidi API, you can open an arbitrary substream, while just
opening `/dev/snd/midiC*D*` will end up opening the first
substream.
Each UMP Endpoint can provide the additional information, constructed
from the information inquired via UMP 1.1 Stream messages or USB MIDI
2.0 descriptors. And a UMP Endpoint may contain one or more UMP
Blocks, where UMP Block is an abstraction introduced in the ALSA UMP
implementations to represent the associations among UMP Groups. UMP
Block corresponds to Function Block in UMP 1.1 specification. When
UMP 1.1 Function Block information isn't available, it's filled
partially from Group Terminal Block (GTB) as defined in USB MIDI 2.0
specifications.
The information about UMP Endpoints and UMP Blocks is found in the proc
file `/proc/asound/card*/midi*`. For example::
% cat /proc/asound/card1/midi0
ProtoZOA MIDI
Type: UMP
EP Name: ProtoZOA
EP Product ID: ABCD12345678
UMP Version: 0x0000
Protocol Caps: 0x00000100
Protocol: 0x00000100
Num Blocks: 3
Block 0 (ProtoZOA Main)
Direction: bidirection
Active: Yes
Groups: 1-1
Is MIDI1: No
Block 1 (ProtoZOA Ext IN)
Direction: output
Active: Yes
Groups: 2-2
Is MIDI1: Yes (Low Speed)
....
Note that `Groups` field shown in the proc file above indicates the
1-based UMP Group numbers (from-to).
Those additional UMP Endpoint and UMP Block information can be
obtained via the new ioctls `SNDRV_UMP_IOCTL_ENDPOINT_INFO` and
`SNDRV_UMP_IOCTL_BLOCK_INFO`, respectively.
The rawmidi name and the UMP Endpoint name are usually identical, and
in the case of USB MIDI, it's taken from `iInterface` of the
corresponding USB MIDI interface descriptor. If it's not provided,
it's copied from `iProduct` of the USB device descriptor as a
fallback.
The Endpoint Product ID is a string field and supposed to be unique.
It's copied from `iSerialNumber` of the device for USB MIDI.
The protocol capabilities and the actual protocol bits are defined in
`asound.h`.
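As a rough userspace sketch of the endpoint inquiry described above; the header location and the exact field names of `snd_ump_endpoint_info` (`name`, `num_blocks`) are assumptions derived from this text, not authoritative:

.. code-block:: c

   #include <fcntl.h>
   #include <stdio.h>
   #include <string.h>
   #include <sys/ioctl.h>
   #include <unistd.h>
   #include <sound/asound.h>   /* UMP definitions; header location assumed */

   int main(void)
   {
       int fd = open("/dev/snd/umpC0D0", O_RDONLY);
       struct snd_ump_endpoint_info ep;

       if (fd < 0)
           return 1;
       memset(&ep, 0, sizeof(ep));
       if (ioctl(fd, SNDRV_UMP_IOCTL_ENDPOINT_INFO, &ep) == 0)
           /* Field names assumed from the description above. */
           printf("EP '%s', %u block(s)\n", ep.name, ep.num_blocks);
       close(fd);
       return 0;
   }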
ALSA Sequencer with USB MIDI 2.0
================================
In addition to the rawmidi interfaces, ALSA sequencer interface
supports the new UMP MIDI 2.0 device, too. Now, each ALSA sequencer
client may set its MIDI version (0, 1 or 2) to declare itself being
either the legacy, UMP MIDI 1.0 or UMP MIDI 2.0 device, respectively.
The first, legacy client is the one that sends/receives the old
sequencer event as was. Meanwhile, UMP MIDI 1.0 and 2.0 clients send
and receive in the extended event record for UMP. The MIDI version is
seen in the new `midi_version` field of `snd_seq_client_info`.
A UMP packet can be sent/received embedded in a sequencer event by
specifying the new event flag bit `SNDRV_SEQ_EVENT_UMP`. When this
flag is set, the event has 16 byte (128 bit) data payload for holding
the UMP packet. Without the `SNDRV_SEQ_EVENT_UMP` bit flag, the event
is treated as a legacy event as it was (with max 12 byte data
payload).
With `SNDRV_SEQ_EVENT_UMP` flag set, the type field of a UMP sequencer
event is ignored (but it should be set to 0 as default).
The type of each client can be seen in `/proc/asound/seq/clients`.
For example::
% cat /proc/asound/seq/clients
Client info
cur clients : 3
....
Client 14 : "Midi Through" [Kernel Legacy]
Port 0 : "Midi Through Port-0" (RWe-)
Client 20 : "ProtoZOA" [Kernel UMP MIDI1]
UMP Endpoint: ProtoZOA
UMP Block 0: ProtoZOA Main [Active]
Groups: 1-1
UMP Block 1: ProtoZOA Ext IN [Active]
Groups: 2-2
UMP Block 2: ProtoZOA Ext OUT [Active]
Groups: 3-3
Port 0 : "MIDI 2.0" (RWeX) [In/Out]
Port 1 : "ProtoZOA Main" (RWeX) [In/Out]
Port 2 : "ProtoZOA Ext IN" (-We-) [Out]
Port 3 : "ProtoZOA Ext OUT" (R-e-) [In]
Here you can find two types of kernel clients, "Legacy" for client 14,
and "UMP MIDI1" for client 20, which is a USB MIDI 2.0 device.
A USB MIDI 2.0 client always exposes port 0 as "MIDI 2.0" and the
remaining ports, starting from 1, one for each UMP Group (e.g. port 1 for Group 1).
In this example, the device has three active groups (Main, Ext IN and
Ext OUT), and those are exposed as sequencer ports from 1 to 3.
The "MIDI 2.0" port is for a UMP Endpoint, and its difference from
other UMP Group ports is that UMP Endpoint port sends the events from
the all ports on the device ("catch-all"), while each UMP Group port
sends only the events from the given UMP Group.
Also, UMP groupless messages (such as the UMP message type 0x0f) are
sent only to the UMP Endpoint port.
Note that, although each UMP sequencer client usually creates 16
ports, those ports that don't belong to any UMP Blocks (or belong
to inactive UMP Blocks) are marked as inactive, and they don't appear
in the proc outputs. In the example above, the sequencer ports from 4
to 16 are present but not shown there.
The proc file above shows the UMP Block information, too. The same
entry (but with more detailed information) is found in the rawmidi
proc output.
When clients are connected between different MIDI versions, the events
are translated automatically depending on the client's version, not
only between the legacy and the UMP MIDI 1.0/2.0 types, but also
between UMP MIDI 1.0 and 2.0 types, too. For example, running
`aseqdump` program on the ProtoZOA Main port in the legacy mode will
give you the output like::
% aseqdump -p 20:1
Waiting for data. Press Ctrl+C to end.
Source Event Ch Data
20:1 Note on 0, note 60, velocity 100
20:1 Note off 0, note 60, velocity 100
20:1 Control change 0, controller 11, value 4
When you run `aseqdump` in MIDI 2.0 mode, it'll receive the high
precision data like::
% aseqdump -u 2 -p 20:1
Waiting for data. Press Ctrl+C to end.
Source Event Ch Data
20:1 Note on 0, note 60, velocity 0xc924, attr type = 0, data = 0x0
20:1 Note off 0, note 60, velocity 0xc924, attr type = 0, data = 0x0
20:1 Control change 0, controller 11, value 0x2000000
while the data is automatically converted by ALSA sequencer core.
Rawmidi API Extensions
======================
* The additional UMP Endpoint information can be obtained via the new
ioctl `SNDRV_UMP_IOCTL_ENDPOINT_INFO`. It contains the associated
card and device numbers, the bit flags, the protocols, the number of
UMP Blocks, the name string of the endpoint, etc.
The protocols are specified in two fields, the protocol capabilities
and the current protocol. Both contain the bit flags specifying the
MIDI protocol version (`SNDRV_UMP_EP_INFO_PROTO_MIDI1` or
`SNDRV_UMP_EP_INFO_PROTO_MIDI2`) in the upper byte and the jitter
reduction timestamp (`SNDRV_UMP_EP_INFO_PROTO_JRTS_TX` and
`SNDRV_UMP_EP_INFO_PROTO_JRTS_RX`) in the lower byte.
A UMP Endpoint may contain up to 32 UMP Blocks, and the number of
currently assigned blocks is shown in the Endpoint information.
* Each UMP Block information can be obtained via another new ioctl
`SNDRV_UMP_IOCTL_BLOCK_INFO`. The block ID number (0-based) has to
be passed for the block to query. The received data contains the
associated direction of the block, the first associated group ID
(0-based) and the number of groups, the name string of the block,
etc.
The direction is either `SNDRV_UMP_DIR_INPUT`,
`SNDRV_UMP_DIR_OUTPUT` or `SNDRV_UMP_DIR_BIDIRECTION`.
* For a device that supports UMP v1.1, the UMP MIDI protocol can be
switched via the "Stream Configuration Request" message (UMP type 0x0f,
status 0x05). When UMP core receives such a message, it updates the
UMP EP info and the corresponding sequencer clients as well.
Control API Extensions
======================
* The new ioctl `SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE` is introduced for
querying the next UMP rawmidi device, while the existing ioctl
`SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE` queries only the legacy
rawmidi devices.
For setting the subdevice (substream number) to be opened, use the
ioctl `SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE` like the normal
rawmidi.
* Two new ioctls `SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO` and
`SNDRV_CTL_IOCTL_UMP_BLOCK_INFO` provide the UMP Endpoint and UMP
Block information of the specified UMP device via ALSA control API
without opening the actual (UMP) rawmidi device.
The `card` field is ignored upon inquiry, always tied with the card
of the control interface.
Sequencer API Extensions
========================
* `midi_version` field is added to `snd_seq_client_info` to indicate
the current MIDI version (either 0, 1 or 2) of each client.
When `midi_version` is 1 or 2, the alignment of read from a UMP
sequencer client is also changed from the former 28 bytes to 32
bytes for the extended payload. The alignment size for the write
isn't changed, but each event size may differ depending on the new
bit flag below.
* `SNDRV_SEQ_EVENT_UMP` flag bit is added for each sequencer event
flags. When this bit flag is set, the sequencer event is extended
to have a larger payload of 16 bytes instead of the legacy 12
bytes, and the event contains the UMP packet in the payload.
* The new sequencer port type bit (`SNDRV_SEQ_PORT_TYPE_MIDI_UMP`)
indicates the port being UMP-capable.
* The sequencer ports have new capability bits to indicate the
inactive ports (`SNDRV_SEQ_PORT_CAP_INACTIVE`) and the UMP Endpoint
port (`SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT`).
* The event conversion of ALSA sequencer clients can be suppressed by setting the
new filter bit `SNDRV_SEQ_FILTER_NO_CONVERT` in the client info.
For example, the kernel pass-through client (`snd-seq-dummy`) sets
this flag internally.
* The port information gained the new field `direction` to indicate
the direction of the port (either `SNDRV_SEQ_PORT_DIR_INPUT`,
`SNDRV_SEQ_PORT_DIR_OUTPUT` or `SNDRV_SEQ_PORT_DIR_BIDIRECTION`).
* Another additional field for the port information is `ump_group`
which specifies the associated UMP Group Number (1-based).
When it's non-zero, the UMP group field in the UMP packet is updated
upon delivery to the specified group (corrected to be 0-based).
Each sequencer port is supposed to set this field if it's a port
specific to a certain UMP group.
* Each client may set the additional event filter for UMP Groups in
`group_filter` bitmap. The filter consists of a bitmap of 1-based
Group numbers. For example, when bit 1 is set, messages from
Group 1 (i.e. the very first group) are filtered and not delivered.
The bit 0 is used for filtering UMP groupless messages.
* Two new ioctls are added for UMP-capable clients:
`SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO` and
`SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO`. They are used to get and set
either `snd_ump_endpoint_info` or `snd_ump_block_info` data
associated with the sequencer client. The USB MIDI driver provides
those information from the underlying UMP rawmidi, while a
user-space client may provide its own data via `*_SET` ioctl.
For an Endpoint data, pass 0 to the `type` field, while for a Block
data, pass the block number + 1 to the `type` field.
Setting the data for a kernel client shall result in an error.
* With UMP 1.1, Function Block information may be changed
dynamically. When the update of Function Block is received from the
device, ALSA sequencer core changes the corresponding sequencer port
name and attributes accordingly, and notifies the changes via an
announcement to the ALSA sequencer system port, similarly to the
normal port change notification.

File diff suppressed because it is too large

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*

View File

@ -308,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
{
unsigned long bytes = 0;
unsigned long insn;
unsigned long result = 0;
/*
* unwind_get_byte() will advance `ctrl` one instruction at a time, so
* loop until we get an instruction byte where bit 7 is not set.
*
* Note: This decodes a maximum of 4 bytes to output 28 bits data where
* max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
* it is sufficient for unwinding the stack.
*/
do {
insn = unwind_get_byte(ctrl);
result |= (insn & 0x7f) << (bytes * 7);
bytes++;
} while (!!(insn & 0x80) && (bytes != sizeof(result)));
return result;
}
/*
* Execute the current unwind instruction.
*/
@ -361,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
unsigned long uleb128 = unwind_get_byte(ctrl);
unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
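For reference, a hedged standalone sketch of the ULEB128 scheme that the new unwind_decode_uleb128() helper above implements: the low seven bits of each byte carry data, bit 7 marks a continuation byte, and decoding stops after a bounded number of bytes.

.. code-block:: c

   #include <assert.h>
   #include <stddef.h>

   /* Standalone ULEB128 decoder mirroring the unwinder's scheme. */
   static unsigned long decode_uleb128(const unsigned char *p, size_t max_bytes)
   {
       unsigned long result = 0;
       size_t i;

       for (i = 0; i < max_bytes; i++) {
           result |= (unsigned long)(p[i] & 0x7f) << (i * 7);
           if (!(p[i] & 0x80))
               break;
       }
       return result;
   }

   int main(void)
   {
       /* 0xE5 0x8E 0x26 is the classic LEB128 example; it decodes to 624485. */
       const unsigned char enc[] = { 0xe5, 0x8e, 0x26 };

       assert(decode_uleb128(enc, sizeof(enc)) == 624485);
       return 0;
   }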

View File

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
* @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
* @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
/**
* jornada_ssp_inout - decide if input is command or trading byte
* @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure

View File

@ -23,6 +23,9 @@
@
ENTRY(do_vfp)
mov r1, r10
mov r3, r9
b vfp_entry
str lr, [sp, #-8]!
add r3, sp, #4
str r9, [r3]
bl vfp_entry
ldr pc, [sp], #8
ENDPROC(do_vfp)

View File

@ -172,13 +172,14 @@ vfp_hw_state_valid:
@ out before setting an FPEXC that
@ stops us reading stuff
VFPFMXR FPEXC, r1 @ Restore FPEXC last
mov sp, r3 @ we think we have handled things
pop {lr}
sub r2, r2, #4 @ Retry current instruction - if Thumb
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
mov lr, r3 @ we think we have handled things
local_bh_enable_and_ret:
adr r0, .
mov r1, #SOFTIRQ_DISABLE_OFFSET
@ -209,8 +210,9 @@ skip:
process_exception:
DBGSTR "bounce"
mov sp, r3 @ setup for a return to the user code.
pop {lr}
mov r2, sp @ nothing stacked - regdump is at TOS
mov lr, r3 @ setup for a return to the user code.
@ Now call the C code to package up the bounce to the support code
@ r0 holds the trigger instruction

View File

@ -413,12 +413,12 @@ extern void paging_init (void);
* For the 64bit version, the offset is extended by 32bit.
*/
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
(((x).val >> 8) & ~0x7) )
#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \
(((x).val >> 10) << 3) )
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) & 0x1f) | \
((offset & 0x7) << 6) | \
((offset & ~0x7) << 8) })
((offset & 0x7) << 5) | \
((offset >> 3) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
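To make the new bit layout above concrete, a hedged userspace round-trip sketch of the same encode/decode arithmetic (type in bits 0-4, the low three offset bits in entry bits 5-7, the remaining offset bits from bit 10 upward; bits 8-9 are left unused by this encoding):

.. code-block:: c

   #include <assert.h>

   static unsigned long mk_entry(unsigned long type, unsigned long offset)
   {
       return (type & 0x1f) | ((offset & 0x7) << 5) | ((offset >> 3) << 10);
   }

   static unsigned long entry_type(unsigned long val)
   {
       return val & 0x1f;
   }

   static unsigned long entry_offset(unsigned long val)
   {
       return ((val >> 5) & 0x7) | ((val >> 10) << 3);
   }

   int main(void)
   {
       unsigned long off, e;

       /* Round-trip a range of offsets with an arbitrary type value. */
       for (off = 0; off < 4096; off++) {
           e = mk_entry(0x11, off);
           assert(entry_type(e) == 0x11);
           assert(entry_offset(e) == off);
       }
       return 0;
   }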

View File

@ -4,6 +4,8 @@
#include <linux/console.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

View File

@ -22,7 +22,7 @@ KCOV_INSTRUMENT := n
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
--prefix-alloc-sections=.init
--prefix-alloc-sections=.init.pi
$(obj)/%.pi.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)

View File

@ -84,11 +84,8 @@ SECTIONS
__init_data_begin = .;
INIT_DATA_SECTION(16)
/* Those sections result from the compilation of kernel/pi/string.c */
.init.pidata : {
*(.init.srodata.cst8*)
*(.init__bug_table*)
*(.init.sdata*)
.init.pi : {
*(.init.pi*)
}
.init.bss : {

View File

@ -1703,10 +1703,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (has_branch_stack(event))
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);

View File

@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
bool update = cpuc->n_pebs == 1;
if (cpuc->n_pebs == 1)
cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);
update = true;
cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}
/*
@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;
/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
if (cpuc->n_pebs == 1) {
cpuc->pebs_data_cfg = 0;
cpuc->pebs_record_size = sizeof(struct pebs_basic);
}
pebs_data_cfg = pebs_update_adaptive_cfg(event);
/* Update pebs_record_size if new event requires more data. */
if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
cpuc->pebs_data_cfg |= pebs_data_cfg;
adaptive_pebs_record_size_update();
update = true;
}
/*
* Be sure to update the thresholds when we change the record.
*/
if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
if (update)
pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}
static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
{
if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
unsigned int idx = hwc->idx;
@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
/*
* drain_pebs() assumes uniform record size;
* hence we need to drain when changing said
* size.
*/
intel_pmu_drain_large_pebs(cpuc);
adaptive_pebs_record_size_update();
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
cpuc->pebs_data_cfg = pebs_data_cfg;
pebs_update_threshold(cpuc);
}
if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
intel_pmu_drain_large_pebs(cpuc);
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

View File

@ -121,6 +121,9 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:

View File

@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{}
};

View File

@ -144,8 +144,8 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
*/
.align 64
.skip 63, 0xcc
SYM_FUNC_START_NOALIGN(zen_untrain_ret);
SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
ANNOTATE_NOENDBR
/*
* As executed from zen_untrain_ret, this is:
*

View File

@ -1666,7 +1666,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
if (!dir) {
if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
@ -1692,7 +1692,7 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
if (!dbg_dir)
if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;

View File

@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
bio_opf = REQ_OP_WRITE;
break;
case RNBD_OP_FLUSH:
bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
break;
case RNBD_OP_DISCARD:
bio_opf = REQ_OP_DISCARD;

View File

@ -1281,7 +1281,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
{
u32 ioc_type = _IOC_TYPE(cmd_op);
if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
return -EOPNOTSUPP;
if (ioc_type != 'u' && ioc_type != 0)

View File

@ -571,6 +571,7 @@ void read_cdat_data(struct cxl_port *port)
/* Don't leave table data allocated on error */
devm_kfree(dev, cdat_table);
dev_err(dev, "CDAT data read error\n");
return;
}
port->cdat.table = cdat_table + sizeof(__le32);

View File

@ -706,21 +706,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int rcode;
if (destination == IEEE1394_ALL_NODES) {
kfree(r);
return;
}
if (offset != dev->handler.offset)
// Although the response to the broadcast packet is not necessarily required, the
// fw_send_response() function should still be called to maintain the reference
// counting of the object. In this case, the call of the function just releases the
// object as a result, decreasing the reference count.
rcode = RCODE_COMPLETE;
} else if (offset != dev->handler.offset) {
rcode = RCODE_ADDRESS_ERROR;
else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
} else if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
rcode = RCODE_TYPE_ERROR;
else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
} else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
} else
} else {
rcode = RCODE_COMPLETE;
}
fw_send_response(card, r, rcode);
}

View File

@ -51,7 +51,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
*
* It's not easily possible to fix this in struct screen_info,
* as this could break UAPI. The best solution is to compute
* bits_per_pixel here and ignore lfb_depth. In the loop below,
* bits_per_pixel from the color bits, reserved bits and
* reported lfb_depth, whichever is highest. In the loop below,
* ignore simplefb formats with alpha bits, as EFI and VESA
* don't specify alpha channels.
*/
@ -60,6 +61,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
si->green_size + si->green_pos,
si->blue_size + si->blue_pos),
si->rsvd_size + si->rsvd_pos);
bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
} else {
bits_per_pixel = si->lfb_depth;
}

View File

@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
* internal path natively supports atomics, so set have_atomics_support to true.
*/
else if ((adev->flags & AMD_IS_APU) &&
(adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
adev->have_atomics_support = true;
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
shadow = &vmbo->bo;
/* If vm is compute context or adev is APU, shadow will be NULL */
if (!vmbo->shadow)
continue;
shadow = vmbo->shadow;
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||

View File

@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
if (adev->gfx.cp_ecc_error_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
}
} else {
amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}

View File

@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
/* ECC error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* FED error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@ -5897,36 +5889,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
}
}
#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
do { \
uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
WREG32_SOC15_IP(GC, reg_addr, tmp); \
} while (0)
static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t ecc_irq_state = 0;
uint32_t pipe0_int_cntl_addr = 0;
int i = 0;
ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);
for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
ecc_irq_state);
return 0;
}
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@ -6341,11 +6303,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
.set = gfx_v11_0_set_cp_ecc_error_state,
.process = amdgpu_gfx_cp_ecc_error_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
.process = gfx_v11_0_rlc_gc_fed_irq,
};
@ -6361,9 +6318,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;
adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;

View File

@ -3764,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

View File

@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);

View File

@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
.codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
.codec_array = sc_video_codecs_encode_array,
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
} else {
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn1;
} else {
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
}
@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;

View File

@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
}
}
sdma_v4_0_ctx_switch_enable(adev, false);

View File

@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
adev->external_rev_id = adev->rev_id + 0x80;
break;
default:

View File

@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
PERF_TRACE();
}
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
/* There are use cases where SYMCLK is referenced by OTG. For instance
* for TMDS signal, OTG relies on SYMCLK even if TX video output is off.
* However current link interface will power off PHY when disabling link
* output. This will turn off SYMCLK generated by PHY. The workaround is
* to identify such case where SYMCLK is still in use by OTG when we
* power off PHY. When this is detected, we will temporarily power PHY
* back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
* program_pix_clk interface. When OTG is disabled, we will then power
* off PHY by calling disable link output again.
*
* In future dcn generations, we plan to rework transmitter control
* interface so that we could have an option to set SYMCLK ON TX OFF
* state in one step without this workaround
*/
struct dc *dc = link->ctx->dc;
struct pipe_ctx *pipe_ctx = NULL;
uint8_t i;
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
}
}
}
}
void dcn314_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
struct dmcu *dmcu = dc->res_pool->dmcu;
if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
/*
* Add the logic to extract BOTH power up and power down sequences
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}

View File

@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
#endif /* __DC_HWSS_DCN314_H__ */

View File

@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.disable_link_output = dcn314_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,

View File

@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */

View File

@ -53,6 +53,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
#define MIN_DCFCLK_FREQ_MHZ 200
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;

View File

@ -36,6 +36,8 @@
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) {
struct smu_context *smu = adev->powerplay.pp_handle;
if ((is_support_sw_smu(adev) && smu->od_enabled) ||
(is_support_sw_smu(adev) && smu->is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
return true;
return (smu->od_enabled || smu->is_apu);
} else {
struct pp_hwmgr *hwmgr;
return false;
/*
* dpm on some legacy asics don't carry od_enabled member
* as its pp_handle is casted directly from adev.
*/
if (amdgpu_dpm_is_legacy_dpm(adev))
return false;
hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
return hwmgr->od_enabled;
}
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,

View File

@ -425,11 +425,12 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
/*
* If we don't have IO space at all, use MMIO now and
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
* After AST2500, MMIO is enabled by default, and it should be adopted
* to be compatible with Arm.
*/
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
if (pdev->revision >= 0x40) {
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
} else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}

View File

@ -641,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
u32 line_length = info->fix.line_length;
u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
u32 y1 = off / info->fix.line_length;
u32 y1 = off / line_length;
u32 x2 = info->var.xres;
u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
u32 y2 = DIV_ROUND_UP(end, line_length);
/* Don't allow any of them beyond the bottom bound of display area */
if (y1 > fb_height)
y1 = fb_height;
if (y2 > fb_height)
y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
off_t bit_off = (off % info->fix.line_length) * 8;
off_t bit_end = (end % info->fix.line_length) * 8;
off_t bit_off = (off % line_length) * 8;
off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);

View File

@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
dsi->dev.of_node = info->node;
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));

View File

@ -62,10 +62,11 @@ config DRM_I915_FORCE_PROBE
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
Force probe the i915 for Intel graphics devices that are
recognized but not properly supported by this kernel version. It is
recommended to upgrade to a kernel version with proper support as soon
as it is available.
Force probe the i915 driver for Intel graphics devices that are
recognized but not properly supported by this kernel version. Force
probing an unsupported device taints the kernel. It is recommended to
upgrade to a kernel version with proper support as soon as it is
available.
It can also be used to block the probe of recognized and fully
supported devices.
@ -75,7 +76,8 @@ config DRM_I915_FORCE_PROBE
Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
devices. For example, "4500" or "4500,4571".
Use "*" to force probe the driver for all known devices.
Use "*" to force probe the driver for all known devices. Not
recommended.
Use "!" right before the ID to block the probe of the device. For
example, "4500,!4571" forces the probe of 4500 and blocks the probe of

View File

@ -1028,7 +1028,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
int ret;
if (old_obj) {
const struct intel_crtc_state *crtc_state =
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state,
to_intel_crtc(old_plane_state->hw.crtc));
@ -1043,7 +1043,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
if (intel_crtc_needs_modeset(crtc_state)) {
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv,
false, 0,

View File

@ -1601,6 +1601,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
if (!pipe_config->dsc.slice_count) {
drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
pipe_config->dsc.slice_count);
return -EINVAL;
}
} else {
u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;

View File

@ -31,12 +31,14 @@
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
#define COMMON_GEN9BASE_GLOBAL \
{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
#define GEN9_GLOBAL \
{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
@ -142,6 +144,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
static const struct __guc_mmio_reg_descr default_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN9BASE_GLOBAL,
GEN9_GLOBAL,
};
static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {

View File

@ -1344,6 +1344,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
if (intel_info->require_force_probe) {
dev_info(&pdev->dev, "Force probing unsupported Device ID %04x, tainting kernel\n",
pdev->device);
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both

View File

@ -2,6 +2,8 @@
#ifndef __NVIF_IF0012_H__
#define __NVIF_IF0012_H__
#include <drm/display/drm_dp.h>
union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
@ -63,7 +65,7 @@ union nvif_outp_acquire_args {
__u8 hda;
__u8 mst;
__u8 pad04[4];
__u8 dpcd[16];
__u8 dpcd[DP_RECEIVER_CAP_SIZE];
} dp;
};
} v0;

View File

@ -3,6 +3,7 @@
#define __NVKM_DISP_OUTP_H__
#include "priv.h"
#include <drm/display/drm_dp.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
@ -42,7 +43,7 @@ struct nvkm_outp {
bool aux_pwr_pu;
u8 lttpr[6];
u8 lttprs;
u8 dpcd[16];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
struct {
int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */

View File

@ -146,7 +146,7 @@ nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[16],
nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 link_nr, u8 link_bw, bool hda, bool mst)
{
int ret;

View File

@ -309,7 +309,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
*/
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
if (sched->ready)
if (sched->timeout_wq)
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

View File

@ -507,6 +507,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};

View File

@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {

View File

@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
.pm = &mxc_isi_pm_ops,
.pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);

View File

@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
#endif
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}

View File

@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
/* Gen3 can't scale NV12 */
if (vin->info->model == RCAR_GEN3 &&
vin->format.pixelformat == V4L2_PIX_FMT_NV12)
return -EPIPE;
if (!vin->scaler)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
if (ALIGN(fmt.format.width, 32) != vin->format.width ||
ALIGN(fmt.format.height, 32) != vin->format.height)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
}
}
if (fmt.format.code != vin->mbus_code)

View File

@ -84,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
/* Limit the max delay range to 300s */
static struct netlink_range_validation delay_range = {
.max = 300000,
};
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};

View File

@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
{ "off", 0, 0},
{ "maxval", 300000, BOND_VALFLAG_MAX},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_intmax_tbl,
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};

View File

@ -294,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
/* Clear PCI MSI-X Pending Bit Array (PBA)
*
* This bit is set if an interrupt event occurs while the vector is
* masked. If this bit is set and we reenable the interrupt, it will
* fire again. Since we're just about to poll the queue state, we don't
* need it to fire again.
*
* Under high softirq load, it's possible that the interrupt condition
* is triggered twice before we got the chance to process it.
*/
gve_write_irq_doorbell_dqo(priv, block,
GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

View File

@ -654,7 +654,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
BIT(hw->index), BIT(hw->index));
}
if (!hw_list[!hw->index]->wed_dev &&
if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

View File

@ -307,15 +307,15 @@ static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),

View File

@ -181,6 +181,7 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */

View File

@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
u32 clk_rate;
value |= GMAC_CORE_INIT;
@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
/* Enable GMAC interrupts */
value = GMAC_INT_DEFAULT_ENABLE;

View File

@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;

View File

@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
return of_mdiobus_register(mdio, dev->of_node);
ret = of_mdiobus_register(mdio, dev->of_node);
if (ret)
goto put_dev;
return 0;
put_dev:
usb_put_dev(mvusb->udev);
return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)

View File

@ -1203,7 +1203,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_features),
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
.an_mode = DW_2500BASEX,
},
};

View File

@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}
static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
{
return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
}
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);

View File

@ -486,7 +486,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
/* Read CORE_EXPA9 */
tmp = bcm_phy_read_exp(phydev, 0x00a9);
tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
/* CORE_EXPA9[6:1] is rcalcode[5:0] */
rcalcode = (tmp & 0x7e) / 2;
/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */

View File

@ -742,7 +742,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
/* copy skb_ubuf_info for callback when skb has no error */
@ -1197,7 +1197,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();

View File

@ -784,7 +784,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
fifo = vring->fifo;
/* Return if vdev is not ready. */
if (!fifo->vdev[devid])
if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
@ -980,9 +980,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
vq->num_max = vring->num;
vq->priv = vring;
/* Make vq update visible before using it. */
virtio_mb(false);
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
}
return 0;
@ -1302,6 +1306,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
/* Make all updates visible before setting the 'is_ready' flag. */
virtio_mb(false);
fifo->is_ready = true;
return 0;

View File

@ -211,6 +211,7 @@ struct bios_rfkill2_state {
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },

View File

@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
int min_max)
{
unsigned int input;
int ret;
if (kstrtouint(buf, 10, &input))
return -EINVAL;
mutex_lock(&uncore_lock);
uncore_write(data, input, min_max);
ret = uncore_write(data, input, min_max);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
return count;
}

View File

@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
{ PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },

View File

@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (err)
return err;
/* Check if user wants to override the profile selection */
if (profile_force) {
switch (profile_force) {
case -1:
dytc_capabilities = 0;
break;
case 1:
dytc_capabilities = BIT(DYTC_FC_MMC);
break;
case 2:
dytc_capabilities = BIT(DYTC_FC_PSC);
break;
}
pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
}
if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
pr_debug("MMC is supported\n");
/*
@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
dytc_mmc_get_available = true;
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
/* Support for this only works on AMD platforms */
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
return -ENODEV;
}
pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
"Initial state of the emulated UWB switch");
#endif
module_param(profile_force, int, 0444);
MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
static void thinkpad_acpi_module_exit(void)
{
struct ibm_struct *ibm, *itmp;

View File

@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data dexp_ursus_kx210i_data = {
.acpi_name = "MSSL1680:00",
.properties = dexp_ursus_kx210i_props,
};
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
static const struct ts_dmi_data gdix1002_00_upside_down_data = {
.acpi_name = "GDIX1002:00",
.properties = gdix1001_upside_down_props,
};
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@ -1185,6 +1206,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
{
/* DEXP Ursus KX210i */
.driver_data = (void *)&dexp_ursus_kx210i_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
},
},
{
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
@ -1295,6 +1324,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
{
/* Juno Tablet */
.driver_data = (void *)&gdix1002_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
/* Above matches are too generic, add partial bios-version match */
DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
},
},
{
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,

View File

@ -9459,8 +9459,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* that performance might be impacted.
*/
ret = ufshcd_urgent_bkops(hba);
if (ret)
if (ret) {
/*
* If return err in suspend flow, IO will hang.
* Trigger error handler and break suspend for
* error recovery.
*/
ufshcd_force_error_recovery(hba);
ret = -EBUSY;
goto enable_scaling;
}
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);

View File

@ -124,7 +124,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
* data from it to check this var.
*/
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
@ -182,7 +182,7 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
/*
* Now that we checked it we alter var. The reason being is that the video
* mode passed in might not work but slight changes to it might make it
* mode passed in might not work but slight changes to it might make it
* work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
@ -257,8 +257,8 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
}
/* This routine actually sets the video mode. It's in here where we
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
*/
static int mc68x328fb_set_par(struct fb_info *info)
{
@ -295,7 +295,7 @@ static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* {hardwarespecific} contains width of RAMDAC
* cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
*
*
* Pseudocolor:
* uses offset = 0 && length = RAMDAC register width.
* var->{color}.offset is 0
@ -384,7 +384,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
}
/*
* Most drivers don't need their own mmap function
* Most drivers don't need their own mmap function
*/
static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)

View File

@ -523,7 +523,7 @@ static int arcfb_probe(struct platform_device *dev)
info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
if (!info)
goto err;
goto err_fb_alloc;
info->screen_base = (char __iomem *)videomemory;
info->fbops = &arcfb_ops;
@ -535,7 +535,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!dio_addr || !cio_addr || !c2io_addr) {
printk(KERN_WARNING "no IO addresses supplied\n");
goto err1;
goto err_addr;
}
par->dio_addr = dio_addr;
par->cio_addr = cio_addr;
@ -551,12 +551,12 @@ static int arcfb_probe(struct platform_device *dev)
printk(KERN_INFO
"arcfb: Failed req IRQ %d\n", par->irq);
retval = -EBUSY;
goto err1;
goto err_addr;
}
}
retval = register_framebuffer(info);
if (retval < 0)
goto err1;
goto err_register_fb;
platform_set_drvdata(dev, info);
fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
videomemorysize >> 10);
@ -580,9 +580,12 @@ static int arcfb_probe(struct platform_device *dev)
}
return 0;
err1:
err_register_fb:
free_irq(par->irq, info);
err_addr:
framebuffer_release(info);
err:
err_fb_alloc:
vfree(videomemory);
return retval;
}

View File

@ -317,7 +317,7 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
/**
* atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory
* @sinfo: the frame buffer to allocate memory for
*
*
* This function is called only from the atmel_lcdfb_probe()
* so no locking by fb_info->mm_lock around smem_len setting is needed.
*/

View File

@ -512,7 +512,7 @@ static int cg14_probe(struct platform_device *op)
is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map));
for (i = 0; i < CG14_MMAP_ENTRIES; i++) {

View File

@ -113,14 +113,14 @@ struct fb_info_control {
struct fb_info info;
struct fb_par_control par;
u32 pseudo_palette[16];
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
struct control_regs __iomem *control_regs;
unsigned long control_regs_phys;
unsigned long control_regs_size;
__u8 __iomem *frame_buffer;
unsigned long frame_buffer_phys;
unsigned long fb_orig_base;
@ -196,7 +196,7 @@ static void set_control_clock(unsigned char *params)
while (!req.complete)
cuda_poll();
}
#endif
#endif
}
/*
@ -233,19 +233,19 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
if (p->par.xoffset != par->xoffset ||
p->par.yoffset != par->yoffset)
set_screen_start(par->xoffset, par->yoffset, p);
return;
}
p->par = *par;
cmode = p->par.cmode;
r = &par->regvals;
/* Turn off display */
out_le32(CNTRL_REG(p,ctrl), 0x400 | par->ctrl);
set_control_clock(r->clock_params);
RADACAL_WRITE(0x20, r->radacal_ctrl);
RADACAL_WRITE(0x21, p->control_use_bank2 ? 0 : 1);
RADACAL_WRITE(0x10, 0);
@ -254,7 +254,7 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
rp = &p->control_regs->vswin;
for (i = 0; i < 16; ++i, ++rp)
out_le32(&rp->r, r->regs[i]);
out_le32(CNTRL_REG(p,pitch), par->pitch);
out_le32(CNTRL_REG(p,mode), r->mode);
out_le32(CNTRL_REG(p,vram_attr), p->vram_attr);
@ -366,7 +366,7 @@ static int read_control_sense(struct fb_info_control *p)
sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0x180) >> 7;
out_le32(CNTRL_REG(p,mon_sense), 077); /* turn off drivers */
return sense;
}
@ -558,9 +558,9 @@ static int control_var_to_par(struct fb_var_screeninfo *var,
static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeninfo *var)
{
struct control_regints *rv;
rv = (struct control_regints *) par->regvals.regs;
memset(var, 0, sizeof(*var));
var->xres = par->xres;
var->yres = par->yres;
@ -568,7 +568,7 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
var->yres_virtual = par->vyres;
var->xoffset = par->xoffset;
var->yoffset = par->yoffset;
switch(par->cmode) {
default:
case CMODE_8:
@ -634,7 +634,7 @@ static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *i
err = control_var_to_par(var, &par, info);
if (err)
return err;
return err;
control_par_to_var(&par, var);
return 0;
@ -655,7 +655,7 @@ static int controlfb_set_par (struct fb_info *info)
" control_var_to_par: %d.\n", err);
return err;
}
control_set_hardware(p, &par);
info->fix.visual = (p->par.cmode == CMODE_8) ?
@ -840,7 +840,7 @@ static int __init init_control(struct fb_info_control *p)
int full, sense, vmode, cmode, vyres;
struct fb_var_screeninfo var;
int rc;
printk(KERN_INFO "controlfb: ");
full = p->total_vram == 0x400000;

View File

@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = {
{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
FB_VMODE_DOUBLE },
/* 1920x1080 @ 60 Hz, 67.3 kHz hsync */
{ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED },
/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,

View File

@ -6,7 +6,7 @@
*
* This driver is based on tgafb.c
*
* Copyright (C) 1997 Geert Uytterhoeven
* Copyright (C) 1997 Geert Uytterhoeven
* Copyright (C) 1995 Jay Estabrook
*
* This file is subject to the terms and conditions of the GNU General Public
@ -28,7 +28,7 @@
#include <asm/io.h>
#include <asm/jazz.h>
/*
/*
* Various defines for the G364
*/
#define G364_MEM_BASE 0xe4400000
@ -125,7 +125,7 @@ static const struct fb_ops g364fb_ops = {
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int g364fb_pan_display(struct fb_var_screeninfo *var,
static int g364fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->xoffset ||

View File

@ -1,6 +1,6 @@
/*
* linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device
*
*
* Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
* Based on skeletonfb.c by Geert Uytterhoeven and
* mdacon.c by Andrew Apted
@ -8,14 +8,14 @@
* History:
*
* - Revision 0.1.8 (23 Oct 2002): Ported to new framebuffer api.
*
* - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
*
* - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
* being detected as Hercules. (Paul G.)
* - Revision 0.1.6 (17 Aug 2000): new style structs
* documentation
* - Revision 0.1.5 (13 Mar 2000): spinlocks instead of saveflags();cli();etc
* minor fixes
* - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* HGA-only systems
* - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure
* screen is cleared after rmmod
@ -143,7 +143,7 @@ static bool nologo = 0;
static void write_hga_b(unsigned int val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb_p(reg, HGA_INDEX_PORT);
outb_p(val, HGA_VALUE_PORT);
}
@ -155,7 +155,7 @@ static void write_hga_w(unsigned int val, unsigned char reg)
static int test_hga_b(unsigned char val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb_p(reg, HGA_INDEX_PORT);
outb (val, HGA_VALUE_PORT);
udelay(20); val = (inb_p(HGA_VALUE_PORT) == val);
return val;
@ -244,7 +244,7 @@ static void hga_show_logo(struct fb_info *info)
void __iomem *dest = hga_vram;
char *logo = linux_logo_bw;
int x, y;
for (y = 134; y < 134 + 80 ; y++) /* this needs some cleanup */
for (x = 0; x < 10 ; x++)
writeb(~*(logo++),(dest + HGA_ROWADDR(y) + x + 40));
@ -255,7 +255,7 @@ static void hga_pan(unsigned int xoffset, unsigned int yoffset)
{
unsigned int base;
unsigned long flags;
base = (yoffset / 8) * 90 + xoffset;
spin_lock_irqsave(&hga_reg_lock, flags);
write_hga_w(base, 0x0c); /* start address */
@ -310,7 +310,7 @@ static int hga_card_detect(void)
/* Ok, there is definitely a card registering at the correct
* memory location, so now we do an I/O port test.
*/
if (!test_hga_b(0x66, 0x0f)) /* cursor low register */
goto error;
@ -321,7 +321,7 @@ static int hga_card_detect(void)
* bit of the status register is changing. This test lasts for
* approximately 1/10th of a second.
*/
p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
for (count=0; count < 50000 && p_save == q_save; count++) {
@ -329,7 +329,7 @@ static int hga_card_detect(void)
udelay(2);
}
if (p_save == q_save)
if (p_save == q_save)
goto error;
switch (inb_p(HGA_STATUS_PORT) & 0x70) {
@ -415,7 +415,7 @@ static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* @info:pointer to fb_info object containing info for current hga board
*
* This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP
* flag in @var. If input parameters are correct it calls hga_pan() to
* flag in @var. If input parameters are correct it calls hga_pan() to
* program the hardware. @info->var is updated to the new values.
* A zero is returned on success and %-EINVAL for failure.
*/
@ -442,9 +442,9 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
* hgafb_blank - (un)blank the screen
* @blank_mode:blanking method to use
* @info:unused
*
* Blank the screen if blank_mode != 0, else unblank.
* Implements VESA suspend and powerdown modes on hardware that supports
*
* Blank the screen if blank_mode != 0, else unblank.
* Implements VESA suspend and powerdown modes on hardware that supports
* disabling hsync/vsync:
* @blank_mode == 2 means suspend vsync,
* @blank_mode == 3 means suspend hsync,
@ -539,15 +539,15 @@ static const struct fb_ops hgafb_ops = {
.fb_copyarea = hgafb_copyarea,
.fb_imageblit = hgafb_imageblit,
};
/* ------------------------------------------------------------------------- *
*
* Functions in fb_info
*
*
* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/*
* Initialization
*/

View File

@ -92,7 +92,7 @@ static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= info->cmap.len)
return 1;
while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1);
out_be16(fb_regs + 0x60ba, 0xff);
@ -143,7 +143,7 @@ static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
out_8(fb_regs + WMOVE, fb_bitmask);
}
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY);
}
@ -315,7 +315,7 @@ unmap_screen_base:
return ret;
}
/*
/*
* Check that the secondary ID indicates that we have some hope of working with this
* framebuffer. The catseye boards are pretty much like topcats and we can muddle through.
*/
@ -323,7 +323,7 @@ unmap_screen_base:
#define topcat_sid_ok(x) (((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE) \
|| ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT))
/*
/*
* Initialise the framebuffer
*/
static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)

View File

@ -1347,7 +1347,7 @@ static const struct fb_ops imsttfb_ops = {
.fb_ioctl = imsttfb_ioctl,
};
static void init_imstt(struct fb_info *info)
static int init_imstt(struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info)
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
framebuffer_release(info);
return;
return -ENODEV;
}
sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info)
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
return;
return -ENODEV;
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
info->fix.id, info->fix.smem_len >> 20, tmp);
return 0;
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -1529,10 +1530,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!par->cmap_regs)
goto error;
info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
return 0;
ret = init_imstt(info);
if (!ret)
pci_set_drvdata(pdev, info);
return ret;
error:
if (par->dc_regs)

View File

@ -339,7 +339,7 @@ static int civic_setpalette(unsigned int regno, unsigned int red,
{
unsigned long flags;
int clut_status;
local_irq_save(flags);
/* Set the register address */
@ -439,7 +439,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure).
* Return non-zero for invalid regno.
*/
if (regno >= fb_info->cmap.len)
return 1;
@ -548,7 +548,7 @@ static int __init macfb_init(void)
return -ENODEV;
macfb_setup(option);
if (!MACH_IS_MAC)
if (!MACH_IS_MAC)
return -ENODEV;
if (mac_bi_data.id == MAC_MODEL_Q630 ||
@ -644,7 +644,7 @@ static int __init macfb_init(void)
err = -EINVAL;
goto fail_unmap;
}
/*
* We take a wild guess that if the video physical address is
* in nubus slot space, that the nubus card is driving video.
@ -774,7 +774,7 @@ static int __init macfb_init(void)
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
break;
/*
* Assorted weirdos
* We think this may be like the LC II

View File

@ -138,7 +138,7 @@ int __init maxinefb_init(void)
*(volatile unsigned char *)fboff = 0x0;
maxinefb_fix.smem_start = fb_start;
/* erase hardware cursor */
for (i = 0; i < 512; i++) {
maxinefb_ims332_write_register(IMS332_REG_CURSOR_RAM + i,

Some files were not shown because too many files have changed in this diff