This is the 6.8.7 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYflScACgkQONu9yGCS
 aT5lzA/+MKsoKxiJnofvJ60mIfGsFRutrU78gs+xeqDsYH+79vFqWwq7TwtzffNz
 BNkW+6Gt9H27syAOEuKqk0xf055RYRE91QS+f7o3D15YJ+y+tu9S4OErZ+ZbM088
 i+PpH59XuwPAhsRK278sffqVdRq3F0LfE8eRE6vqohleM78yU21o0pm2D4XFgYeB
 52fyPadIdujr7qpwDLJXx6r6r1ZyBCOycBfrceLQYeygEthZQX1WeK09RfyxDP7n
 WVhslMVqx48Lr58O9df2dUv9FuOQHfKjNRIW4D4mLJsdT50lRM0C4WWgCt0UwiYs
 2Ig9ysoAguHh2afGAnizaVzZFp5Oaf3Dl+EvfiFdhOTt8S0s0BfCx6ykqq0fHPV3
 D6QxBSr8k+3WfpmLDXMi+DeTIk1mvDGXWBqWQ7kUCQkfthIIKCO9TGhmTUS4hxNI
 rrpJGaUVxjHNl+S0A6QtamICYahOA+TUvjEFwdGQVN4FKCEHrZe7xSg4bCEEIxK8
 BjRBQfmhIaBYpT7zTvl5gi+vG2PnMIJuaYa6IgGboDCzdJIG1/Vof7NTEe/d+k9/
 K8fTkdDAS5AUDeD8xECv5HdE0nXQH5i5FKH/nS4i6a/kkKSQl5fS2Ldq07zSPCdd
 oPq9w2ZKx0opPHnQhrqSHRsjIjpa+GckrIpFPufHUxtCievwzy0=
 =XmsE
 -----END PGP SIGNATURE-----

Merge v6.8.7

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman committed 2024-04-17 11:23:43 +02:00
commit 0640bfe1fb
170 changed files with 1558 additions and 881 deletions

View File

@ -439,12 +439,12 @@ The possible values in this file are:
- System is protected by retpoline
* - BHI: BHI_DIS_S
- System is protected by BHI_DIS_S
* - BHI: SW loop; KVM SW loop
* - BHI: SW loop, KVM SW loop
- System is protected by software clearing sequence
* - BHI: Syscall hardening
- Syscalls are hardened against BHI
* - BHI: Syscall hardening; KVM: SW loop
- System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
* - BHI: Vulnerable
- System is vulnerable to BHI
* - BHI: Vulnerable, KVM: SW loop
- System is vulnerable; KVM is protected by software clearing sequence
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
@ -661,18 +661,14 @@ kernel command line.
spectre_bhi=
[X86] Control mitigation of Branch History Injection
(BHI) vulnerability. Syscalls are hardened against BHI
regardless of this setting. This setting affects the deployment
(BHI) vulnerability. This setting affects the deployment
of the HW BHI control and the SW BHB clearing sequence.
on
unconditionally enable.
(default) Enable the HW or SW mitigation as
needed.
off
unconditionally disable.
auto
enable if hardware mitigation
control(BHI_DIS_S) is available, otherwise
enable alternate mitigation in KVM.
Disable the mitigation.
For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt

View File

@ -3419,6 +3419,7 @@
reg_file_data_sampling=off [X86]
retbleed=off [X86]
spec_store_bypass_disable=off [X86,PPC]
spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
@ -6032,16 +6033,13 @@
See Documentation/admin-guide/laptops/sonypi.rst
spectre_bhi= [X86] Control mitigation of Branch History Injection
(BHI) vulnerability. Syscalls are hardened against BHI
reglardless of this setting. This setting affects the
(BHI) vulnerability. This setting affects the
deployment of the HW BHI control and the SW BHB
clearing sequence.
on - unconditionally enable.
off - unconditionally disable.
auto - (default) enable hardware mitigation
(BHI_DIS_S) if available, otherwise enable
alternate mitigation in KVM.
on - (default) Enable the HW or SW mitigation
as needed.
off - Disable the mitigation.
spectre_v2= [X86] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
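As an aside on the documentation changes above: the "BHI: ..." status strings are appended to the spectre_v2 line under sysfs. A minimal userspace sketch, not part of this merge, that prints that line; the sysfs path is the standard vulnerabilities directory, everything else here is purely illustrative:

/*
 * Illustrative only: dump the Spectre v2 / BHI status string that the
 * documentation hunks above describe.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* e.g. "... ; BHI: BHI_DIS_S" or "... ; BHI: SW loop, KVM: SW loop" */
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}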

View File

@ -53,6 +53,15 @@ patternProperties:
compatible:
const: qcom,sm8150-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
contains:
const: qcom,sm8150-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 8
SUBLEVEL = 6
SUBLEVEL = 7
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

View File

@ -210,6 +210,7 @@
remote-endpoint = <&mipi_from_sensor>;
clock-lanes = <0>;
data-lanes = <1>;
link-frequencies = /bits/ 64 <330000000>;
};
};
};

View File

@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
static struct gpiod_lookup_table tusb_gpio_table = {
.dev_id = "musb-tusb",
.table = {
GPIO_LOOKUP("gpio-0-15", 0, "enable",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-48-63", 10, "int",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
{ }
},
};
@ -140,12 +138,11 @@ static int slot1_cover_open;
static int slot2_cover_open;
static struct device *mmc_device;
static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
GPIO_LOOKUP("gpio-80-111", 16,
"switch", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
{ }
},
};
@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
/* Slot index 1, VSD power, GPIO 23 */
GPIO_LOOKUP_IDX("gpio-16-31", 7,
"vsd", 1, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
/* Slot index 1, VIO power, GPIO 9 */
GPIO_LOOKUP_IDX("gpio-0-15", 9,
"vio", 1, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
{ }
},
};
@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
static void __init n8x0_mmc_init(void)
{
gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
if (board_is_n810()) {
mmc1_data.slots[0].name = "external";
@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
mmc1_data.slots[1].name = "internal";
mmc1_data.slots[1].ban_openended = 1;
gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
} else {
gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
}
mmc1_data.nr_slots = 2;

View File

@ -41,7 +41,7 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc1 0>;
clocks = <&usb2_lpcg 0>;
clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;
@ -58,7 +58,7 @@ conn_subsys: bus@5b000000 {
usbphy1: usbphy@5b100000 {
compatible = "fsl,imx7ulp-usbphy";
reg = <0x5b100000 0x1000>;
clocks = <&usb2_lpcg 1>;
clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
power-domains = <&pd IMX_SC_R_USB_0_PHY>;
status = "disabled";
};
@ -67,8 +67,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b010000 0x10000>;
clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
<&sdhc0_lpcg IMX_LPCG_CLK_0>,
<&sdhc0_lpcg IMX_LPCG_CLK_5>;
<&sdhc0_lpcg IMX_LPCG_CLK_5>,
<&sdhc0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_0>;
status = "disabled";
@ -78,8 +78,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b020000 0x10000>;
clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
<&sdhc1_lpcg IMX_LPCG_CLK_0>,
<&sdhc1_lpcg IMX_LPCG_CLK_5>;
<&sdhc1_lpcg IMX_LPCG_CLK_5>,
<&sdhc1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_1>;
fsl,tuning-start-tap = <20>;
@ -91,8 +91,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b030000 0x10000>;
clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
<&sdhc2_lpcg IMX_LPCG_CLK_0>,
<&sdhc2_lpcg IMX_LPCG_CLK_5>;
<&sdhc2_lpcg IMX_LPCG_CLK_5>,
<&sdhc2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_2>;
status = "disabled";

View File

@ -27,8 +27,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi0_lpcg 0>,
<&spi0_lpcg 1>;
clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
<&spi0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@ -43,8 +43,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi1_lpcg 0>,
<&spi1_lpcg 1>;
clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
<&spi1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@ -59,8 +59,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi2_lpcg 0>,
<&spi2_lpcg 1>;
clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
<&spi2_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@ -75,8 +75,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi3_lpcg 0>,
<&spi3_lpcg 1>;
clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
<&spi3_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;
@ -144,8 +144,8 @@ dma_subsys: bus@5a000000 {
compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
reg = <0x5a190000 0x1000>;
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&adma_pwm_lpcg 1>,
<&adma_pwm_lpcg 0>;
clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
<&adma_pwm_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@ -377,8 +377,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a880000 0x10000>;
interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&adc0_lpcg 0>,
<&adc0_lpcg 1>;
clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
<&adc0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@ -392,8 +392,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a890000 0x10000>;
interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&adc1_lpcg 0>,
<&adc1_lpcg 1>;
clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
<&adc1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
@ -406,8 +406,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a8d0000 0x10000>;
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@ -427,8 +427,8 @@ dma_subsys: bus@5a000000 {
* CAN1 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@ -448,8 +448,8 @@ dma_subsys: bus@5a000000 {
* CAN2 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;

View File

@ -25,8 +25,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d000000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm0_lpcg 4>,
<&pwm0_lpcg 1>;
clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
<&pwm0_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@ -38,8 +38,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d010000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm1_lpcg 4>,
<&pwm1_lpcg 1>;
clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
<&pwm1_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@ -51,8 +51,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d020000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm2_lpcg 4>,
<&pwm2_lpcg 1>;
clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
<&pwm2_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@ -64,8 +64,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d030000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm3_lpcg 4>,
<&pwm3_lpcg 1>;
clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
<&pwm3_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;

View File

@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@ -183,7 +184,6 @@
};
&usb3_phy0 {
vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};

View File

@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
port {
@ -202,7 +203,6 @@
};
&usb3_phy0 {
vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};

View File

@ -127,15 +127,15 @@
};
&flexcan2 {
clocks = <&can1_lpcg 1>,
<&can1_lpcg 0>;
clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
<&can1_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
&flexcan3 {
clocks = <&can2_lpcg 1>,
<&can2_lpcg 0>;
clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
<&can2_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};

View File

@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
/*
* Generate 'num' values from -1 to 30 with -1 rejected by the
* __flush_tlb_range() loop below.
* Generate 'num' values from -1 to 31 with -1 rejected by the
* __flush_tlb_range() loop below. Its return value is only
* significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
* 'pages' is more than that, you must iterate over the overall
* range.
*/
#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale) \
((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
#define __TLBI_RANGE_NUM(pages, scale) \
({ \
int __pages = min((pages), \
__TLBI_RANGE_PAGES(31, (scale))); \
(__pages >> (5 * (scale) + 1)) - 1; \
})
/*
* TLB Invalidation
@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* 3. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
*
* Note that certain ranges can be represented by either num = 31 and
* scale or num = 0 and scale + 1. The loop below favours the latter
* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
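For context on the __TLBI_RANGE_NUM() change above, here is a standalone sketch of the new clamping behaviour: 'num' can now reach 31 for a given scale, and anything beyond MAX_TLBI_RANGE_PAGES is clamped so the caller must iterate. The __TLBI_RANGE_PAGES() definition is reproduced from the same header and, like the sample values, is an assumption of this example rather than part of the diff:

#include <stdio.h>

/* Mirrors the arm64 helpers; values assumed from asm/tlbflush.h. */
#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)(((num) + 1) << (5 * (scale) + 1)))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/* Same computation as the new __TLBI_RANGE_NUM() macro. */
static long tlbi_range_num(unsigned long pages, int scale)
{
	unsigned long max = __TLBI_RANGE_PAGES(31, scale);
	unsigned long clamped = pages < max ? pages : max;

	return (long)(clamped >> (5 * scale + 1)) - 1;
}

int main(void)
{
	/* scale 3 covers up to MAX_TLBI_RANGE_PAGES; num now hits 31. */
	printf("pages=%lu scale=3 -> num=%ld\n", MAX_TLBI_RANGE_PAGES,
	       tlbi_range_num(MAX_TLBI_RANGE_PAGES, 3));
	/* Larger ranges are clamped, so the caller has to loop. */
	printf("pages=%lu scale=3 -> num=%ld (clamped)\n",
	       MAX_TLBI_RANGE_PAGES + 4096,
	       tlbi_range_num(MAX_TLBI_RANGE_PAGES + 4096, 3));
	/* Small ranges still yield -1, which __flush_tlb_range() rejects. */
	printf("pages=2 scale=1 -> num=%ld\n", tlbi_range_num(2, 1));
	return 0;
}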

View File

@ -2612,31 +2612,16 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
choice
prompt "Clear branch history"
config MITIGATION_SPECTRE_BHI
bool "Mitigate Spectre-BHB (Branch History Injection)"
depends on CPU_SUP_INTEL
default SPECTRE_BHI_ON
default y
help
Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
where the branch history buffer is poisoned to speculatively steer
indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
config SPECTRE_BHI_ON
bool "on"
help
Equivalent to setting spectre_bhi=on command line parameter.
config SPECTRE_BHI_OFF
bool "off"
help
Equivalent to setting spectre_bhi=off command line parameter.
config SPECTRE_BHI_AUTO
bool "auto"
help
Equivalent to setting spectre_bhi=auto command line parameter.
endchoice
endif
config ARCH_HAS_ADD_PAGES

View File

@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
cpuc->assign[i-1] = cpuc->assign[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;

View File

@ -13,6 +13,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@ -96,7 +97,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
return readl((void __iomem *)(APIC_BASE + reg));
}
static inline void native_apic_mem_eoi(void)

View File

@ -1724,11 +1724,11 @@ static int x2apic_state;
static bool x2apic_hw_locked(void)
{
u64 ia32_cap;
u64 x86_arch_cap_msr;
u64 msr;
ia32_cap = x86_read_arch_cap_msr();
if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
x86_arch_cap_msr = x86_read_arch_cap_msr();
if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}

View File

@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);
static u64 __ro_after_init x86_arch_cap_msr;
static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
x86_arch_cap_msr = x86_read_arch_cap_msr();
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
static void __init taa_select_mitigation(void)
{
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_OFF;
return;
@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
* update is required.
*/
ia32_cap = x86_read_arch_cap_msr();
if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
!(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
/*
@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
ia32_cap = x86_read_arch_cap_msr();
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
/*
@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
!(ia32_cap & ARCH_CAP_MDS_NO)))
!(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@ -659,8 +656,6 @@ void update_srbds_msr(void)
static void __init srbds_select_mitigation(void)
{
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
return;
@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
ia32_cap = x86_read_arch_cap_msr();
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
/* Will verify below that mitigation _can_ be disabled */
/* No microcode */
if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
if (gds_mitigation == GDS_MITIGATION_FORCE) {
/*
* This only needs to be done on the boot CPU so do it
@ -1543,20 +1537,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
static bool __ro_after_init rrsba_disabled;
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
u64 ia32_cap;
if (rrsba_disabled)
return;
if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
rrsba_disabled = true;
return;
}
if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
return;
ia32_cap = x86_read_arch_cap_msr();
if (ia32_cap & ARCH_CAP_RRSBA) {
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
}
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
@ -1625,13 +1624,10 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
BHI_MITIGATION_ON,
BHI_MITIGATION_AUTO,
};
static enum bhi_mitigations bhi_mitigation __ro_after_init =
IS_ENABLED(CONFIG_SPECTRE_BHI_ON) ? BHI_MITIGATION_ON :
IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
BHI_MITIGATION_AUTO;
IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
static int __init spectre_bhi_parse_cmdline(char *str)
{
@ -1642,8 +1638,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
bhi_mitigation = BHI_MITIGATION_OFF;
else if (!strcmp(str, "on"))
bhi_mitigation = BHI_MITIGATION_ON;
else if (!strcmp(str, "auto"))
bhi_mitigation = BHI_MITIGATION_AUTO;
else
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
@ -1657,9 +1651,11 @@ static void __init bhi_select_mitigation(void)
return;
/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
!(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
return;
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
spec_ctrl_disable_kernel_rrsba();
if (rrsba_disabled)
return;
}
if (spec_ctrl_bhi_dis())
return;
@ -1671,9 +1667,6 @@ static void __init bhi_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
if (bhi_mitigation == BHI_MITIGATION_AUTO)
return;
/* Mitigate syscalls when the mitigation is forced =on */
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
@ -1907,8 +1900,6 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.
@ -1923,7 +1914,7 @@ static void update_mds_branch_idle(void)
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(ia32_cap & ARCH_CAP_FBSDP_NO)) {
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}
@ -2808,7 +2799,7 @@ static char *pbrsb_eibrs_state(void)
}
}
static const char * const spectre_bhi_state(void)
static const char *spectre_bhi_state(void)
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";
@ -2816,13 +2807,12 @@ static const char * const spectre_bhi_state(void)
return "; BHI: BHI_DIS_S";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
return "; BHI: Retpoline";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Syscall hardening, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";
return "; BHI: Vulnerable (Syscall hardening enabled)";
return "; BHI: Vulnerable";
}
static ssize_t spectre_v2_show_state(char *buf)

View File

@ -1327,25 +1327,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
u64 x86_read_arch_cap_msr(void)
{
u64 ia32_cap = 0;
u64 x86_arch_cap_msr = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
return ia32_cap;
return x86_arch_cap_msr;
}
static bool arch_cap_mmio_immune(u64 ia32_cap)
static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
return (ia32_cap & ARCH_CAP_FBSDP_NO &&
ia32_cap & ARCH_CAP_PSDP_NO &&
ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
}
static bool __init vulnerable_to_rfds(u64 ia32_cap)
static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
if (ia32_cap & ARCH_CAP_RFDS_NO)
if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
return false;
/*
@ -1353,7 +1353,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
* indicate that mitigation is needed because guest is running on a
* vulnerable hardware or may migrate to such hardware:
*/
if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
return true;
/* Only consult the blacklist when there is no enumeration: */
@ -1362,11 +1362,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = x86_read_arch_cap_msr();
u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
!(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
!(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@ -1378,7 +1378,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
!(ia32_cap & ARCH_CAP_SSB_NO) &&
!(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@ -1386,15 +1386,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
* flag and protect from vendor-specific bugs via the whitelist.
*/
if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))
!(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
}
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
!(ia32_cap & ARCH_CAP_MDS_NO)) {
!(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@ -1413,9 +1413,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* TSX_CTRL check alone is not sufficient for cases when the microcode
* update is not present or running as guest that don't get TSX_CTRL.
*/
if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
(cpu_has(c, X86_FEATURE_RTM) ||
(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
/*
@ -1441,7 +1441,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
if (!arch_cap_mmio_immune(ia32_cap)) {
if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@ -1449,7 +1449,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
@ -1467,15 +1467,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* disabling AVX2. The only way to do this in HW is to clear XCR0[2],
* which means that AVX will be disabled.
*/
if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
boot_cpu_has(X86_FEATURE_AVX))
setup_force_cpu_bug(X86_BUG_GDS);
if (vulnerable_to_rfds(ia32_cap))
if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
/* When virtualized, eIBRS could be hidden, assume vulnerable */
if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
(boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@ -1485,7 +1485,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
return;
/* Rogue Data Cache Load? No! */
if (ia32_cap & ARCH_CAP_RDCL_NO)
if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

View File

@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
return 0;
}
void blkg_init_queue(struct request_queue *q)
{
INIT_LIST_HEAD(&q->blkg_list);
mutex_init(&q->blkcg_mutex);
}
int blkcg_init_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
bool preloaded;
int ret;
INIT_LIST_HEAD(&q->blkg_list);
mutex_init(&q->blkcg_mutex);
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
return -ENOMEM;

View File

@ -188,6 +188,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;
void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);
@ -481,6 +482,7 @@ struct blkcg {
};
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }

View File

@ -431,6 +431,8 @@ struct request_queue *blk_alloc_queue(int node_id)
init_waitqueue_head(&q->mq_freeze_wq);
mutex_init(&q->mq_freeze_lock);
blkg_init_queue(q);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.

View File

@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
int ret;
ret = ivpu_rpm_get_if_active(vdev);
if (ret < 0)
return ret;
*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
if (ret)
ivpu_rpm_put(vdev);
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
ret = ivpu_get_core_clock_rate(vdev, &args->value);
args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@ -530,7 +514,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);

View File

@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return vdev->hw->ops->ratio_to_freq(vdev, ratio);
}
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);

View File

@ -805,12 +805,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == PLL_RATIO_4_3)
if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@ -829,7 +829,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@ -1052,6 +1052,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,

View File

@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return PLL_RATIO_TO_FREQ(ratio);
}
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
.ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
drmm_mutex_init(&vdev->drm, &ipc->lock);
ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
if (ret) {
ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
goto err_free_rx;
}
ivpu_ipc_reset(vdev);
return 0;

View File

@ -74,10 +74,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
pci_restore_state(to_pci_dev(vdev->drm.dev));
retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@ -100,6 +100,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);

View File

@ -59,9 +59,8 @@ struct target_cache {
};
enum {
NODE_ACCESS_CLASS_0 = 0,
NODE_ACCESS_CLASS_1,
NODE_ACCESS_CLASS_GENPORT_SINK,
NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
NODE_ACCESS_CLASS_MAX,
};
@ -127,7 +126,8 @@ static struct memory_target *acpi_find_genport_target(u32 uid)
/**
* acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
* @uid: ACPI unique id
* @coord: The access coordinates written back out for the generic port
* @coord: The access coordinates written back out for the generic port.
* Expect 2 levels array.
*
* Return: 0 on success. Errno on failure.
*
@ -143,7 +143,10 @@ int acpi_get_genport_coordinates(u32 uid,
if (!target)
return -ENOENT;
*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
coord[ACCESS_COORDINATE_LOCAL] =
target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
coord[ACCESS_COORDINATE_CPU] =
target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
return 0;
}
@ -374,11 +377,11 @@ static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_px
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
NODE_ACCESS_CLASS_0);
ACCESS_COORDINATE_LOCAL);
/* If the node has a CPU, update access 1 */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
NODE_ACCESS_CLASS_1);
ACCESS_COORDINATE_CPU);
}
}
@ -697,7 +700,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
int i;
/* Don't update for generic port if there's no device handle */
if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
!(*(u16 *)target->gen_port_device_handle))
return;
@ -709,7 +713,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
*/
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
if (access == 0 || node_state(cpu_nid, N_CPU)) {
if (access == ACCESS_COORDINATE_LOCAL ||
node_state(cpu_nid, N_CPU)) {
set_bit(target->processor_pxm, p_nodes);
return;
}
@ -737,7 +742,9 @@ static void hmat_update_target_attrs(struct memory_target *target,
list_for_each_entry(initiator, &initiators, node) {
u32 value;
if (access == 1 && !initiator->has_cpu) {
if ((access == ACCESS_COORDINATE_CPU ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
!initiator->has_cpu) {
clear_bit(initiator->processor_pxm, p_nodes);
continue;
}
@ -775,15 +782,19 @@ static void hmat_update_generic_target(struct memory_target *target)
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
hmat_update_target_attrs(target, p_nodes,
NODE_ACCESS_CLASS_GENPORT_SINK);
NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
hmat_update_target_attrs(target, p_nodes,
NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
}
static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
__hmat_register_target_initiators(target, p_nodes, 0);
__hmat_register_target_initiators(target, p_nodes, 1);
__hmat_register_target_initiators(target, p_nodes,
ACCESS_COORDINATE_LOCAL);
__hmat_register_target_initiators(target, p_nodes,
ACCESS_COORDINATE_CPU);
}
static void hmat_register_target_cache(struct memory_target *target)
@ -854,8 +865,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
target->registered = true;
}
mutex_unlock(&target_lock);
@ -927,7 +938,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
return NOTIFY_OK;
mutex_lock(&target_lock);
hmat_update_target_attrs(target, p_nodes, 1);
hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
perf = &target->coord[1];

View File

@ -1802,7 +1802,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
adev->dep_unmet++;
if (!dep->met)
adev->dep_unmet++;
}
}
}

View File

@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
if (ata_id_major_version(dev->id) < 12)
if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||

View File

@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
goto unlock;
goto unlock_ap;
if (!sdev)
continue;
@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (do_resume) {
ret = scsi_resume_device(sdev);
if (ret == -EWOULDBLOCK)
goto unlock;
goto unlock_scan;
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_lock_irqsave(ap->lock, flags);
if (ret)
goto unlock;
goto unlock_ap;
}
}
unlock:
unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */

View File

@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
}
static struct node_access_nodes *node_init_node_access(struct node *node,
unsigned int access)
enum access_coordinate_class access)
{
struct node_access_nodes *access_node;
struct device *dev;
@ -191,7 +191,7 @@ static struct attribute *access_attrs[] = {
* @access: The access class the for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned int access)
enum access_coordinate_class access)
{
struct node_access_nodes *c;
struct node *node;
@ -689,7 +689,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
unsigned int access)
enum access_coordinate_class access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;

View File

@ -530,13 +530,15 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);
rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
if (rc < 0)
return rc;
/* Adjust back to picoseconds from nanoseconds */
dport->hb_coord.read_latency *= 1000;
dport->hb_coord.write_latency *= 1000;
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
dport->hb_coord[i].read_latency *= 1000;
dport->hb_coord[i].write_latency *= 1000;
}
return 0;
}

View File

@ -162,15 +162,22 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
struct access_coordinate c;
struct access_coordinate ep_c;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
rc = cxl_endpoint_get_perf_coordinates(port, &c);
rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
return rc;
}
rc = cxl_hb_get_perf_coordinates(port, coord);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
return rc;
}
@ -185,18 +192,19 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
dent->coord.read_latency = dent->coord.read_latency +
c.read_latency;
dent->coord.write_latency = dent->coord.write_latency +
c.write_latency;
dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
dent->coord.read_bandwidth);
dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
dent->coord.write_bandwidth);
cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
/*
* Keeping the host bridge coordinates separate from the dsmas
* coordinates in order to allow calculation of access class
* 0 and 1 for region later.
*/
cxl_coordinates_combine(&coord[ACCESS_COORDINATE_LOCAL],
&coord[ACCESS_COORDINATE_LOCAL],
&dent->coord);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
&qos_class);
rc = cxl_root->ops->qos_class(cxl_root,
&coord[ACCESS_COORDINATE_LOCAL],
1, &qos_class);
if (rc != 1)
continue;
@ -484,4 +492,26 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
/**
* cxl_coordinates_combine - Combine the two input coordinates
*
* @out: Output coordinate of c1 and c2 combined
* @c1: input coordinates
* @c2: input coordinates
*/
void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2)
{
if (c1->write_bandwidth && c2->write_bandwidth)
out->write_bandwidth = min(c1->write_bandwidth,
c2->write_bandwidth);
out->write_latency = c1->write_latency + c2->write_latency;
if (c1->read_bandwidth && c2->read_bandwidth)
out->read_bandwidth = min(c1->read_bandwidth,
c2->read_bandwidth);
out->read_latency = c1->read_latency + c2->read_latency;
}
MODULE_IMPORT_NS(CXL);
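To make the newly exported combine rule concrete: the bandwidth of a chained path is bounded by its slowest hop, while latencies add. A self-contained sketch with a hypothetical struct that only mirrors the fields cxl_coordinates_combine() touches, using made-up example numbers:

#include <stdio.h>

struct coord {
	unsigned int read_bandwidth;	/* MB/s */
	unsigned int write_bandwidth;	/* MB/s */
	unsigned int read_latency;	/* picoseconds */
	unsigned int write_latency;	/* picoseconds */
};

/* Same rule as cxl_coordinates_combine(): min() bandwidth, summed latency. */
static void coordinates_combine(struct coord *out, const struct coord *c1,
				const struct coord *c2)
{
	if (c1->write_bandwidth && c2->write_bandwidth)
		out->write_bandwidth = c1->write_bandwidth < c2->write_bandwidth ?
				       c1->write_bandwidth : c2->write_bandwidth;
	out->write_latency = c1->write_latency + c2->write_latency;

	if (c1->read_bandwidth && c2->read_bandwidth)
		out->read_bandwidth = c1->read_bandwidth < c2->read_bandwidth ?
				      c1->read_bandwidth : c2->read_bandwidth;
	out->read_latency = c1->read_latency + c2->read_latency;
}

int main(void)
{
	struct coord link  = { 16000, 16000, 200, 200 };	/* example link hop */
	struct coord dev   = {  8000, 12000, 500, 600 };	/* example device CDAT */
	struct coord total = { 0 };

	coordinates_combine(&total, &link, &dev);
	printf("read:  %u MB/s, %u ps\n", total.read_bandwidth, total.read_latency);
	printf("write: %u MB/s, %u ps\n", total.write_bandwidth, total.write_latency);
	return 0;
}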

View File

@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
le16_to_cpu(payload->handles[i]));
le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@ -958,13 +958,14 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
.size_out = mds->payload_size,
.min_out = struct_size(payload, records, 0),
};
do {
int rc, i;
mbox_cmd.size_out = mds->payload_size;
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
dev_err_ratelimited(dev,

View File

@ -2096,18 +2096,41 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
static void combine_coordinates(struct access_coordinate *c1,
struct access_coordinate *c2)
/**
* cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
* and host bridge
*
* @port: endpoint cxl_port
* @coord: output access coordinates
*
* Return: errno on failure, 0 on success.
*/
int cxl_hb_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
if (c2->write_bandwidth)
c1->write_bandwidth = min(c1->write_bandwidth,
c2->write_bandwidth);
c1->write_latency += c2->write_latency;
struct cxl_port *iter = port;
struct cxl_dport *dport;
if (c2->read_bandwidth)
c1->read_bandwidth = min(c1->read_bandwidth,
c2->read_bandwidth);
c1->read_latency += c2->read_latency;
if (!is_cxl_endpoint(port))
return -EINVAL;
dport = iter->parent_dport;
while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
iter = to_cxl_port(iter->dev.parent);
dport = iter->parent_dport;
}
coord[ACCESS_COORDINATE_LOCAL] =
dport->hb_coord[ACCESS_COORDINATE_LOCAL];
coord[ACCESS_COORDINATE_CPU] =
dport->hb_coord[ACCESS_COORDINATE_CPU];
return 0;
}
static bool parent_port_is_cxl_root(struct cxl_port *port)
{
return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
@ -2129,30 +2152,31 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct cxl_dport *dport;
struct pci_dev *pdev;
unsigned int bw;
bool is_cxl_root;
if (!is_cxl_endpoint(port))
return -EINVAL;
dport = iter->parent_dport;
/*
* Exit the loop when the parent port of the current port is cxl root.
* The iterative loop starts at the endpoint and gathers the
* latency of the CXL link from the current iter to the next downstream
* port each iteration. If the parent is cxl root then there is
* nothing to gather.
* Exit the loop when the parent port of the current iter port is cxl
* root. The iterative loop starts at the endpoint and gathers the
* latency of the CXL link from the current device/port to the connected
* downstream port each iteration.
*/
while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
combine_coordinates(&c, &dport->sw_coord);
do {
dport = iter->parent_dport;
iter = to_cxl_port(iter->dev.parent);
is_cxl_root = parent_port_is_cxl_root(iter);
/*
* There's no valid access_coordinate for a root port since RPs do not
* have CDAT and therefore needs to be skipped.
*/
if (!is_cxl_root)
cxl_coordinates_combine(&c, &c, &dport->sw_coord);
c.write_latency += dport->link_latency;
c.read_latency += dport->link_latency;
iter = to_cxl_port(iter->dev.parent);
dport = iter->parent_dport;
}
/* Augment with the generic port (host bridge) perf data */
combine_coordinates(&c, &dport->hb_coord);
} while (!is_cxl_root);
/* Get the calculated PCI paths bandwidth */
pdev = to_pci_dev(port->uport_dev->parent);

View File

@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
&pdev->resource[bar], &offset, map->reg_type);
&pdev->resource[bar], &offset, reg_type);
return false;
}
map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;

View File

@ -671,7 +671,7 @@ struct cxl_dport {
struct cxl_port *port;
struct cxl_regs regs;
struct access_coordinate sw_coord;
struct access_coordinate hb_coord;
struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
long link_latency;
};
@ -879,9 +879,15 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
int cxl_hb_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.

View File

@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
if (!ids_count[list]) { /* Global Notification */
if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}

View File

@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
return 0;
return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
.llseek = no_llseek,
.owner = THIS_MODULE,
};

View File

@ -390,6 +390,12 @@ static int vpe_hw_init(void *handle)
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
/* Power on VPE */
ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
AMD_PG_STATE_UNGATE);
if (ret)
return ret;
ret = vpe_load_microcode(vpe);
if (ret)
return ret;

View File

@ -1630,7 +1630,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap |= global_active_rb_bitmap;
active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

View File

@ -450,10 +450,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
return false;
default:
return true;
}
@ -714,7 +712,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x1;
if (adev->rev_id == 0)
adev->external_rev_id = 0x1;
else
adev->external_rev_id = adev->rev_id + 0x10;
break;
default:
/* FIXME: not supported yet */
@ -832,10 +833,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg1, sol_reg2;
/* Will reset for the following suspend abort cases.
* 1) Only reset dGPU side.
* 2) S3 suspend got aborted and TOS is active.
*/
if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
!adev->suspend_complete) {
sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
msleep(100);
sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
return (sol_reg1 != sol_reg2);
}
return false;
}
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (soc21_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend aborted, resetting...");
soc21_asic_reset(adev);
}
return soc21_common_hw_init(adev);
}

View File

@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
ring->wptr = 0;
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

View File

@ -1997,6 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
kfd_hws_hang(dqm);
return -ETIME;
}

View File

@ -6257,19 +6257,16 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
stream->use_vsc_sdp_for_colorimetry = false;
if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
stream->use_vsc_sdp_for_colorimetry =
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
} else {
if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
}
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);

View File

@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
return drm_add_modes_noedid(connector, dev->mode_config.max_width,
dev->mode_config.max_height);
/* Maximum resolution supported by DWB */
return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,

View File

@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
!pipe->stream->link_enc)) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn316_disable_otg_wa(clk_mgr_base, context, true);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, false);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}

View File

@ -436,6 +436,15 @@ bool dc_state_add_plane(
goto out;
}
if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
/* ODM combine could prevent us from supporting more planes
* we will reset ODM slice count back to 1 when all planes have
* been removed to maximize the amount of planes supported when
* new planes are added.
*/
resource_update_pipes_for_stream_with_slice_count(
state, dc->current_state, dc->res_pool, stream, 1);
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)

View File

@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
}

View File

@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (!en && !adev->in_s0ix)
if (!en && !adev->in_s0ix) {
/* Adds a GFX reset as workaround just before sending the
* MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
* an invalid state.
*/
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
SMU_RESET_MODE_2, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
return ret;
}

View File

@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
if (++i > 200)
break;
}
}
}

View File

@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
/* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:

View File

@ -2521,7 +2521,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
if (pipe == INVALID_PIPE ||
if (new_cdclk_state->disable_pipes ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
@ -2553,7 +2553,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
if (pipe != INVALID_PIPE &&
if (!new_cdclk_state->disable_pipes &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
@ -3036,6 +3036,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@ -3214,6 +3215,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
new_cdclk_state->disable_pipes = true;
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}

View File

@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
/* update cdclk with pipes disabled */
bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);

View File

@ -4229,7 +4229,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
/*
* FIXME the modeset sequence is currently wrong and
* can't deal with bigjoiner + port sync at the same time.
*/
return crtc_state1->hw.active && crtc_state2->hw.active &&
!crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&

View File

@ -2756,7 +2756,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
if (has_seamless_m_n(connector))
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that when updating M/N live.
*/
if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {

View File

@ -1368,6 +1368,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
/*
* FIXME figure out what is wrong with PSR+bigjoiner and
* fix it. Presumably something related to the fact that
* PSR is a transcoder level feature.
*/
if (crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
"PSR disabled due to bigjoiner\n");
return;
}
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
else

View File

@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that during VRR toggle/push/etc.
*/
if (crtc_state->bigjoiner_pipes)
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;

View File

@ -1295,6 +1295,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a618(gpu))
gpu->ubwc_config.highest_bank_bit = 14;
if (adreno_is_a619(gpu))
/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
gpu->ubwc_config.highest_bank_bit = 13;
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;

View File

@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, entry,
debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
debugfs_create_u32("threshold_high", 0600, entry,
debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0600, entry,
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, entry,
debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, entry,
debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);

View File

@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}

View File

@ -598,6 +598,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@ -655,6 +656,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}

View File

@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;

View File

@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}

View File

@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
static void of_fini(void *p)
{
kfree(p);
}
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
.fini = (void(*)(void *))kfree,
.fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,

View File

@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
/* Can happen if the last fault only partially filled this
* section of the pages array before failing. In that case
* we skip already filled pages.
*/
if (pages[i])
continue;
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
goto err_unlock;
}
}
@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
goto err_pages;
goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:

View File

@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
int count = 0, sc = 0;
bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
release = container_of(fence, struct qxl_release, base);
have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
if (!wait_event_timeout(qdev->release_event,
(dma_fence_is_signaled(fence) ||
(qxl_io_notify_oom(qdev), 0)),
timeout))
return 0;
retry:
sc++;
if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
for (count = 0; count < 11; count++) {
if (!qxl_queue_garbage_collect(qdev, true))
break;
if (dma_fence_is_signaled(fence))
goto signaled;
}
if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
if (sc > 2)
/* back off */
usleep_range(500, 1000);
if (time_after(jiffies, end))
return 0;
if (have_drawable_releases && sc > 300) {
DMA_FENCE_WARN(fence,
"failed to wait on release %llu after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
}
/*
* yeah, original sync_obj_wait gave up after 3 spins when
* have_drawable_releases is not set.
*/
signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
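
The rewritten wait above replaces a single wait_event_timeout() with an explicit retry loop: poll the fence, nudge the device (notify-OOM plus up to 11 garbage-collect passes), back off with usleep_range() once the spin count climbs, and give up when the jiffies deadline passes. A condensed sketch of that shape, with poll_done() and kick_device() as hypothetical stand-ins for the fence check and the device nudges:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

bool poll_done(void);     /* stands in for dma_fence_is_signaled() */
void kick_device(void);   /* stands in for qxl_io_notify_oom() + garbage collection */

static long wait_with_backoff(signed long timeout)
{
	unsigned long end = jiffies + timeout;
	int spins = 0;

	for (;;) {
		if (poll_done())
			return timeout;          /* signalled */
		kick_device();
		if (++spins > 2)
			usleep_range(500, 1000); /* back off once polling gets hot */
		if (time_after(jiffies, end))
			return 0;                /* timed out */
	}
}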

View File

@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
/*
* When running with SEV we always want dma mappings, because
* otherwise ttm tt pool pages will bounce through swiotlb running
* out of available space.
*/
if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;

View File

@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);

View File

@ -288,7 +288,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@ -328,7 +328,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
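
The (u64) casts above matter because (1 << x_w) | x is an int expression: without the cast the shift by y is performed in 32 bits and only then widened, so large exponents silently drop high bits before tau4 is used. A stand-alone illustration with made-up field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int x = 3, x_w = 2, y = 31;

	uint64_t truncated = ((1 << x_w) | x) << y;           /* shifted as 32-bit */
	uint64_t widened   = (uint64_t)((1 << x_w) | x) << y; /* shifted as 64-bit */

	printf("%llx vs %llx\n",
	       (unsigned long long)truncated, (unsigned long long)widened);
	return 0;
}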

View File

@ -4381,9 +4381,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
ret = device_rbtree_insert(iommu, info);
if (ret)
goto free;
if (pdev && pci_ats_supported(pdev)) {
ret = device_rbtree_insert(iommu, info);
if (ret)
goto free;
}
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
@ -4410,7 +4412,8 @@ static void intel_iommu_release_device(struct device *dev)
struct intel_iommu *iommu = info->iommu;
mutex_lock(&iommu->iopf_lock);
device_rbtree_remove(info);
if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&

View File

@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
iommu_pmu_set_filter(pasid, event->attr.config1,
iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,

View File

@ -67,7 +67,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);

View File

@ -1474,7 +1474,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
free_r1bio(r1_bio);
mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {

View File

@ -1151,20 +1151,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
case CEC_MSG_TIMER_STATUS:
if (msg->msg[2] & 0x10) {
switch (msg->msg[2] & 0xf) {
case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
if (msg->len < 5)
valid_la = false;
break;
}
} else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
if (msg->len < 5)
valid_la = false;
}
break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:

View File

@ -1119,10 +1119,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
if (slot->vsd)
gpiod_set_value(slot->vsd, power_on);
if (slot->vio)
gpiod_set_value(slot->vio, power_on);
if (power_on) {
if (slot->vsd) {
gpiod_set_value(slot->vsd, power_on);
msleep(1);
}
if (slot->vio) {
gpiod_set_value(slot->vio, power_on);
msleep(1);
}
} else {
if (slot->vio) {
gpiod_set_value(slot->vio, power_on);
msleep(50);
}
if (slot->vsd) {
gpiod_set_value(slot->vsd, power_on);
msleep(50);
}
}
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@ -1259,18 +1274,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
id, GPIOD_OUT_LOW);
slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
slot->vio = gpiod_get_index_optional(host->dev, "vio",
id, GPIOD_OUT_LOW);
slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
slot->cover = gpiod_get_index_optional(host->dev, "cover",
id, GPIOD_IN);
slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@ -1384,13 +1399,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
host->slot_switch = gpiod_get_optional(host->dev, "switch",
GPIOD_OUT_LOW);
if (IS_ERR(host->slot_switch))
return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
"error looking up slot switch GPIO\n");
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@ -1409,6 +1417,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
GPIOD_OUT_LOW);
if (IS_ERR(host->slot_switch))
return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
"error looking up slot switch GPIO\n");
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;

View File

@ -998,20 +998,173 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
* 802.1Q-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
* must only be propagated to C-VLAN and MAC Bridge components. That means
* VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
* these frames are supposed to be processed by the CPU (software). So we make
* the switch only forward them to the CPU port. And if received from a CPU
* port, forward to a single port. The software is responsible of making the
* switch conform to the latter by setting a single port as destination port on
* the special tag.
/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
* of the Open Systems Interconnection basic reference model (OSI/RM) are
* described; the medium access control (MAC) and logical link control (LLC)
* sublayers. The MAC sublayer is the one facing the physical layer.
*
* This switch intellectual property cannot conform to this part of the standard
* fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
* DAs, it also includes :22-FF which the scope of propagation is not supposed
* to be restricted for these MAC DAs.
* In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
* Bridge component comprises a MAC Relay Entity for interconnecting the Ports
* of the Bridge, at least two Ports, and higher layer entities with at least a
* Spanning Tree Protocol Entity included.
*
* Each Bridge Port also functions as an end station and shall provide the MAC
* Service to an LLC Entity. Each instance of the MAC Service is provided to a
* distinct LLC Entity that supports protocol identification, multiplexing, and
* demultiplexing, for protocol data unit (PDU) transmission and reception by
* one or more higher layer entities.
*
* It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
* Entity associated with each Bridge Port is modeled as being directly
* connected to the attached Local Area Network (LAN).
*
* On the switch with CPU port architecture, CPU port functions as Management
* Port, and the Management Port functionality is provided by software which
* functions as an end station. Software is connected to an IEEE 802 LAN that is
* wholly contained within the system that incorporates the Bridge. Software
* provides access to the LLC Entity associated with each Bridge Port by the
* value of the source port field on the special tag on the frame received by
* software.
*
* We call frames that carry control information to determine the active
* topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
* spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
* Protocol Data Units (MVRPDUs), and frames from other link constrained
* protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
* Link Layer Discovery Protocol (LLDP), link-local frames. They are not
* forwarded by a Bridge. Permanently configured entries in the filtering
* database (FDB) ensure that such frames are discarded by the Forwarding
* Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
*
* Each of the reserved MAC addresses specified in Table 8-1
* (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
* permanently configured in the FDB in C-VLAN components and ERs.
*
* Each of the reserved MAC addresses specified in Table 8-2
* (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
* configured in the FDB in S-VLAN components.
*
* Each of the reserved MAC addresses specified in Table 8-3
* (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
* TPMR components.
*
* The FDB entries for reserved MAC addresses shall specify filtering for all
* Bridge Ports and all VIDs. Management shall not provide the capability to
* modify or remove entries for reserved MAC addresses.
*
* The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
* propagation of PDUs within a Bridged Network, as follows:
*
* The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
* conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
* component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
* PDUs transmitted using this destination address, or any other addresses
* that appear in Table 8-1, Table 8-2, and Table 8-3
* (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
* therefore travel no further than those stations that can be reached via a
* single individual LAN from the originating station.
*
* The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
* address that no conformant S-VLAN component, C-VLAN component, or MAC
* Bridge can forward; however, this address is relayed by a TPMR component.
* PDUs using this destination address, or any of the other addresses that
* appear in both Table 8-1 and Table 8-2 but not in Table 8-3
* (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
* any TPMRs but will propagate no further than the nearest S-VLAN component,
* C-VLAN component, or MAC Bridge.
*
* The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
* that no conformant C-VLAN component or MAC Bridge can forward; however, it is
* relayed by TPMR components and S-VLAN components. PDUs using this
* destination address, or any of the other addresses that appear in Table 8-1
* but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
* will be relayed by TPMR components and S-VLAN components but will propagate
* no further than the nearest C-VLAN component or MAC Bridge.
*
* Because the LLC Entity associated with each Bridge Port is provided via CPU
* port, we must not filter these frames but forward them to CPU port.
*
* In a Bridge, the transmission Port is majorly decided by ingress and egress
* rules, FDB, and spanning tree Port State functions of the Forwarding Process.
* For link-local frames, only CPU port should be designated as destination port
* in the FDB, and the other functions of the Forwarding Process must not
* interfere with the decision of the transmission Port. We call this process
* trapping frames to CPU port.
*
* Therefore, on the switch with CPU port architecture, link-local frames must
* be trapped to CPU port, and certain link-local frames received by a Port of a
* Bridge comprising a TPMR component or an S-VLAN component must be excluded
* from it.
*
* A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
* MAC Relay (TPMR) component as a TPMR component supports only a subset of the
* functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
* doesn't count) of this architecture will either function as a standard MAC
* Bridge or a standard VLAN Bridge.
*
* Therefore, a Bridge of this architecture can only comprise S-VLAN components,
* C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
* we don't need to relay PDUs using the destination addresses specified on the
* Nearest non-TPMR section, and the proportion of the Nearest Customer Bridge
* section where they must be relayed by TPMR components.
*
* One option to trap link-local frames to CPU port is to add static FDB entries
* with CPU port designated as destination port. However, because
* Independent VLAN Learning (IVL) is being used on every VID, each entry only
* applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
* Bridge component or a C-VLAN component, there would have to be 16 times 4096
* entries. This switch intellectual property can only hold a maximum of 2048
* entries. Using this option, there also isn't a mechanism to prevent
* link-local frames from being discarded when the spanning tree Port State of
* the reception Port is discarding.
*
* The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
* registers. Whilst this applies to every VID, it doesn't contain all of the
* reserved MAC addresses without affecting the remaining Standard Group MAC
* Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
* remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
* addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
* destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
* The latter option provides better but not complete conformance.
*
* This switch intellectual property also does not provide a mechanism to trap
* link-local frames with specific destination addresses to CPU port by Bridge,
* to conform to the filtering rules for the distinct Bridge components.
*
* Therefore, regardless of the type of the Bridge component, link-local frames
* with these destination addresses will be trapped to CPU port:
*
* 01-80-C2-00-00-[00,01,02,03,0E]
*
* In a Bridge comprising a MAC Bridge component or a C-VLAN component:
*
* Link-local frames with these destination addresses won't be trapped to CPU
* port which won't conform to IEEE Std 802.1Q-2022:
*
* 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
*
* In a Bridge comprising an S-VLAN component:
*
* Link-local frames with these destination addresses will be trapped to CPU
* port which won't conform to IEEE Std 802.1Q-2022:
*
* 01-80-C2-00-00-00
*
* Link-local frames with these destination addresses won't be trapped to CPU
* port which won't conform to IEEE Std 802.1Q-2022:
*
* 01-80-C2-00-00-[04,05,06,07,08,09,0A]
*
* To trap link-local frames to CPU port as conformant as this switch
* intellectual property can allow, link-local frames are made to be regarded as
* Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
* property only lets the frames regarded as BPDUs bypass the spanning tree Port
* State function of the Forwarding Process.
*
* The only remaining interference is the ingress rules. When the reception Port
* has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
* There doesn't seem to be a mechanism on the switch intellectual property to
* have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
@ -1019,35 +1172,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
MT753X_BPDU_PORT_FW_MASK,
MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
mt7530_rmw(priv, MT753X_BPC,
MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
MT753X_BPDU_PORT_FW_MASK,
MT753X_PAE_BPDU_FR |
MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
MT753X_R01_PORT_FW_MASK,
MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
mt7530_rmw(priv, MT753X_RGAC1,
MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
MT753X_R02_BPDU_FR |
MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_R01_BPDU_FR |
MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
MT753X_R03_PORT_FW_MASK,
MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
mt7530_rmw(priv, MT753X_RGAC2,
MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
MT753X_R0E_BPDU_FR |
MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
MT753X_R03_BPDU_FR |
MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
}
static int

View File

@ -65,6 +65,7 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
#define MT753X_PAE_BPDU_FR BIT(25)
#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
@ -75,20 +76,24 @@ enum mt753x_id {
/* Register for :01 and :02 MAC DA frame control */
#define MT753X_RGAC1 0x28
#define MT753X_R02_BPDU_FR BIT(25)
#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
#define MT753X_R01_BPDU_FR BIT(9)
#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
#define MT753X_R0E_BPDU_FR BIT(25)
#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
#define MT753X_R03_BPDU_FR BIT(9)
#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
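
The new *_BPDU_FR bits and the existing EG_TAG/PORT_FW fields are all built with BIT(), GENMASK() and FIELD_PREP() from <linux/bits.h> and <linux/bitfield.h>; the mt7530_rmw() calls in the previous file then clear the mask and or-in the prepared value. A tiny sketch of that composition with made-up register fields:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_BPDU_FR		BIT(25)
#define DEMO_EG_TAG_MASK	GENMASK(24, 22)
#define DEMO_EG_TAG(x)		FIELD_PREP(DEMO_EG_TAG_MASK, x)

/* Same shape as a read-modify-write: clear the fields, then or-in the new values. */
static u32 demo_set_eg_tag(u32 reg, u32 tag)
{
	reg &= ~(DEMO_BPDU_FR | DEMO_EG_TAG_MASK);
	reg |= DEMO_BPDU_FR | DEMO_EG_TAG(tag);
	return reg;
}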

View File

@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);

View File

@ -696,8 +696,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
bool is_xdp_ring;
u32 i;
is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@ -717,10 +720,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
if (is_xdp_ring)
xdp_return_frame(tx_info->xdpf);
else
dev_kfree_skb_any(tx_info->skb);
}
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->qid));
if (!is_xdp_ring)
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@ -3421,10 +3429,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
int i, budget, rc;
int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@ -3437,27 +3446,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
budget = ENA_MONITORED_TX_QUEUES;
budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
qid = adapter->last_monitored_tx_qid;
while (budget) {
qid = (qid + 1) % io_queue_count;
tx_ring = &adapter->tx_ring[qid];
rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
rc = !ENA_IS_XDP_INDEX(adapter, i) ?
rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
if (!budget)
break;
}
adapter->last_monitored_tx_qid = i % io_queue_count;
adapter->last_monitored_tx_qid = qid;
}
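
The loop above now walks the TX queues round-robin, resuming from the queue checked on the previous pass and stopping after a fixed budget, instead of always starting at the saved index and running to the end. A minimal sketch of that scan pattern; adapter_state and check_queue() are illustrative placeholders:

/* Resume scanning where the previous pass stopped, bounded by a budget. */
struct adapter_state { int last_qid; };
void check_queue(struct adapter_state *st, int qid);

static void scan_queues(struct adapter_state *st, int queue_count, int budget)
{
	int qid = st->last_qid;

	while (budget--) {
		qid = (qid + 1) % queue_count;	/* wrap around the ring of queues */
		check_queue(st, qid);
	}
	st->last_qid = qid;			/* the next pass continues from here */
}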
/* trigger napi schedule after 2 consecutive detections */

View File

@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
return rc;
goto err;
ena_tx_ctx.req_id = req_id;
@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
error_unmap_dma:
ena_unmap_tx_buff(tx_ring, tx_info);
err:
tx_info->xdpf = NULL;
return rc;
}

View File

@ -595,6 +595,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
void pdsc_pci_reset_thread(struct work_struct *work)
{
struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
struct pci_dev *pdev = pdsc->pdev;
pci_dev_get(pdev);
pci_reset_function(pdev);
pci_dev_put(pdev);
}
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@ -609,8 +619,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
pdsc_reset_prepare(pdsc->pdev);
pdsc_reset_done(pdsc->pdev);
/* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
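
The comment above explains the shape of the fix: the health path no longer runs the reset prepare/done sequence directly but queues pdsc_pci_reset_thread() on the driver workqueue, so the blocking PCI reset happens in work context and cannot deadlock with the health thread. A generic sketch of that deferral, with demo_* names as placeholders:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/pci.h>

struct demo_dev {
	struct pci_dev *pdev;
	struct workqueue_struct *wq;
	struct work_struct reset_work;	/* INIT_WORK()ed at probe time */
};

static void demo_reset_thread(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, reset_work);

	pci_dev_get(d->pdev);
	pci_reset_function(d->pdev);	/* may sleep; safe in work context */
	pci_dev_put(d->pdev);
}

static void demo_health_check(struct demo_dev *d)
{
	queue_work(d->wq, &d->reset_work);	/* defer instead of resetting inline */
}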
void pdsc_health_thread(struct work_struct *work)

View File

@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@ -283,9 +284,6 @@ int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
void pdsc_reset_prepare(struct pci_dev *pdev);
void pdsc_reset_done(struct pci_dev *pdev);
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
void pdsc_intr_free(struct pdsc *pdsc, int index);
@ -315,5 +313,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */

View File

@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
if (!pdsc_is_fw_running(pdsc))
return 0;
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}

View File

@ -238,6 +238,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
@ -468,7 +469,7 @@ static void pdsc_restart_health_thread(struct pdsc *pdsc)
mod_timer(&pdsc->wdtimer, jiffies + 1);
}
void pdsc_reset_prepare(struct pci_dev *pdev)
static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
@ -477,10 +478,11 @@ void pdsc_reset_prepare(struct pci_dev *pdev)
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
pci_disable_device(pdev);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
}
void pdsc_reset_done(struct pci_dev *pdev)
static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;

View File

@ -11526,6 +11526,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
if (bp->ptp_cfg)
atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
return 0;

View File

@ -210,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
if (aux_priv) {
struct auxiliary_device *adev;
@ -392,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
aux_priv->edev = edev;
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);

View File

@ -4721,19 +4721,19 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
/* Disable SQ manager's sticky mode operation (set TM6 = 0)
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to same SMQ and transmitting pkts at a time.
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
cfg &= ~BIT_ULL(15);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
/* Disable SQ manager's sticky mode operation (set TM6 = 0)
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to same SMQ and transmitting pkts at a time.
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
cfg &= ~BIT_ULL(15);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);

View File

@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);

View File

@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
{
return fifo->data[fifo->mask & fifo->cc];
}
static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
return fifo->data[fifo->mask & fifo->cc++];
fifo->cc++;
}
static inline void

View File

@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
WARN_ON(node_qid > priv->htb_max_qos_sqs);
if (node_qid == priv->htb_max_qos_sqs) {
struct mlx5e_sq_stats *stats, **stats_list = NULL;
WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
if (!priv->htb_qos_sq_stats) {
struct mlx5e_sq_stats **stats_list;
if (priv->htb_max_qos_sqs == 0) {
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list),
GFP_KERNEL);
if (!stats_list)
return -ENOMEM;
}
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats) {
kvfree(stats_list);
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list), GFP_KERNEL);
if (!stats_list)
return -ENOMEM;
}
if (stats_list)
WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
}
if (!priv->htb_qos_sq_stats[node_qid]) {
struct mlx5e_sq_stats *stats;
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
return -ENOMEM;
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
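
The ordering comment above refers to the usual publish/subscribe idiom: fill the array slot first, then advance the published count with release semantics, so a reader that load-acquires the count is guaranteed to see every slot below it. A minimal sketch of that pairing; the struct and helper names are illustrative, not the driver's:

#include <linux/types.h>
#include <asm/barrier.h>

struct demo_stats { u64 packets; };

/* Writer: fill the slot, then publish the new count with release semantics. */
static void publish_slot(struct demo_stats **slots, unsigned int *count,
			 struct demo_stats *s)
{
	slots[*count] = s;
	smp_store_release(count, *count + 1);
}

/* Reader: a count read with acquire semantics makes the slots below it visible. */
static struct demo_stats *read_slot(struct demo_stats **slots, unsigned int *count,
				    unsigned int i)
{
	unsigned int n = smp_load_acquire(count);

	return i < n ? slots[i] : NULL;
}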

View File

@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)

View File

@ -451,6 +451,23 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
/* If RXFH is configured, changing the channels number is allowed only if
* it does not require resizing the RSS table. This is because the previous
* configuration may no longer be compatible with the new RSS table.
*/
if (netif_is_rxfh_configured(priv->netdev)) {
int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
if (new_rqt_size != cur_rqt_size) {
err = -EINVAL;
netdev_err(priv->netdev,
"%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
__func__, new_rqt_size, cur_rqt_size);
goto out;
}
}
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.

View File

@ -5695,9 +5695,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)

View File

@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
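
The switch from pop() to peek() here pairs with the earlier hunks: the metadata index is only inspected while the descriptor is being built, and the freelist entry is consumed once the WQE is actually committed, so the error path no longer needs to push the index back. A generic sketch of that peek-then-commit split; the ring fields mirror the mlx5e fifo but the names are illustrative:

#include <linux/types.h>

struct idx_fifo {
	u8  *data;
	u16  mask;
	u16  pc, cc;	/* producer / consumer counters */
};

static inline u8 idx_fifo_peek(struct idx_fifo *f)
{
	return f->data[f->mask & f->cc];	/* inspect the head without consuming it */
}

static inline void idx_fifo_pop(struct idx_fifo *f)
{
	f->cc++;				/* consume only after the caller committed */
}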
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,

View File

@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
return p1->owner == p2->owner &&
(p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
p1->id == p2->id :
mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
mlx5_fs_dr_action_get_pkt_reformat_id(p2));
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
(d1->vport.pkt_reformat->id ==
d2->vport.pkt_reformat->id) : true)) ||
mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
/* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}

View File

@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
err = mlx5_crdump_enable(dev);
if (err)
mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
err = mlx5_hwmon_dev_register(dev);
if (err)
mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
mutex_unlock(&dev->intf_state_mutex);
return 0;
@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
if (err)
devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_hwmon_dev_unregister(dev);
mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
devl_unregister(devlink);
devl_unlock(devlink);
}
@ -1680,16 +1694,20 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
}
devl_lock(devlink);
devl_register(devlink);
err = mlx5_devlink_params_register(priv_to_devlink(dev));
devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
goto query_hca_caps_err;
}
devl_unlock(devlink);
return 0;
query_hca_caps_err:
devl_unregister(devlink);
devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@ -1702,6 +1720,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@ -1943,16 +1962,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
err = mlx5_hwmon_dev_register(dev);
if (err)
mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
pci_save_state(pdev);
devlink_register(devlink);
return 0;
err_init_one:
@ -1973,16 +1983,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
/* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
* devlink notify APIs.
* Hence, we must drain them before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
mlx5_hwmon_dev_unregister(dev);
mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);

View File

@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
int offset = 1;
int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;

Some files were not shown because too many files have changed in this diff.