This is the 6.6.30 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYzpCIACgkQONu9yGCS
 aT4OkA//QoG03giM1mJF/41bLNs3tC3cIvBgMnzPXIYoz5Klm2yvca/BKxvUQYLd
 t4EsiISf5tVmwJQK90ymVgmnsqpPPlLG/XHsI5kzRmIWfxbLfg7yJPoKcpiFX6Nd
 vv7gOuMIx8XQqzXlHTP4SyPEQN929tvl9Nd391Sk7n4G9XoooiQ/i533vzsF8MmM
 W1xSfUATmA45+6IYsL9GiNMR672j0nGzWr2hL+WsjBIvhleEDpTieox+nR+/zNge
 K0DyOp2TusbNayWqndqKj3+jOjfsnImT6iDeqpRcrzbU/7ZsXrKRSiZidLHxhtQj
 LWbrA2S6S5vEYykVCDxBuBEGAwJBXtXug5MoQlrsmLQnrVMdvnSpcNCKXrsbvrta
 yoYCk1KkCnWCj0kup9zOmymQ+uY0GdwlWIeaScBMjoz937Vg8tZrKv+J6CU4ewBB
 GtM/Wv4d2ugapb4TP9WtZtx2eqlsvoo4JlTy0D1taVBKjxaj2U9aQLOl9pE2aNuG
 hxigm3rIIUZkkW3Gw+cxYV74iLlSMwMUspgThHsTogL7ATb+JVNeElRxze7tWvQC
 zmqu9foHM46F2ldtAqZg5EbmFqLf0hr7f9hJlHAyoPWiIdzxX2UfCdQKSE9LaBvY
 Qe0FUB18dY9iAvphLDPLYyOiyomYWVNGWC/nb4uftlp45075kpE=
 =F03h
 -----END PGP SIGNATURE-----

Merge v6.6.30

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2024-05-02 16:32:50 +02:00
commit a2c9c3581a
198 changed files with 1786 additions and 1131 deletions


@ -172,7 +172,7 @@ variables.
Offset of the free_list's member. This value is used to compute the number
of free pages.
Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
The free_list represents a linked list of free page blocks.
(list_head, next|prev)
@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
information. Makedumpfile gets the start address of the vmalloc region
from this.
(zone.free_area, MAX_ORDER + 1)
-------------------------------
(zone.free_area, NR_PAGE_ORDERS)
--------------------------------
Free areas descriptor. User-space tools use this value to iterate the
free_area ranges. MAX_ORDER is used by the zone buddy allocator.
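
A hedged aside on the hunk above: NR_PAGE_ORDERS is just the new spelling of the free_area array length that used to be written MAX_ORDER + 1. A minimal C sketch of how a dump tool might consume the exported value; read_number() and read_offset() are hypothetical stand-ins for a real vmcore reader, not makedumpfile's actual API:

    unsigned long nr_orders = read_number("NR_PAGE_ORDERS");
    unsigned long fa_off    = read_offset("zone.free_area");

    /* one free_area per order; each begins with its free_list heads */
    for (unsigned long order = 0; order < nr_orders; order++)
            count_free_blocks(zone_addr + fa_off + order * free_area_size,
                              order);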


@ -205,6 +205,11 @@ Will increase power usage.
Default: 0 (off)
mem_pcpu_rsv
------------
Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
rmem_default
------------
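
A hedged usage note for the new knob documented above: mem_pcpu_rsv lands as net.core.mem_pcpu_rsv, so it can be raised from user space by writing the procfs file. A minimal C sketch (the value 256 is purely illustrative; with 4 KiB pages it equals the documented 1 MB per-CPU default):

    #include <stdio.h>

    int main(void)
    {
            /* needs root; units are pages, per the text above */
            FILE *f = fopen("/proc/sys/net/core/mem_pcpu_rsv", "w");

            if (!f)
                    return 1;
            fprintf(f, "256\n");    /* 256 * 4 KiB = 1 MiB per CPU */
            return fclose(f) ? 1 : 0;
    }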


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 29
SUBLEVEL = 30
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
config ARCH_CONFIGURES_CPU_MITIGATIONS
bool
if !ARCH_CONFIGURES_CPU_MITIGATIONS
config CPU_MITIGATIONS
def_bool y
endif
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS


@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;


@ -293,7 +293,7 @@
regulator-state-standby {
regulator-on-in-suspend;
regulator-suspend-voltage = <1150000>;
regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@ -314,7 +314,7 @@
regulator-state-standby {
regulator-on-in-suspend;
regulator-suspend-voltage = <1050000>;
regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@ -331,7 +331,7 @@
regulator-always-on;
regulator-state-standby {
regulator-suspend-voltage = <1800000>;
regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@ -346,7 +346,7 @@
regulator-max-microvolt = <3700000>;
regulator-state-standby {
regulator-suspend-voltage = <1800000>;
regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};


@ -805,6 +805,7 @@
&pinctrl_usb_pwr>;
dr_mode = "host";
power-active-high;
over-current-active-low;
disable-over-current;
status = "okay";
};


@ -129,7 +129,7 @@
};
&pio {
eth_default: eth_default {
eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@ -156,7 +156,7 @@
};
};
eth_sleep: eth_sleep {
eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@ -182,14 +182,14 @@
};
};
usb0_id_pins_float: usb0_iddig {
usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
usb1_id_pins_float: usb1_iddig {
usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;


@ -249,10 +249,11 @@
#clock-cells = <1>;
};
infracfg: syscon@10001000 {
infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pericfg: syscon@10003000 {


@ -252,7 +252,7 @@
clock-names = "hif_sel";
};
cir: cir@10009000 {
cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@ -283,16 +283,14 @@
};
};
apmixedsys: apmixedsys@10209000 {
compatible = "mediatek,mt7622-apmixedsys",
"syscon";
apmixedsys: clock-controller@10209000 {
compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
topckgen: topckgen@10210000 {
compatible = "mediatek,mt7622-topckgen",
"syscon";
topckgen: clock-controller@10210000 {
compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@ -515,7 +513,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@ -734,9 +731,8 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
ssusbsys: ssusbsys@1a000000 {
compatible = "mediatek,mt7622-ssusbsys",
"syscon";
ssusbsys: clock-controller@1a000000 {
compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@ -793,9 +789,8 @@
};
};
pciesys: pciesys@1a100800 {
compatible = "mediatek,mt7622-pciesys",
"syscon";
pciesys: clock-controller@1a100800 {
compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@ -921,12 +916,13 @@
};
};
hifsys: syscon@1af00000 {
compatible = "mediatek,mt7622-hifsys", "syscon";
hifsys: clock-controller@1af00000 {
compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
#clock-cells = <1>;
};
ethsys: syscon@1b000000 {
ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@ -966,9 +962,7 @@
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
"syscon";
compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,


@ -146,19 +146,19 @@
&cpu_thermal {
cooling-maps {
cpu-active-high {
map-cpu-active-high {
/* active: set fan to cooling level 2 */
cooling-device = <&fan 2 2>;
trip = <&cpu_trip_active_high>;
};
cpu-active-med {
map-cpu-active-med {
/* active: set fan to cooling level 1 */
cooling-device = <&fan 1 1>;
trip = <&cpu_trip_active_med>;
};
cpu-active-low {
map-cpu-active-low {
/* active: set fan to cooling level 0 */
cooling-device = <&fan 0 0>;
trip = <&cpu_trip_active_low>;


@ -16,6 +16,42 @@
#address-cells = <2>;
#size-cells = <2>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu0: cpu@0 {
compatible = "arm,cortex-a53";
reg = <0x0>;
device_type = "cpu";
enable-method = "psci";
#cooling-cells = <2>;
};
cpu1: cpu@1 {
compatible = "arm,cortex-a53";
reg = <0x1>;
device_type = "cpu";
enable-method = "psci";
#cooling-cells = <2>;
};
cpu2: cpu@2 {
compatible = "arm,cortex-a53";
reg = <0x2>;
device_type = "cpu";
enable-method = "psci";
#cooling-cells = <2>;
};
cpu3: cpu@3 {
compatible = "arm,cortex-a53";
reg = <0x3>;
device_type = "cpu";
enable-method = "psci";
#cooling-cells = <2>;
};
};
clk40m: oscillator-40m {
compatible = "fixed-clock";
clock-frequency = <40000000>;
@ -23,42 +59,6 @@
clock-output-names = "clkxtal";
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x0>;
#cooling-cells = <2>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x1>;
#cooling-cells = <2>;
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x2>;
#cooling-cells = <2>;
};
cpu3: cpu@3 {
device_type = "cpu";
enable-method = "psci";
compatible = "arm,cortex-a53";
reg = <0x3>;
#cooling-cells = <2>;
};
};
psci {
compatible = "arm,psci-0.2";
method = "smc";
@ -121,32 +121,23 @@
};
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
ranges;
#address-cells = <2>;
#size-cells = <2>;
gic: interrupt-controller@c000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
interrupt-controller;
reg = <0 0x0c000000 0 0x10000>, /* GICD */
<0 0x0c080000 0 0x80000>, /* GICR */
<0 0x0c400000 0 0x2000>, /* GICC */
<0 0x0c410000 0 0x1000>, /* GICH */
<0 0x0c420000 0 0x2000>; /* GICV */
interrupt-parent = <&gic>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <3>;
};
infracfg: infracfg@10001000 {
@ -203,6 +194,19 @@
#interrupt-cells = <2>;
};
pwm: pwm@10048000 {
compatible = "mediatek,mt7986-pwm";
reg = <0 0x10048000 0 0x1000>;
#pwm-cells = <2>;
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
<&infracfg CLK_INFRA_PWM_STA>,
<&infracfg CLK_INFRA_PWM1_CK>,
<&infracfg CLK_INFRA_PWM2_CK>;
clock-names = "top", "main", "pwm1", "pwm2";
status = "disabled";
};
sgmiisys0: syscon@10060000 {
compatible = "mediatek,mt7986-sgmiisys_0",
"syscon";
@ -240,19 +244,6 @@
status = "disabled";
};
pwm: pwm@10048000 {
compatible = "mediatek,mt7986-pwm";
reg = <0 0x10048000 0 0x1000>;
#pwm-cells = <2>;
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
<&infracfg CLK_INFRA_PWM_STA>,
<&infracfg CLK_INFRA_PWM1_CK>,
<&infracfg CLK_INFRA_PWM2_CK>;
clock-names = "top", "main", "pwm1", "pwm2";
status = "disabled";
};
uart0: serial@11002000 {
compatible = "mediatek,mt7986-uart",
"mediatek,mt6577-uart";
@ -310,9 +301,9 @@
spi0: spi@1100a000 {
compatible = "mediatek,mt7986-spi-ipm", "mediatek,spi-ipm";
reg = <0 0x1100a000 0 0x100>;
#address-cells = <1>;
#size-cells = <0>;
reg = <0 0x1100a000 0 0x100>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen CLK_TOP_MPLL_D2>,
<&topckgen CLK_TOP_SPI_SEL>,
@ -324,9 +315,9 @@
spi1: spi@1100b000 {
compatible = "mediatek,mt7986-spi-ipm", "mediatek,spi-ipm";
reg = <0 0x1100b000 0 0x100>;
#address-cells = <1>;
#size-cells = <0>;
reg = <0 0x1100b000 0 0x100>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen CLK_TOP_MPLL_D2>,
<&topckgen CLK_TOP_SPIM_MST_SEL>,
@ -336,6 +327,20 @@
status = "disabled";
};
thermal: thermal@1100c800 {
compatible = "mediatek,mt7986-thermal";
reg = <0 0x1100c800 0 0x800>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&infracfg CLK_INFRA_THERM_CK>,
<&infracfg CLK_INFRA_ADC_26M_CK>;
clock-names = "therm", "auxadc";
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
#thermal-sensor-cells = <1>;
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
};
auxadc: adc@1100d000 {
compatible = "mediatek,mt7986-auxadc";
reg = <0 0x1100d000 0 0x1000>;
@ -387,39 +392,23 @@
status = "disabled";
};
thermal: thermal@1100c800 {
#thermal-sensor-cells = <1>;
compatible = "mediatek,mt7986-thermal";
reg = <0 0x1100c800 0 0x800>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&infracfg CLK_INFRA_THERM_CK>,
<&infracfg CLK_INFRA_ADC_26M_CK>,
<&infracfg CLK_INFRA_ADC_FRC_CK>;
clock-names = "therm", "auxadc", "adc_32k";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
};
pcie: pcie@11280000 {
compatible = "mediatek,mt7986-pcie",
"mediatek,mt8192-pcie";
reg = <0x00 0x11280000 0x00 0x4000>;
reg-names = "pcie-mac";
ranges = <0x82000000 0x00 0x20000000 0x00
0x20000000 0x00 0x10000000>;
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
reg = <0x00 0x11280000 0x00 0x4000>;
reg-names = "pcie-mac";
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
bus-range = <0x00 0xff>;
ranges = <0x82000000 0x00 0x20000000 0x00
0x20000000 0x00 0x10000000>;
clocks = <&infracfg CLK_INFRA_IPCIE_PIPE_CK>,
<&infracfg CLK_INFRA_IPCIE_CK>,
<&infracfg CLK_INFRA_IPCIER_CK>,
<&infracfg CLK_INFRA_IPCIEB_CK>;
clock-names = "pl_250m", "tl_26m", "peri_26m", "top_133m";
status = "disabled";
phys = <&pcie_port PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
@ -430,6 +419,8 @@
<0 0 0 2 &pcie_intc 1>,
<0 0 0 3 &pcie_intc 2>,
<0 0 0 4 &pcie_intc 3>;
status = "disabled";
pcie_intc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
@ -440,9 +431,9 @@
pcie_phy: t-phy {
compatible = "mediatek,mt7986-tphy",
"mediatek,generic-tphy-v2";
ranges;
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
pcie_port: pcie-phy@11c00000 {
@ -467,9 +458,9 @@
usb_phy: t-phy@11e10000 {
compatible = "mediatek,mt7986-tphy",
"mediatek,generic-tphy-v2";
ranges = <0 0 0x11e10000 0x1700>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0x11e10000 0x1700>;
status = "disabled";
u2port0: usb-phy@0 {
@ -497,8 +488,6 @@
};
ethsys: syscon@15000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "mediatek,mt7986-ethsys",
"syscon";
reg = <0 0x15000000 0 0x1000>;
@ -532,20 +521,6 @@
mediatek,wo-ccif = <&wo_ccif1>;
};
wo_ccif0: syscon@151a5000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151a5000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
};
wo_ccif1: syscon@151ad000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151ad000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
};
eth: ethernet@15100000 {
compatible = "mediatek,mt7986-eth";
reg = <0 0x15100000 0 0x80000>;
@ -578,26 +553,39 @@
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
#address-cells = <1>;
#size-cells = <0>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
mediatek,wed-pcie = <&wed_pcie>;
mediatek,wed = <&wed0>, <&wed1>;
#reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
wo_ccif0: syscon@151a5000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151a5000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
};
wo_ccif1: syscon@151ad000 {
compatible = "mediatek,mt7986-wo-ccif", "syscon";
reg = <0 0x151ad000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
};
wifi: wifi@18000000 {
compatible = "mediatek,mt7986-wmac";
reg = <0 0x18000000 0 0x1000000>,
<0 0x10003000 0 0x1000>,
<0 0x11d10000 0 0x1000>;
resets = <&watchdog MT7986_TOPRGU_CONSYS_SW_RST>;
reset-names = "consys";
clocks = <&topckgen CLK_TOP_CONN_MCUSYS_SEL>,
<&topckgen CLK_TOP_AP2CNN_HOST_SEL>;
clock-names = "mcu", "ap2conn";
reg = <0 0x18000000 0 0x1000000>,
<0 0x10003000 0 0x1000>,
<0 0x11d10000 0 0x1000>;
interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
@ -645,4 +633,13 @@
};
};
};
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
};


@ -405,7 +405,6 @@
};
&mt6358_vgpu_reg {
regulator-min-microvolt = <625000>;
regulator-max-microvolt = <900000>;
regulator-coupled-with = <&mt6358_vsram_gpu_reg>;


@ -1628,6 +1628,7 @@
compatible = "mediatek,mt8183-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
};
gpu: gpu@13040000 {


@ -1392,7 +1392,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <300000>;
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@ -1402,7 +1402,7 @@
mt6315_6_vbuck3: vbuck3 {
regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
regulator-min-microvolt = <300000>;
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@ -1419,7 +1419,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <606250>;
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;


@ -1412,6 +1412,7 @@
reg = <0 0x14001000 0 0x1000>;
interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
<CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;


@ -114,6 +114,77 @@
regulator-boot-on;
};
/* Murata NCP03WF104F05RL */
tboard_thermistor1: thermal-sensor-t1 {
compatible = "generic-adc-thermal";
#thermal-sensor-cells = <0>;
io-channels = <&auxadc 0>;
io-channel-names = "sensor-channel";
temperature-lookup-table = < (-10000) 1553
(-5000) 1485
0 1406
5000 1317
10000 1219
15000 1115
20000 1007
25000 900
30000 796
35000 697
40000 605
45000 523
50000 449
55000 384
60000 327
65000 279
70000 237
75000 202
80000 172
85000 147
90000 125
95000 107
100000 92
105000 79
110000 68
115000 59
120000 51
125000 44>;
};
tboard_thermistor2: thermal-sensor-t2 {
compatible = "generic-adc-thermal";
#thermal-sensor-cells = <0>;
io-channels = <&auxadc 1>;
io-channel-names = "sensor-channel";
temperature-lookup-table = < (-10000) 1553
(-5000) 1485
0 1406
5000 1317
10000 1219
15000 1115
20000 1007
25000 900
30000 796
35000 697
40000 605
45000 523
50000 449
55000 384
60000 327
65000 279
70000 237
75000 202
80000 172
85000 147
90000 125
95000 107
100000 92
105000 79
110000 68
115000 59
120000 51
125000 44>;
};
usb_vbus: regulator-5v0-usb-vbus {
compatible = "regulator-fixed";
regulator-name = "usb-vbus";
@ -176,6 +247,42 @@
memory-region = <&afe_mem>;
};
&auxadc {
status = "okay";
};
&cpu0 {
cpu-supply = <&mt6359_vcore_buck_reg>;
};
&cpu1 {
cpu-supply = <&mt6359_vcore_buck_reg>;
};
&cpu2 {
cpu-supply = <&mt6359_vcore_buck_reg>;
};
&cpu3 {
cpu-supply = <&mt6359_vcore_buck_reg>;
};
&cpu4 {
cpu-supply = <&mt6315_6_vbuck1>;
};
&cpu5 {
cpu-supply = <&mt6315_6_vbuck1>;
};
&cpu6 {
cpu-supply = <&mt6315_6_vbuck1>;
};
&cpu7 {
cpu-supply = <&mt6315_6_vbuck1>;
};
&dp_intf0 {
status = "okay";
@ -1098,7 +1205,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
regulator-min-microvolt = <300000>;
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@ -1116,7 +1223,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <625000>;
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@ -1127,6 +1234,36 @@
};
};
&thermal_zones {
soc-area-thermal {
polling-delay = <1000>;
polling-delay-passive = <250>;
thermal-sensors = <&tboard_thermistor1>;
trips {
trip-crit {
temperature = <84000>;
hysteresis = <1000>;
type = "critical";
};
};
};
pmic-area-thermal {
polling-delay = <1000>;
polling-delay-passive = <0>;
thermal-sensors = <&tboard_thermistor2>;
trips {
trip-crit {
temperature = <84000>;
hysteresis = <1000>;
type = "critical";
};
};
};
};
&u3phy0 {
status = "okay";
};


@ -1963,6 +1963,7 @@
compatible = "mediatek,mt8195-vppsys0", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
};
mutex@1400f000 {
@ -2077,6 +2078,7 @@
compatible = "mediatek,mt8195-vppsys1", "syscon";
reg = <0 0x14f00000 0 0x1000>;
#clock-cells = <1>;
mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
};
mutex@14f01000 {
@ -2623,6 +2625,7 @@
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
};
@ -2776,6 +2779,7 @@
interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};
@ -2846,6 +2850,7 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
clock-names = "vdo1_mutex";
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
};


@ -2644,7 +2644,7 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
power-domains = <&gcc USB30_SEC_GDSC>;
interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 10 IRQ_TYPE_EDGE_BOTH>,
<&pdc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",


@ -1773,6 +1773,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_4_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie4_phy>;
phy-names = "pciephy";
@ -1871,6 +1872,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3B_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3b_phy>;
phy-names = "pciephy";
@ -1969,6 +1971,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3A_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3a_phy>;
phy-names = "pciephy";
@ -2070,6 +2073,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2B_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2b_phy>;
phy-names = "pciephy";
@ -2168,6 +2172,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2A_GDSC>;
required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2a_phy>;
phy-names = "pciephy";


@ -1774,12 +1774,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
<0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
/*
* MSIs for BDF (1:0.0) only works with Device ID 0x5980.
* Hence, the IDs are swapped.
*/
msi-map = <0x0 &gic_its 0x5981 0x1>,
<0x100 &gic_its 0x5980 0x1>;
msi-map = <0x0 &gic_its 0x5980 0x1>,
<0x100 &gic_its 0x5981 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@ -1888,12 +1884,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
/*
* MSIs for BDF (1:0.0) only works with Device ID 0x5a00.
* Hence, the IDs are swapped.
*/
msi-map = <0x0 &gic_its 0x5a01 0x1>,
<0x100 &gic_its 0x5a00 0x1>;
msi-map = <0x0 &gic_its 0x5a00 0x1>,
<0x100 &gic_its 0x5a01 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";


@ -779,7 +779,6 @@
};
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";


@ -401,16 +401,22 @@
gpio1830-supply = <&vcc_1v8>;
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
&pcie_clkreqn_cpm {
rockchip,pins =
<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
gpios {
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@ -443,11 +449,20 @@
usb3 {
usb3_id: usb3-id {
rockchip,pins =
<1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
<1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
};
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable


@ -416,6 +416,8 @@
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@ -525,9 +527,9 @@
#address-cells = <1>;
#size-cells = <0>;
switch@0 {
switch@1f {
compatible = "mediatek,mt7531";
reg = <0>;
reg = <0x1f>;
ports {
#address-cells = <1>;


@ -16,7 +16,7 @@ struct hyp_pool {
* API at EL2.
*/
hyp_spinlock_t lock;
struct list_head free_area[MAX_ORDER + 1];
struct list_head free_area[NR_PAGE_ORDERS];
phys_addr_t range_start;
phys_addr_t range_end;
unsigned short max_order;


@ -7,6 +7,14 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__
#include <asm/ptrace.h>
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->csr_era = (__ip); \
(regs)->regs[3] = current_stack_pointer; \
(regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
}
#endif /* __LOONGARCH_PERF_EVENT_H__ */
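
A short, hedged note on the new hook above: perf_arch_fetch_caller_regs() lets the generic perf core build a synthetic register snapshot for software events, where no real exception frame exists. Roughly (the wrapper shown is the generic kernel helper, simplified, and not part of this diff):

    struct pt_regs regs;

    /* fills csr_era with the caller IP, $r3/$r22 with sp/fp */
    perf_fetch_caller_regs(&regs);
    /* the emitted sample and any stack unwind start from those values */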


@ -202,10 +202,10 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
goto bad_area;
}
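/* Hedged reading of the reordered checks above: a fault whose address
 * equals exception_era(regs) is an instruction fetch and must have
 * VM_EXEC; any other fault now needs only VM_READ or VM_WRITE, so
 * write-only VMAs no longer fault spuriously on data accesses.
 */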
/*


@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
#ifdef CONFIG_64BIT
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
* the boot process (before kernel_map.va_pa_offset is set).


@ -903,8 +903,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
#endif /* !CONFIG_MMU */
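
A hedged aside on the constant swap above, which is plain preprocessor arithmetic:

    /* _AC(-1, UL) expands to -1UL: 0xffffffff on rv32 and
     * 0xffffffffffffffff on rv64, whereas the old literal
     * 0xffffffffUL silently truncated the usable range on rv64.
     */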


@ -173,6 +173,19 @@ static void __init init_resources(void)
if (ret < 0)
goto error;
#ifdef CONFIG_KEXEC_CORE
if (crashk_res.start != crashk_res.end) {
ret = add_resource(&iomem_resource, &crashk_res);
if (ret < 0)
goto error;
}
if (crashk_low_res.start != crashk_low_res.end) {
ret = add_resource(&iomem_resource, &crashk_low_res);
if (ret < 0)
goto error;
}
#endif
#ifdef CONFIG_CRASH_DUMP
if (elfcorehdr_size > 0) {
elfcorehdr_res.start = elfcorehdr_addr;


@ -230,7 +230,7 @@ static void __init setup_bootmem(void)
* In 64-bit, any use of __va/__pa before this point is wrong as we
* did not know the start of DRAM before.
*/
if (IS_ENABLED(CONFIG_64BIT))
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*


@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
/* Now allocate error trap reporting scoreboard. */
sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
for (order = 0; order <= MAX_ORDER; order++) {
for (order = 0; order < NR_PAGE_ORDERS; order++) {
if ((PAGE_SIZE << order) >= sz)
break;
}


@ -62,6 +62,7 @@ config X86
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@ -2421,17 +2422,17 @@ config PREFIX_SYMBOLS
def_bool y
depends on CALL_PADDING && !CFI_CLANG
menuconfig SPECULATION_MITIGATIONS
bool "Mitigations for speculative execution vulnerabilities"
menuconfig CPU_MITIGATIONS
bool "Mitigations for CPU vulnerabilities"
default y
help
Say Y here to enable options which enable mitigations for
speculative execution hardware vulnerabilities.
Say Y here to enable options which enable mitigations for hardware
vulnerabilities (usually related to speculative execution).
If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.
if SPECULATION_MITIGATIONS
if CPU_MITIGATIONS
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"


@ -12,9 +12,10 @@ enum cc_vendor {
};
extern enum cc_vendor cc_vendor;
extern u64 cc_mask;
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
extern u64 cc_mask;
static inline void cc_set_mask(u64 mask)
{
RIP_REL_REF(cc_mask) = mask;
@ -24,6 +25,8 @@ u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
void cc_random_init(void);
#else
static const u64 cc_mask = 0;
static inline u64 cc_mkenc(u64 val)
{
return val;


@ -148,7 +148,7 @@
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | \
_PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
_PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
_PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@ -173,6 +173,7 @@ enum page_cache_mode {
};
#endif
#define _PAGE_CC (_AT(pteval_t, cc_mask))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)


@ -138,7 +138,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
if (cpu_feature_enabled(X86_FEATURE_OSPKE))
if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}


@ -691,6 +691,8 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
return;
@ -700,8 +702,34 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
*/
kvm_pmu_reset(vcpu);
bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
pmu->version = 0;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
pmu->global_ctrl_mask = ~0ull;
pmu->global_status_mask = ~0ull;
pmu->fixed_ctr_ctrl_mask = ~0ull;
pmu->pebs_enable_mask = ~0ull;
pmu->pebs_data_cfg_mask = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
if (!vcpu->kvm->arch.enable_pmu)
return;
static_call(kvm_x86_pmu_refresh)(vcpu);
/*
* At RESET, both Intel and AMD CPUs set all enable bits for general
* purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
* was written for v1 PMUs don't unknowingly leave GP counters disabled
* in the global controls). Emulate that behavior when refreshing the
* PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
*/
if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)


@ -493,19 +493,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
u64 counter_mask;
int i;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
pmu->global_ctrl_mask = ~0ull;
pmu->global_status_mask = ~0ull;
pmu->fixed_ctr_ctrl_mask = ~0ull;
pmu->pebs_enable_mask = ~0ull;
pmu->pebs_data_cfg_mask = ~0ull;
memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
/*
@ -517,8 +504,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
return;
entry = kvm_find_cpuid_entry(vcpu, 0xa);
if (!entry || !vcpu->kvm->arch.enable_pmu)
if (!entry)
return;
eax.full = entry->eax;
edx.full = entry->edx;


@ -163,6 +163,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
/* Check for valid access_width, otherwise, fallback to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@ -777,6 +784,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
size_t access_width;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
@ -784,7 +792,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
access_width = GET_BIT_WIDTH(gas_t) / 8;
addr = ioremap(gas_t->address, access_width);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
@ -980,6 +989,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
int size;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@ -989,14 +999,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 width = 8 << (reg->access_width - 1);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
&val_u32, width);
&val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@ -1005,17 +1015,24 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
/*
* For registers in PCC space, the register size is determined
* by the bit width field; the access size is used to indicate
* the PCC subspace id.
*/
size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
}
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
val, size);
switch (reg->bit_width) {
switch (size) {
case 8:
*val = readb_relaxed(vaddr);
break;
@ -1029,27 +1046,37 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
size, reg->address);
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
size, pcc_ss_id);
}
return -EFAULT;
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
*val = MASK_VAL(reg, *val);
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
int size;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 width = 8 << (reg->access_width - 1);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
(u32)val, width);
(u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@ -1057,17 +1084,27 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
/*
* For registers in PCC space, the register size is determined
* by the bit width field; the access size is used to indicate
* the PCC subspace id.
*/
size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
}
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
val, size);
switch (reg->bit_width) {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
switch (size) {
case 8:
writeb_relaxed(val, vaddr);
break;
@ -1081,8 +1118,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
size, reg->address);
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
size, pcc_ss_id);
}
ret_val = -EFAULT;
break;
}
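
A worked example for the two macros this file now builds on (values are illustrative; the access_width encoding follows the ACPI GAS definition, where 1..4 select 8/16/32/64-bit accesses):

    /* CPC register with bit_width = 12, bit_offset = 4, access_width = 2:
     *
     *   GET_BIT_WIDTH(reg) = 8 << (2 - 1) = 16      -> do a 16-bit access
     *   MASK_VAL(reg, raw) = (raw >> 4) & GENMASK(11, 0)
     *
     * i.e. read 16 bits, shift off the 4 offset bits, keep the 12
     * architected bits of the field.
     */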


@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
if (err < 0)
if (err < 0) {
kfree_skb(skb);
break;
}
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
if (err < 0)
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);


@ -541,6 +541,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@ -3457,13 +3459,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
int err;
static const u8 param[] = { 0x26 };
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
if (IS_ERR(skb))
bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
kfree_skb(skb);
err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
if (err < 0)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
/*


@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
if (!hu->serdev)
return true;
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@ -1935,8 +1938,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
}
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@ -2298,16 +2303,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
power_ctrl_enabled = false;
return PTR_ERR(qcadev->bt_en);
}
if (!qcadev->bt_en)
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
data->soc_type == QCA_WCN7850)) {
dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
return PTR_ERR(qcadev->sw_ctrl);
}
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@ -2326,10 +2336,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
dev_err(&serdev->dev, "failed to acquire enable gpio\n");
return PTR_ERR(qcadev->bt_en);
}
if (!qcadev->bt_en)
power_ctrl_enabled = false;
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");


@ -959,25 +959,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
.min_out = struct_size(payload, records, 0),
};
do {
int rc, i;
mbox_cmd.size_out = mds->payload_size;
struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
.size_out = mds->payload_size,
.min_out = struct_size(payload, records, 0),
};
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@ -1311,7 +1308,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@ -1323,16 +1319,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_POISON,
.size_in = sizeof(pi),
.payload_in = &pi,
.size_out = mds->payload_size,
.payload_out = po,
.min_out = struct_size(po, record, 0),
};
do {
struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
.opcode = CXL_MBOX_OP_GET_POISON,
.size_in = sizeof(pi),
.payload_in = &pi,
.size_out = mds->payload_size,
.payload_out = po,
.min_out = struct_size(po, record, 0),
};
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;


@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
/* Since IRQ may be shared, check if DMA controller is powered on */
if (status == GENMASK(31, 0))
return IRQ_NONE;
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */


@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
spin_lock(&evl->lock);
mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
spin_unlock(&evl->lock);
drain_workqueue(wq->wq);
mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)


@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
spin_lock(&evl->lock);
mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
spin_unlock(&evl->lock);
mutex_unlock(&evl->lock);
return 0;
}


@ -770,7 +770,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
spin_lock(&evl->lock);
mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@ -791,7 +791,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
spin_unlock(&evl->lock);
mutex_unlock(&evl->lock);
return 0;
err_alloc:
@ -814,7 +814,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
spin_lock(&evl->lock);
mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@ -831,7 +831,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
spin_unlock(&evl->lock);
mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}


@ -279,7 +279,7 @@ struct idxd_driver_data {
struct idxd_evl {
/* Lock to protect event log access. */
spinlock_t lock;
struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */


@ -353,7 +353,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
spin_lock_init(&evl->lock);
mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));


@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
spin_lock(&evl->lock);
mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);
mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)


@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
/* migrate events if there is a valid target */
if (target < nr_cpu_ids)
if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
else
target = -1;
perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
}
return 0;
}


@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
writel(val, pchan->base + reg);
writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
writel(val, od->base + reg);
writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)


@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
if (dma_desc->bytes_req == bytes_xfer)
return 0;
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;


@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
* @lock: lock to access struct xilinx_dpdma_chan
* @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
* @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
spin_lock_irqsave(&chan->vchan.lock, flags);
spin_lock_irqsave(&chan->lock, flags);
spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}


@ -205,7 +205,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *gisr;
@ -241,7 +242,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@ -250,7 +252,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);


@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
TEGRA186_GPIO_SCR_SEC_REN | \
TEGRA186_GPIO_SCR_SEC_G1R | \
TEGRA186_GPIO_SCR_SEC_G1W)
#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
return true;
/*
* When SCR_SEC_[R|W]EN is unset, then we have full read/write access to all the
* registers for given GPIO pin.
* When SCR_SEC[R|W]EN is set, then there is need to further check the accompanying
* SCR_SEC_G1[R|W] bit to determine read/write access to all the registers for given
* GPIO pin.
*/
if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
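
A hedged restatement of the rewritten predicate above as straight-line C with the same truth table (equivalent logic, not the committed form):

    /* a direction is accessible if its SEC_*EN gate is off, or the
     * gate is on and the matching G1 grant bit is also set
     */
    bool can_read  = !(value & TEGRA186_GPIO_SCR_SEC_REN) ||
                      (value & TEGRA186_GPIO_SCR_SEC_G1R);
    bool can_write = !(value & TEGRA186_GPIO_SCR_SEC_WEN) ||
                      (value & TEGRA186_GPIO_SCR_SEC_G1W);

    return can_read && can_write;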


@ -1785,6 +1785,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);


@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {


@ -105,6 +105,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
stats.requested_visible_vram/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);
drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;


@ -625,8 +625,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@ -1280,26 +1279,39 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
if (!res)
return;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
if (shared)
stats->gtt_shared += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
if (shared)
stats->cpu_shared += size;
break;
}
@ -1384,10 +1396,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
if (amdgpu_bo_in_cpu_visible_vram(abo))
if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@ -1411,7 +1420,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
!amdgpu_bo_in_cpu_visible_vram(abo))
!amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@ -1571,6 +1580,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@ -1579,10 +1589,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";


@ -138,12 +138,18 @@ struct amdgpu_bo_vm {
struct amdgpu_mem_stats {
/* current VRAM usage, includes visible VRAM */
uint64_t vram;
/* current shared VRAM usage, includes visible VRAM */
uint64_t vram_shared;
/* current visible VRAM usage */
uint64_t visible_vram;
/* current GTT usage */
uint64_t gtt;
/* current shared GTT usage */
uint64_t gtt_shared;
/* current system memory usage */
uint64_t cpu;
/* current shared system memory usage */
uint64_t cpu_shared;
/* sum of evicted buffers, includes visible VRAM */
uint64_t evicted_vram;
/* sum of evicted buffers due to CPU access */
@ -244,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
* amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
*/
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
amdgpu_res_next(&cursor, cursor.size);
}
return false;
}
/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/

View File

@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@ -408,40 +408,55 @@ error:
return r;
}
/**
* amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
* @adev: amdgpu device
* @res: the resource to check
*
* Returns: true if the full resource is CPU visible, false otherwise.
*/
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res)
{
struct amdgpu_res_cursor cursor;
if (!res)
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
res->mem_type == AMDGPU_PL_PREEMPT)
return true;
if (res->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
return false;
amdgpu_res_next(&cursor, cursor.size);
}
return true;
}
/*
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
* amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
*
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
return true;
if (mem->mem_type != TTM_PL_VRAM)
if (!amdgpu_res_cpu_visible(adev, mem))
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
end = cursor.start + cursor.size;
while (cursor.remaining) {
amdgpu_res_next(&cursor, cursor.size);
/* ttm_resource_ioremap only supports contiguous memory */
if (mem->mem_type == TTM_PL_VRAM &&
!(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
return false;
if (!cursor.remaining)
break;
/* ttm_resource_ioremap only supports contiguous memory */
if (end != cursor.start)
return false;
end = cursor.start + cursor.size;
}
return end <= adev->gmc.visible_vram_size;
return true;
}
/*
@ -534,8 +549,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
if (!amdgpu_res_copyable(adev, old_mem) ||
!amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
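
The cursor walk in amdgpu_res_cpu_visible() above generalizes to any segmented allocation: every backing segment must end below the visible-VRAM limit, not merely start below it (which is all the removed amdgpu_bo_in_cpu_visible_vram() helper checked). Below is a minimal userspace sketch of the same walk, with a hypothetical struct segment standing in for one amdgpu_res_cursor step; it is an illustration of the pattern, not code from the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct segment {                /* stand-in for one amdgpu_res_cursor step */
        uint64_t start;
        uint64_t size;
};

/* True only if every segment lies entirely below the visible limit,
 * mirroring the conservative "(start + size) >= visible -> false"
 * test in the patch above.
 */
static bool all_segments_visible(const struct segment *segs, size_t n,
                                 uint64_t visible_limit)
{
        for (size_t i = 0; i < n; i++)
                if (segs[i].start + segs[i].size >= visible_limit)
                        return false;
        return true;
}

int main(void)
{
        struct segment bo[] = { { 0, 4096 }, { 1 << 20, 4096 } };

        printf("%d\n", all_segments_visible(bo, 2, 2 << 20)); /* 1 */
        printf("%d\n", all_segments_visible(bo, 2, 1 << 20)); /* 0 */
        return 0;
}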

View File

@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,

View File

@ -365,7 +365,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
<< (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),

View File

@ -292,17 +292,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
if (ring->me > 1) {
amdgpu_asic_flush_hdp(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
}
/**

View File

@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o

View File

@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
/* This must occur after the backlight is properly initialised */
psb_lid_timer_init(dev_priv);
return 0;
}
@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}

View File

@ -170,7 +170,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@ -499,11 +498,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
/* LID-Switch */
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@ -599,10 +594,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
/* psb_lid.c */
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);

View File

@ -1,80 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
**************************************************************************/
#include <linux/spinlock.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
static void psb_lid_timer_func(struct timer_list *t)
{
struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
u32 __iomem *lid_state = dev_priv->opregion.lid_state;
u32 pp_status;
if (readl(lid_state) == dev_priv->lid_last_state)
goto lid_timer_schedule;
if ((readl(lid_state)) & 0x01) {
/*lid state is open*/
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0 &&
(pp_status & PP_SEQUENCE_MASK) != 0);
if (REG_READ(PP_STATUS) & PP_ON) {
/*FIXME: should be backlight level before*/
psb_intel_lvds_set_brightness(dev, 100);
} else {
DRM_DEBUG("LVDS panel never powered up");
return;
}
} else {
psb_intel_lvds_set_brightness(dev, 0);
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0);
}
dev_priv->lid_last_state = readl(lid_state);
lid_timer_schedule:
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
if (!timer_pending(lid_timer)) {
lid_timer->expires = jiffies + PSB_LID_DELAY;
add_timer(lid_timer);
}
spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
void psb_lid_timer_init(struct drm_psb_private *dev_priv)
{
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
spin_lock_init(&dev_priv->lid_lock);
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
timer_setup(lid_timer, psb_lid_timer_func, 0);
lid_timer->expires = jiffies + PSB_LID_DELAY;
add_timer(lid_timer);
spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
{
del_timer_sync(&dev_priv->lid_timer);
}

View File

@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (int j = 0; j <= MAX_ORDER; ++j) {
for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);

View File

@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@ -287,17 +287,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_uncached[order];
@ -563,11 +569,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
/* Initialize only pool types which are actually used */
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_init(pt, pool, i, j);
}
}
}
EXPORT_SYMBOL(ttm_pool_init);
@ -584,10 +596,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_fini(pt);
}
}
/* We removed the pool types from the LRU, but we need to also make sure
@ -641,7 +659,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
for (i = 0; i <= MAX_ORDER; ++i)
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@ -652,7 +670,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
for (i = 0; i <= MAX_ORDER; ++i)
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@ -761,7 +779,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i <= MAX_ORDER; ++i) {
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@ -794,7 +812,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
for (i = 0; i <= MAX_ORDER; ++i) {
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
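
All of the loop-bound changes in this file are the same mechanical substitution: NR_PAGE_ORDERS is defined as MAX_ORDER + 1, so the inclusive bound "i <= MAX_ORDER" becomes the conventional exclusive bound "i < NR_PAGE_ORDERS" without changing which orders are visited. A tiny sketch with illustrative values (the real constants are configuration-dependent):

#include <stdio.h>

#define MAX_ORDER      10                  /* illustrative, not the real value */
#define NR_PAGE_ORDERS (MAX_ORDER + 1)     /* orders 0..MAX_ORDER inclusive */

int main(void)
{
        int pages_per_block[NR_PAGE_ORDERS] = { 0 };

        /* The exclusive bound visits exactly NR_PAGE_ORDERS entries --
         * the same set the old "i <= MAX_ORDER" loop visited.
         */
        for (int i = 0; i < NR_PAGE_ORDERS; i++)
                pages_per_block[i] = 1 << i;    /* pages in one block of order i */

        printf("orders: %d (0..%d), largest block: %d pages\n",
               NR_PAGE_ORDERS, MAX_ORDER, pages_per_block[MAX_ORDER]);
        return 0;
}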

View File

@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
workitem->reports_supported |= STD_MOUSE | HIDPP;
if (djrcv_dev->type == recvr_type_mouse_only)
workitem->reports_supported |= MULTIMEDIA;
workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}

View File

@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
if (recv_len)
clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
if (ret != n)
return ret < 0 ? ret : -EIO;
@ -566,9 +560,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
return IRQ_HANDLED;
i2c_hid_get_input(ihid);
return IRQ_HANDLED;

View File

@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}

View File

@ -2187,13 +2187,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
* takes place. adap->algo->master_xfer existence isn't checked.
* takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@ -2260,11 +2265,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
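
For context, here is what a typical caller of the i2c_transfer() path whose capability check moved above looks like: a register read expressed as a write-then-read message pair. This is a hedged kernel-style sketch; example_read_reg and its arguments are hypothetical, only struct i2c_msg, I2C_M_RD and i2c_transfer() are the real API.

#include <linux/errno.h>
#include <linux/i2c.h>

/* Read one byte from register @reg of @client; returns 0 or -errno. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                {       /* write the register address */
                        .addr  = client->addr,
                        .flags = 0,
                        .len   = 1,
                        .buf   = &reg,
                },
                {       /* read the register contents back */
                        .addr  = client->addr,
                        .flags = I2C_M_RD,
                        .len   = 1,
                        .buf   = val,
                },
        };
        int ret;

        /* i2c_transfer() returns the number of messages executed. */
        ret = i2c_transfer(client->adapter, msgs, 2);
        if (ret < 0)
                return ret;
        return ret == 2 ? 0 : -EIO;
}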

View File

@ -4561,13 +4561,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
if (err) {
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
if (err)
its_vpe_irq_domain_free(domain, virq, i);
return err;
}

View File

@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = true;
spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
return sdhci_msm_ice_resume(msm_host);
ret = sdhci_msm_ice_resume(msm_host);
if (ret)
return ret;
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {

View File

@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
0xffffffff };
};
static struct mtd_info *doclist = NULL;
@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
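
The diskonchip change swaps a 0xffffffff sentinel entry for iteration by ARRAY_SIZE(), which stays correct when entries are added or removed and needs no reserved value. The idiom in isolation, as a runnable userspace sketch with placeholder addresses:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned long locations[] = {
        0xc8000, 0xca000, 0xcc000,      /* placeholder probe addresses */
};

int main(void)
{
        /* No sentinel entry needed: the bound comes from the array type. */
        for (size_t i = 0; i < ARRAY_SIZE(locations); i++)
                printf("probe 0x%lx\n", locations[i]);
        return 0;
}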

View File

@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
} else {
} else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
(q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
if (q_op.cmd_reg == OP_BLOCK_ERASE)
write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

View File

@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
static void
mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev, "p%d: failed to read port status\n", port);
return;
}
switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
__set_bit(PHY_INTERFACE_MODE_REVMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
__set_bit(PHY_INTERFACE_MODE_MII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
__set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
__set_bit(PHY_INTERFACE_MODE_RMII, supported);
break;
case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
__set_bit(PHY_INTERFACE_MODE_RGMII, supported);
break;
default:
dev_err(chip->dev,
"p%d: invalid port mode in status register: %04x\n",
port, reg);
}
}
static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
if (!mv88e6xxx_phy_is_internal(chip, port))
mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}

View File

@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
/* - Modes with PHY suffix use output instead of input clock
* - Modes without RMII or RGMII use MII
* - Modes without speed do not have a fixed speed specified in the manual
* ("DC to x MHz" - variable clock support?)
*/
#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300

View File

@ -435,10 +435,8 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
struct bcmasp_intf *intf =
container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@ -481,10 +479,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
/* Ensure all descriptors have been written to DRAM for the hardware
* to see updated contents.
*/
wmb();
return released;
}
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
struct bcmasp_intf *intf =
container_of(napi, struct bcmasp_intf, tx_napi);
int released = 0;
released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@ -794,6 +798,7 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
@ -904,6 +909,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
bcmasp_tx_reclaim(intf);
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);

View File

@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
if (netif_running(dev)) {
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
}
spin_unlock_irq(&bp->lock);

View File

@ -1659,7 +1659,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@ -1669,7 +1669,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@ -1685,7 +1685,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@ -1696,7 +1696,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats.rx.rx_oom_discards += 1;
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@ -1914,11 +1914,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
if (!frag_len) {
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
if (!frag_len)
goto oom_next_rx;
}
xdp_active = true;
}
@ -1941,9 +1938,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
goto oom_next_rx;
}
} else {
u32 payload;
@ -1954,29 +1949,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
if (!skb) {
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
if (!skb)
goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
if (!skb) {
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
if (!skb)
goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
goto oom_next_rx;
}
}
}
@ -2054,6 +2041,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
oom_next_rx:
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@ -2099,7 +2091,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
cpr->sw_stats.rx.rx_netpoll_discards += 1;
cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@ -11807,6 +11799,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
{
bnxt_tx_disable(bp);
bnxt_disable_napi(bp);
bnxt_disable_int_sync(bp);
bnxt_free_irq(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
}
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@ -11820,12 +11822,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
bnxt_tx_disable(bp);
bnxt_disable_napi(bp);
bnxt_disable_int_sync(bp);
bnxt_free_irq(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@ -13967,6 +13964,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@ -13975,16 +13973,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
if (state == pci_channel_io_perm_failure) {
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_err(bp->dev, "Firmware reset already in progress\n");
abort = true;
}
if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
if (state == pci_channel_io_frozen)
/* Link is not reliable anymore if state is pci_channel_io_frozen
* so we disable bus master to prevent any potential bad DMAs before
* freeing kernel memory.
*/
if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
bnxt_fw_fatal_close(bp);
}
if (netif_running(netdev))
bnxt_close(netdev);
__bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@ -14070,6 +14079,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
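
The bnxt_rx_pkt() rework above replaces five copies of "bump rx_oom_discards, set rc = -ENOMEM, goto next_rx" with a single oom_next_rx label, so the accounting lives in one place. The shape of that refactor, reduced to a runnable userspace sketch with hypothetical names:

#include <errno.h>
#include <stdio.h>

struct stats { unsigned long oom_discards; };

static int consume_packet(struct stats *st, int step_a_ok, int step_b_ok)
{
        int rc = 0;

        if (!step_a_ok)
                goto oom;       /* was: st->oom_discards++; rc = -ENOMEM; goto out; */
        if (!step_b_ok)
                goto oom;       /* every failure path shares one exit */

        printf("packet consumed\n");
out:
        return rc;

oom:    /* single place that accounts the failure */
        st->oom_discards++;
        rc = -ENOMEM;
        goto out;
}

int main(void)
{
        struct stats st = { 0 };

        consume_packet(&st, 1, 0);
        printf("oom_discards=%lu\n", st.oom_discards);  /* 1 */
        return 0;
}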

View File

@ -16237,8 +16237,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
if (val < MAX_FRAME_SIZE_DEFAULT)
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
pf->hw.port, val);
dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@ -16778,7 +16778,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;

View File

@ -3574,6 +3574,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
* iavf_is_tc_config_same - Compare the mqprio TC config with the
* TC config already configured on this adapter.
* @adapter: board private structure
* @mqprio_qopt: TC config received from kernel.
*
* This function compares the TC config received from the kernel
* with the config already configured on the adapter.
*
* Return: True if the configuration is the same, false otherwise.
**/
static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
struct tc_mqprio_qopt *mqprio_qopt)
{
struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
int i;
if (adapter->num_tc != mqprio_qopt->num_tc)
return false;
for (i = 0; i < adapter->num_tc; i++) {
if (ch[i].count != mqprio_qopt->count[i] ||
ch[i].offset != mqprio_qopt->offset[i])
return false;
}
return true;
}
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
@ -3631,7 +3659,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
if (adapter->num_tc == num_tc)
if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;

View File

@ -847,6 +847,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
lockdep_assert_held(&vf->cfg_lock);
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@ -858,11 +863,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
lockdep_assert_held(&vf->cfg_lock);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@ -947,14 +947,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
return err;
}
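
The ice_reset_vf() fix moves the cfg_lock acquisition ahead of lag_mutex so that every path takes the two locks in one global order and releases them in reverse -- the standard deadlock-avoidance rule. A minimal pthread sketch of that discipline (the names are illustrative, not the driver's types):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Every caller takes cfg_lock first and lag_mutex second, and unlocks
 * in the reverse order -- so two threads can never each hold one lock
 * while waiting for the other.
 */
static void reset_vf(int id)
{
        pthread_mutex_lock(&cfg_lock);
        pthread_mutex_lock(&lag_mutex);

        printf("resetting vf %d\n", id);

        pthread_mutex_unlock(&lag_mutex);
        pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
        reset_vf(0);
        return 0;
}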

View File

@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
.rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)

View File

@ -835,7 +835,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
EMAD, DISCARD);
EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{

View File

@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
unsigned int ref_count;
refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
unsigned int ref_count;
refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
set->ref_count = 1;
refcount_set(&set->ref_count, 1);
return set;
}
@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
if (--set->ref_count)
if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
set->ref_count++;
refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
fwd_entry->ref_count = 1;
refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
fwd_entry->ref_count++;
refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
if (--fwd_entry->ref_count)
if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
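
The conversions in this file (and in the mlxsw files that follow) all apply the same recipe: a bare counter mutated with ++/-- becomes a refcount_t driven by refcount_set/refcount_inc/refcount_dec_and_test, which saturates instead of wrapping and makes the "last put frees" test explicit. A C11 userspace analogue of the put side is sketched below; the kernel type additionally traps over- and underflow, which this stand-in does not.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_uint ref;        /* stand-in for refcount_t */
};

/* Returns true exactly once: for the caller that drops the last ref. */
static bool obj_dec_and_test(struct obj *o)
{
        return atomic_fetch_sub(&o->ref, 1) == 1;
}

static void obj_put(struct obj *o)
{
        if (!obj_dec_and_test(o))
                return;
        printf("last reference dropped, freeing\n");
}

int main(void)
{
        struct obj o;

        atomic_init(&o.ref, 2);  /* like refcount_set(&o->ref, 2) */
        obj_put(&o);             /* no output */
        obj_put(&o);             /* frees */
        return 0;
}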

View File

@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@ -105,7 +106,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
unsigned int ref_count;
refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@ -282,7 +283,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
key_info->ref_count = 1;
refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@ -304,7 +305,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
key_info->ref_count++;
refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@ -313,7 +314,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
if (--key_info->ref_count)
if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}

View File

@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
bool mcia_128b_supported;
bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
if (err)
return err;
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
&mcia_128b_supported);
if (!err)
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
&mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
err = mlxsw_env_max_module_eeprom_len_query(env);
if (err)
goto err_eeprom_len_query;
mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:

View File

@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
return ruleset->ref_count == 2;
return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
ruleset->ref_count++;
refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
if (--ruleset->ref_count)
if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

View File

@ -9,6 +9,8 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@ -57,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
u16 id;
int id;
id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
if (id < tcam->max_regions) {
__set_bit(id, tcam->used_regions);
*p_id = id;
return 0;
}
return -ENOBUFS;
id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
GFP_KERNEL);
if (id < 0)
return id;
*p_id = id;
return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
__clear_bit(id, tcam->used_regions);
ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
u16 id;
int id;
id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
if (id < tcam->max_groups) {
__set_bit(id, tcam->used_groups);
*p_id = id;
return 0;
}
return -ENOBUFS;
id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
GFP_KERNEL);
if (id < 0)
return id;
*p_id = id;
return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
__clear_bit(id, tcam->used_groups);
ida_free(&tcam->used_groups, id);
}
struct mlxsw_sp_acl_tcam_pattern {
@ -155,7 +159,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
unsigned int ref_count;
refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@ -176,7 +180,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
unsigned int ref_count;
refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@ -714,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash has run out of credits, so it was interrupted.
/* Rehash has run out of credits, so it was interrupted.
* Schedule the work as soon as possible to continue.
@ -724,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
/* The entry markers are relative to the current chunk and therefore
* need to be reset together with the chunk marker.
*/
ctx->current_vchunk = NULL;
ctx->start_ventry = NULL;
ctx->stop_ventry = NULL;
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
@ -746,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
vregion->rehash.ctx.current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@ -769,7 +786,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
vregion->ref_count = 1;
refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@ -819,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
cancel_delayed_work_sync(&vregion->rehash.dw);
if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
ctx->hints_priv)
ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@ -856,7 +877,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
vregion->ref_count++;
refcount_inc(&vregion->ref_count);
return vregion;
}
@ -871,7 +892,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
if (--vregion->ref_count)
if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@ -924,7 +945,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
vchunk->ref_count = 1;
refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@ -1008,7 +1029,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
vchunk->ref_count++;
refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@ -1019,7 +1040,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
if (--vchunk->ref_count)
if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
@ -1153,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
ventry->entry, activity);
struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
int err;
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
activity);
mutex_unlock(&vregion->lock);
return err;
}
static int
@ -1188,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
WARN_ON(vchunk->chunk2);
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@ -1206,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@ -1229,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
if (list_empty(&vchunk->ventry_list))
goto out;
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@ -1238,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
WARN_ON(ventry->vchunk != vchunk);
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@ -1278,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@ -1291,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
if (list_empty(&vregion->vchunk_list))
return 0;
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@ -1319,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
if (ctx->this_is_rollback)
return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@ -1339,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@ -1388,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@ -1440,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
return;
}
if (*credits >= 0)
@ -1548,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
if (!tcam->used_regions) {
err = -ENOMEM;
goto err_alloc_used_regions;
}
ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
if (!tcam->used_groups) {
err = -ENOMEM;
goto err_alloc_used_groups;
}
ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@ -1574,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
bitmap_free(tcam->used_groups);
err_alloc_used_groups:
bitmap_free(tcam->used_regions);
err_alloc_used_regions:
ida_destroy(&tcam->used_groups);
ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@ -1590,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
bitmap_free(tcam->used_groups);
bitmap_free(tcam->used_regions);
ida_destroy(&tcam->used_groups);
ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
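
Replacing the hand-rolled bitmap allocators with an IDA removes the fixed-size bitmap_zalloc() buffers and the manual -ENOBUFS bookkeeping: ida_alloc_max() hands out the lowest free ID up to an inclusive bound, and ida_free() returns it. A kernel-style usage sketch with hypothetical names (example_region_ida and the wrappers are illustrative; DEFINE_IDA, ida_alloc_max and ida_free are the real API):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/types.h>

static DEFINE_IDA(example_region_ida);

/* Allocate the lowest free region ID in [0, max_regions - 1]. */
static int example_region_id_get(unsigned int max_regions, u16 *p_id)
{
        int id;

        id = ida_alloc_max(&example_region_ida, max_regions - 1, GFP_KERNEL);
        if (id < 0)
                return id;      /* -ENOMEM or -ENOSPC, no manual -ENOBUFS */
        *p_id = id;
        return 0;
}

static void example_region_id_put(u16 id)
{
        ida_free(&example_region_ida, id);
}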

View File

@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
unsigned long *used_regions; /* bit array */
struct ida used_regions;
unsigned int max_regions;
unsigned long *used_groups; /* bit array */
struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */

View File

@ -501,7 +501,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@ -578,7 +578,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count == 0)
if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@ -654,7 +654,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
lpm_tree->ref_count = 1;
refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@ -678,7 +678,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@ -691,14 +691,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
lpm_tree->ref_count++;
refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
if (--lpm_tree->ref_count == 0)
mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
if (!refcount_dec_and_test(&lpm_tree->ref_count))
return;
mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

View File

@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
unsigned int ref_count;
refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
bridge_port->ref_count++;
refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
if (--bridge_port->ref_count != 0)
if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);

View File

@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
(skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
mtype_seqid = skb_cb->skb_mtype_seqid;
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;

View File

@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
if (rx_chn->irq[i] <= 0) {
ret = rx_chn->irq[i];
ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
if (ret <= 0) {
if (!ret)
ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
rx_chn->irq[i] = ret;
}
return 0;
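
The icssg-prueth fix is a common pattern: the IRQ getter can return a negative errno (or 0 for "no IRQ"), so the result must be checked in a signed local before it is stored in the array, with 0 normalized to a real errno. In general form, as a hedged userspace sketch with a hypothetical getter:

#include <errno.h>
#include <stdio.h>

/* Hypothetical getter: > 0 on success, 0 if absent, -errno on failure. */
static int get_irq(int index)
{
        return index == 0 ? 42 : 0;
}

static int init_irqs(unsigned int *irqs, int n)
{
        for (int i = 0; i < n; i++) {
                /* Check in a signed local before storing: once the value
                 * lands in an unsigned slot, "<= 0" can no longer catch
                 * errors, and 0 must be turned into a real errno.
                 */
                int ret = get_irq(i);

                if (ret <= 0)
                        return ret ? ret : -ENXIO;
                irqs[i] = ret;
        }
        return 0;
}

int main(void)
{
        unsigned int irqs[2];

        printf("%d\n", init_irqs(irqs, 2));  /* -ENXIO: slot 1 absent */
        return 0;
}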

View File

@ -1585,7 +1585,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
struct irq_affinity affd = {0, };
struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
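
The txgbe/wx change is purely an initializer fix: "{0, }" zero-initializes struct irq_affinity positionally, leaving pre_vectors at 0, while the driver needs the first MSI-X vector excluded from affinity spreading. A designated initializer states which member is meant and survives field reordering. Illustrated on a stand-in struct (runnable sketch, not the kernel type):

#include <stdio.h>

struct affinity_desc {          /* stand-in for struct irq_affinity */
        unsigned int pre_vectors;   /* vectors excluded at the front */
        unsigned int post_vectors;  /* vectors excluded at the end */
};

int main(void)
{
        struct affinity_desc wrong = { 0, };                 /* everything 0 */
        struct affinity_desc right = { .pre_vectors = 1 };   /* intent explicit */

        printf("wrong.pre_vectors=%u right.pre_vectors=%u\n",
               wrong.pre_vectors, right.pre_vectors);        /* 0 and 1 */
        return 0;
}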

Some files were not shown because too many files have changed in this diff.