Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Author: David S. Miller
Date:   2017-02-11 02:31:11 -05:00
Commit: 35eeacf182
97 changed files with 863 additions and 351 deletions

@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-*
F: drivers/*/*aspeed*
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1773,7 +1773,7 @@ F: drivers/soc/renesas/
F: include/linux/soc/renesas/
ARM/SOCFPGA ARCHITECTURE
M: Dinh Nguyen <dinguyen@opensource.altera.com>
M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: arch/arm/mach-socfpga/
F: arch/arm/boot/dts/socfpga*
@@ -1783,7 +1783,7 @@ W: http://www.rocketboards.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
M: Dinh Nguyen <dinguyen@opensource.altera.com>
M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: drivers/clk/socfpga/
@@ -2175,56 +2175,56 @@ F: include/linux/atm*
F: include/uapi/linux/atm*
ATMEL AT91 / AT32 MCI DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c
ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-iio@vger.kernel.org
S: Supported
F: drivers/iio/adc/at91-sama5d2_adc.c
ATMEL Audio ALSA driver
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel
ATMEL XDMA DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-arm-kernel@lists.infradead.org
L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/at_xdmac.c
ATMEL I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91.c
ATMEL ISI DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/platform/soc_camera/atmel-isi.c
F: include/media/atmel-isi.h
ATMEL LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/atmel_lcdfb.c
F: include/video/atmel_lcdc.h
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/net/ethernet/cadence/
@@ -2236,32 +2236,32 @@ S: Supported
F: drivers/mtd/nand/atmel_nand*
ATMEL SDMMC DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-mmc@vger.kernel.org
S: Supported
F: drivers/mmc/host/sdhci-of-at91.c
ATMEL SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*
ATMEL SSC DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
F: include/linux/atmel-ssc.h
ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel_tclib.c
F: drivers/clocksource/tcb_clksrc.c
ATMEL USBA UDC DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -9765,7 +9765,7 @@ S: Maintained
F: drivers/pinctrl/pinctrl-at91.*
PIN CONTROLLER - ATMEL AT91 PIO4
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-gpio@vger.kernel.org
S: Supported

@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
orion5x-lacie-ethernet-disk-mini-v2.dtb \
orion5x-linkstation-lsgl.dtb \
orion5x-linkstation-lswtgl.dtb \
orion5x-lschl.dtb \
orion5x-linkstation-lschl.dtb \
orion5x-lswsgl.dtb \
orion5x-maxtor-shared-storage-2.dtb \
orion5x-netgear-wnr854t.dtb \

@@ -18,6 +18,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
gpio0 = &gpio1;

@@ -16,6 +16,14 @@
#size-cells = <1>;
interrupt-parent = <&icoll>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
gpio0 = &gpio0;

@@ -14,6 +14,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -17,6 +17,14 @@
#size-cells = <1>;
interrupt-parent = <&icoll>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &mac0;

@@ -12,6 +12,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
serial0 = &uart1;

@@ -13,6 +13,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -17,6 +17,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -137,7 +137,7 @@
&gpio4 {
gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
<&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
<&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
<&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
<&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
<&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
};

@@ -16,6 +16,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -14,6 +14,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec;

@@ -15,6 +15,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
can0 = &flexcan1;

@@ -15,6 +15,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
ethernet0 = &fec1;

@@ -50,6 +50,14 @@
/ {
#address-cells = <1>;
#size-cells = <1>;
/*
* The decompressor and also some bootloaders rely on a
* pre-existing /chosen node to be available to insert the
* command line and merge other ATAGS info.
* Also for U-Boot there must be a pre-existing /memory node.
*/
chosen {};
memory { device_type = "memory"; reg = <0 0>; };
aliases {
gpio0 = &gpio1;

@@ -2,7 +2,7 @@
* Device Tree file for Buffalo Linkstation LS-CHLv3
*
* Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
* Copyright (C) 2015, 2016
* Copyright (C) 2015-2017
* Roger Shimizu <rogershimizu@gmail.com>
*
* This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
#include <dt-bindings/gpio/gpio.h>
/ {
model = "Buffalo Linkstation Live v3 (LS-CHL)";
model = "Buffalo Linkstation LiveV3 (LS-CHL)";
compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
memory { /* 128 MB */

@@ -680,6 +680,7 @@
phy-names = "usb2-phy", "usb3-phy";
phys = <&usb2_picophy0>,
<&phy_port2 PHY_TYPE_USB3>;
snps,dis_u3_susphy_quirk;
};
};

@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m

@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m

@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
struct pt_regs newregs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,

@@ -60,7 +60,6 @@
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
static enum cpuhp_state cpuhp_mmdc_state;
static int ddr_type;
struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {
#ifdef CONFIG_PERF_EVENTS
static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);
PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")

@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
void __init early_abt_enable(void)
{
fsr_info[22].fn = early_abort_handler;
fsr_info[FSR_FS_AEA].fn = early_abort_handler;
local_abt_enable();
fsr_info[22].fn = do_bad;
fsr_info[FSR_FS_AEA].fn = do_bad;
}
#ifndef CONFIG_ARM_LPAE

@@ -11,11 +11,15 @@
#define FSR_FS5_0 (0x3f)
#ifdef CONFIG_ARM_LPAE
#define FSR_FS_AEA 17
static inline int fsr_fs(unsigned int fsr)
{
return fsr & FSR_FS5_0;
}
#else
#define FSR_FS_AEA 22
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
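
The point of the FSR_FS_AEA split above: the async external abort status code is 17 (0b010001) in the LPAE long-descriptor format, but 22 (0b10110) once the classic FS[4] bit is folded down next to FS[3:0], so the previously hardcoded fsr_info[22] indexed the wrong slot on LPAE kernels. Below is a standalone sketch of both decodes; the FSR_FS3_0 and FSR_FS4 values are assumptions not shown in this hunk:

#include <stdio.h>

#define FSR_FS5_0 0x3f      /* LPAE: STATUS[5:0], from the hunk above */
#define FSR_FS3_0 15        /* classic FS[3:0] (assumed value) */
#define FSR_FS4   (1 << 10) /* classic FS[4] lives at bit 10 (assumed value) */

static int fsr_fs_lpae(unsigned int fsr)
{
	return fsr & FSR_FS5_0;
}

static int fsr_fs_classic(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

int main(void)
{
	/* Async external abort encodings: 0x11 (LPAE), 0x406 (classic). */
	printf("LPAE AEA index:    %d\n", fsr_fs_lpae(0x11));     /* 17 */
	printf("classic AEA index: %d\n", fsr_fs_classic(0x406)); /* 22 */
	return 0;
}
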

@@ -55,6 +55,24 @@
#address-cells = <2>;
#size-cells = <2>;
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
/* 16 MiB reserved for Hardware ROM Firmware */
hwrom_reserved: hwrom@0 {
reg = <0x0 0x0 0x0 0x1000000>;
no-map;
};
/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
secmon_reserved: secmon@10000000 {
reg = <0x0 0x10000000 0x0 0x200000>;
no-map;
};
};
cpus {
#address-cells = <0x2>;
#size-cells = <0x0>;

@@ -151,6 +151,18 @@
status = "okay";
pinctrl-0 = <&eth_rgmii_pins>;
pinctrl-names = "default";
phy-handle = <&eth_phy0>;
mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;
eth_phy0: ethernet-phy@0 {
reg = <0>;
eee-broken-1000t;
};
};
};
&ir {

@@ -649,9 +649,10 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
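
The new SRR1_WAKEHVI encoding (0x00240000) only stays distinct under the wider P8/P9 mask: masked with the legacy SRR1_WAKEMASK (0x00380000) it collapses to 0x00200000 and would be misread as a plain external interrupt, which is why the POWER9 wakeup path later in this series switches to SRR1_WAKEMASK_P8. A minimal standalone sketch of the decode, using only the constants from this hunk:

#include <stdio.h>

#define SRR1_WAKEMASK    0x00380000 /* pre-POWER8 mask */
#define SRR1_WAKEMASK_P8 0x003c0000 /* POWER8/POWER9 mask */
#define SRR1_WAKEEE      0x00200000 /* external interrupt */
#define SRR1_WAKEHVI     0x00240000 /* hypervisor virt. interrupt (P9) */

int main(void)
{
	unsigned long srr1 = SRR1_WAKEHVI;

	/* The narrow mask aliases HVI to "external interrupt". */
	printf("legacy mask sees EE:  %d\n",
	       (srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE);     /* 1 */
	/* The wider mask keeps the two wake reasons apart. */
	printf("P8/P9 mask sees HVI:  %d\n",
	       (srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHVI); /* 1 */
	return 0;
}
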

@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
#ifdef CONFIG_PPC_POWERNV
extern int icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }
#endif

@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (unlikely(debugger_fault_handler(regs)))
goto bail;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE)) {
/*
* The kernel should never take an execute fault nor should it
* take a page fault to a kernel address.
*/
if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
rc = SIGSEGV;
goto bail;
}
@@ -390,20 +393,6 @@ good_area:
#endif /* CONFIG_8xx */
if (is_exec) {
/*
* An execution fault + no execute ?
*
* On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
* deliberately create NX mappings, and use the fault to do the
* cache flush. This is usually handled in hash_page_do_lazy_icache()
* but we could end up here if that races with a concurrent PTE
* update. In that case we need to fall through here to the VMA
* check below.
*/
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
(regs->msr & SRR1_ISI_N_OR_G))
goto bad_area;
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.

@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set, ric);
}
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
return;
asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,

@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
wmask = SRR1_WAKEMASK_P8;
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
* so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
* enabled as to let IPIs in.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
* contains 0.
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
((srr1 & wmask) == SRR1_WAKEHVI) ||
(local_paca->irq_happened & PACA_IRQ_EE)) {
icp_native_flush_interrupt();
if (cpu_has_feature(CPU_FTR_ARCH_300))
icp_opal_flush_interrupt();
else
icp_native_flush_interrupt();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
if (srr1 && !generic_check_cpu_restart(cpu))
DBG("CPU%d Unexpected exit while offline !\n", cpu);
}
/* Re-enable decrementer interrupts */
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
DBG("CPU%d coming online...\n", cpu);
}

@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
kvmppc_set_host_ipi(cpu, 1);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
int hw_cpu = hard_smp_processor_id();
int cpu = smp_processor_id();
opal_int_set_mfrr(hw_cpu, 0xff);
kvmppc_set_host_ipi(cpu, 0);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
return smp_ipi_demux();
}
/*
* Called when an interrupt is received on an off-line CPU to
* clear the interrupt, so that the CPU can go back to nap mode.
*/
void icp_opal_flush_interrupt(void)
{
unsigned int xirr;
unsigned int vec;
do {
xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
break;
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
kvmppc_set_host_ipi(cpu, 0);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, "
"disabling\n", vec);
xics_mask_unknown_vec(vec);
}
/* EOI the interrupt */
} while (opal_int_eoi(xirr) > 0);
}
#endif /* CONFIG_SMP */
static const struct icp_ops icp_opal_ops = {

@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};

@@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret == 0 || (ret && ret != -EOPNOTSUPP))
goto out;
ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
ZERO_PAGE(0), biop);
if (ret == 0 || (ret && ret != -EOPNOTSUPP))
goto out;
ret = 0;
while (nr_sects != 0) {
bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
@@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
return 0;
}
if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
ZERO_PAGE(0)))
return 0;
blk_start_plug(&plug);
ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
&bio, discard);

@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;

@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
WARN_ON(!IS_KABYLAKE(dev_priv));
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);

@@ -2010,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
if (WARN_ON(reg->pin_count))
continue;
/* Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that both violates
* our tracking of activity and associated locking/barriers,
* but also is illegal given that the hw is powered down).
*
* Previously we used reg->pin_count as a "liveness" indicator.
* That is not sufficient, and we need a more fine-grained
* tool if we want to have a sanity check here.
*/
if (!reg->vma)
continue;
@@ -3478,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
if (obj->cache_dirty) {
if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
}

@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
/* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
* form internally.
*/
exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
/* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
* form internally.
*/
exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;

@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
if (swiotlb_nr_tbl()) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
if (max_segment) {
max_segment = max_t(unsigned int, max_segment,
PAGE_SIZE) >> PAGE_SHIFT;
max_order = min(max_order, ilog2(max_segment));
}
}
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
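
swiotlb_max_segment() reports a per-segment byte limit (zero when the bounce buffer is not in use); clamping it to at least PAGE_SIZE and converting to pages before taking ilog2() keeps the allocation order within what one swiotlb segment can map. A standalone sketch of just that arithmetic, with PAGE_SHIFT, the MAX_ORDER stand-in, and the segment size picked for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

static unsigned int ilog2_u(unsigned int v) /* floor(log2(v)) */
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int max_order = 11;           /* stand-in for MAX_ORDER */
	unsigned int max_segment = 128 * 1024; /* bytes, illustrative */

	if (max_segment) {
		/* max_t(unsigned int, max_segment, PAGE_SIZE) >> PAGE_SHIFT */
		max_segment = (max_segment > PAGE_SIZE ?
			       max_segment : PAGE_SIZE) >> PAGE_SHIFT;
		if (ilog2_u(max_segment) < max_order)
			max_order = ilog2_u(max_segment);
	}
	printf("max_order = %u\n", max_order); /* 5 -> 32-page chunks */
	return 0;
}
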

@@ -4262,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->unpin_work);
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
queue_work(dev_priv->wq, &work->unpin_work);
}
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)

@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
return NULL;
if ((encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP) &&
encoder->type == INTEL_OUTPUT_EDP ||
encoder->type == INTEL_OUTPUT_DP_MST) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;

@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
}
}
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0xff,
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
formats, num_formats,
type, NULL);

@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
mode_cmd.height = var->yres;
mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
mode_cmd.pixel_format =
drm_mode_legacy_fb_format(var->bits_per_pixel,
((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&

@@ -58,7 +58,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
#define SMBIOSIZE 8
#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 port;
int retval;
mutex_lock(&piix4_mutex_sb800);
/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);
do {
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
usleep_range(1000, 2000);
} while (--retries);
/* SMBus is still owned by the IMC, we give up */
if (!retries)
if (!retries) {
mutex_unlock(&piix4_mutex_sb800);
return -EBUSY;
mutex_lock(&piix4_mutex_sb800);
}
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
mutex_unlock(&piix4_mutex_sb800);
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
mutex_unlock(&piix4_mutex_sb800);
return retval;
}

@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
case RXE_MEM_TYPE_MR:
case RXE_MEM_TYPE_FMR:
return ((iova < mem->iova) ||
((iova + length) > (mem->iova + mem->length))) ?
-EFAULT : 0;
if (iova < mem->iova ||
length > mem->length ||
iova > mem->iova + mem->length - length)
return -EFAULT;
return 0;
default:
return -EFAULT;
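
The rewritten range check above is overflow-safe: in the old form a huge length could wrap iova + length around past mem->iova + mem->length and be accepted. A small standalone sketch of the two predicates with illustrative values (uint64_t stands in for the driver's types):

#include <stdint.h>
#include <stdio.h>

/* Old predicate: iova + len can wrap and slip past the upper bound. */
static int old_check(uint64_t iova, uint64_t len, uint64_t base, uint64_t size)
{
	return (iova < base || (iova + len) > (base + size)) ? -1 : 0;
}

/* New predicate: wrap-free once len <= size holds (assuming base + size
 * itself does not wrap, as for a registered memory region). */
static int new_check(uint64_t iova, uint64_t len, uint64_t base, uint64_t size)
{
	if (iova < base || len > size || iova > base + size - len)
		return -1;
	return 0;
}

int main(void)
{
	uint64_t base = 0x1000, size = 0x1000;
	uint64_t iova = 0x1800, len = UINT64_MAX - 0x400; /* iova + len wraps */

	printf("old: %d (wrongly accepted)\n", old_check(iova, len, base, size));
	printf("new: %d (rejected)\n", new_check(iova, len, base, size));
	return 0;
}
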

@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
goto err2;
}
resid = mtu;
qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;

@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
return -EINVAL;
}
if (test_bit(ABS_MT_SLOT, dev->absbit)) {
nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
error = input_mt_init_slots(dev, nslot, 0);
if (error)
if (test_bit(EV_ABS, dev->evbit)) {
input_alloc_absinfo(dev);
if (!dev->absinfo) {
error = -EINVAL;
goto fail1;
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
input_set_events_per_packet(dev, 60);
}
if (test_bit(ABS_MT_SLOT, dev->absbit)) {
nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
error = input_mt_init_slots(dev, nslot, 0);
if (error)
goto fail1;
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
input_set_events_per_packet(dev, 60);
}
}
if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {

@@ -42,13 +42,19 @@ config RMI4_SMB
config RMI4_F03
bool "RMI4 Function 03 (PS2 Guest)"
depends on RMI4_CORE
depends on SERIO=y || RMI4_CORE=SERIO
help
Say Y here if you want to add support for RMI4 function 03.
Function 03 provides PS2 guest support for RMI4 devices. This
includes support for TrackPoints on TouchPads.
config RMI4_F03_SERIO
tristate
depends on RMI4_CORE
depends on RMI4_F03
default RMI4_CORE
select SERIO
config RMI4_2D_SENSOR
bool
depends on RMI4_CORE

@@ -1023,7 +1023,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if (!host->busy_status && busy_resp &&
!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
(readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
/* Unmask the busy IRQ */
/* Clear the busy start IRQ */
writel(host->variant->busy_detect_mask,
host->base + MMCICLEAR);
/* Unmask the busy end IRQ */
writel(readl(base + MMCIMASK0) |
host->variant->busy_detect_mask,
base + MMCIMASK0);
@@ -1038,10 +1043,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
/*
* At this point we are not busy with a command, we have
* not received a new busy request, mask the busy IRQ and
* fall through to process the IRQ.
* not received a new busy request, clear and mask the busy
* end IRQ and fall through to process the IRQ.
*/
if (host->busy_status) {
writel(host->variant->busy_detect_mask,
host->base + MMCICLEAR);
writel(readl(base + MMCIMASK0) &
~host->variant->busy_detect_mask,
base + MMCIMASK0);
@@ -1283,12 +1292,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
}
/*
* We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
* enabled) since the HW seems to be triggering the IRQ on both
* edges while monitoring DAT0 for busy completion.
* We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
* enabled) in mmci_cmd_irq() function where ST Micro busy
* detection variant is handled. Considering the HW seems to be
* triggering the IRQ on both edges while monitoring DAT0 for
* busy completion and that same status bit is used to monitor
* start and end of busy detection, special care must be taken
* to make sure that both start and end interrupts are always
* cleared one after the other.
*/
status &= readl(host->base + MMCIMASK0);
writel(status, host->base + MMCICLEAR);
if (host->variant->busy_detect)
writel(status & ~host->variant->busy_detect_mask,
host->base + MMCICLEAR);
else
writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

@@ -31,6 +31,7 @@ struct lmac {
u8 lmac_type;
u8 lane_to_sds;
bool use_training;
bool autoneg;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -461,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
/* power down, reset autoneg, autoneg enable */
cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
cfg &= ~PCS_MRX_CTL_PWR_DN;
cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
cfg |= PCS_MRX_CTL_RST_AN;
if (lmac->phydev) {
cfg |= PCS_MRX_CTL_AN_EN;
} else {
/* In scenarios where PHY driver is not present or it's a
* non-standard PHY, FW sets AN_EN to inform Linux driver
* to do auto-neg and link polling or not.
*/
if (cfg & PCS_MRX_CTL_AN_EN)
lmac->autoneg = true;
}
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -472,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
return 0;
}
if (lmac->lmac_type == BGX_MODE_SGMII) {
if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
PCS_MRX_STATUS_AN_CPT, false)) {
dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -678,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
u64 pcs_link, an_result;
u8 speed;
pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
BGX_GMP_PCS_MRX_STATUS);
/*Link state bit is sticky, read it again*/
if (!(pcs_link & PCS_MRX_STATUS_LINK))
pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
BGX_GMP_PCS_MRX_STATUS);
if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
PCS_MRX_STATUS_AN_CPT, false)) {
lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
goto next_poll;
}
lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
BGX_GMP_PCS_ANX_AN_RESULTS);
speed = (an_result >> 3) & 0x3;
lmac->last_duplex = (an_result >> 1) & 0x1;
switch (speed) {
case 0:
lmac->last_speed = 10;
break;
case 1:
lmac->last_speed = 100;
break;
case 2:
lmac->last_speed = 1000;
break;
default:
lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
break;
}
next_poll:
if (lmac->last_link != lmac->link_up) {
if (lmac->link_up)
bgx_sgmii_change_link_state(lmac);
lmac->last_link = lmac->link_up;
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
if (lmac->is_sgmii) {
bgx_poll_for_sgmii_link(lmac);
return;
}
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -775,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_XLAUI) &&
(lmac->lmac_type != BGX_MODE_40G_KR) &&
(lmac->lmac_type != BGX_MODE_10G_KR)) {
if (!lmac->phydev)
return -ENODEV;
if (!lmac->phydev) {
if (lmac->autoneg) {
bgx_reg_write(bgx, lmacid,
BGX_GMP_PCS_LINKX_TIMER,
PCS_LINKX_TIMER_COUNT);
goto poll;
} else {
/* Default to below link speed and duplex */
lmac->link_up = true;
lmac->last_speed = 1000;
lmac->last_duplex = 1;
bgx_sgmii_change_link_state(lmac);
return 0;
}
}
lmac->phydev->dev_flags = 0;
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -786,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
return -ENODEV;
phy_start_aneg(lmac->phydev);
} else {
lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
WQ_MEM_RECLAIM, 1);
if (!lmac->check_link)
return -ENOMEM;
INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
return 0;
}
poll:
lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
WQ_MEM_RECLAIM, 1);
if (!lmac->check_link)
return -ENOMEM;
INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
return 0;
}

@@ -153,10 +153,15 @@
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_LINK BIT_ULL(2)
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
#define BGX_GMP_PCS_ANX_ADV 0x30010
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_LINKX_TIMER 0x30040
#define PCS_LINKX_TIMER_COUNT 0x1E84
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
#define PCS_MISC_CTL_MODE BIT_ULL(8)
#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full

@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;

@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;

@@ -163,6 +163,7 @@ static void loopback_setup(struct net_device *dev)
{
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
dev->min_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;

@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
if (rc)
return rc;
iproc_mdio_config_clk(priv->base);
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
if (rc)
return rc;
iproc_mdio_config_clk(priv->base);
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
iproc_mdio_config_clk(priv->base);
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");

@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -920,11 +921,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return -EIO;
}
if (!try_module_get(d->driver->owner)) {
dev_err(&dev->dev, "failed to get the device driver module\n");
return -EIO;
}
get_device(d);
/* Assume that if there is no driver, that it doesn't
@@ -938,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
d->driver =
&genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
using_genphy = true;
}
if (!try_module_get(d->driver->owner)) {
dev_err(&dev->dev, "failed to get the device driver module\n");
err = -EIO;
goto error_put_device;
}
if (using_genphy) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
if (err)
goto error;
goto error_module_put;
}
if (phydev->attached_dev) {
@@ -980,9 +986,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return err;
error:
/* phy_detach() does all of the cleanup below */
phy_detach(phydev);
put_device(d);
return err;
error_module_put:
module_put(d->driver->owner);
error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
module_put(bus->owner);
return err;
@@ -1045,6 +1056,8 @@ void phy_detach(struct phy_device *phydev)
phy_led_triggers_unregister(phydev);
module_put(phydev->mdio.dev.driver->owner);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
@@ -1065,7 +1078,6 @@ void phy_detach(struct phy_device *phydev)
bus = phydev->mdio.bus;
put_device(&phydev->mdio.dev);
module_put(phydev->mdio.dev.driver->owner);
if (ndev_owner != bus->owner)
module_put(bus->owner);
}

@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */
struct sierra_net_data {
u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@@ -122,6 +120,7 @@ struct param {
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@ struct param {
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
u8 unused2[41];
u8 network_len; /* network name len */
u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
} __packed;
struct lsi_umts_single {
struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@ struct lsi_umts {
u8 reserved[8];
} __packed;
struct lsi_umts_dual {
struct lsi_umts lsi;
u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
u8 unused4[23];
u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
u8 unused5[68];
} __packed;
#define SIERRA_NET_LSI_COMMON_LEN 4
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
dev->data[0] = (unsigned long)priv;
}
/* is packet IPv4 */
/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
return skb->protocol == cpu_to_be16(ETH_P_IP);
return skb->protocol == cpu_to_be16(ETH_P_IP) ||
skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@@ -349,46 +376,51 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
u32 expected_length;
if (datalen < sizeof(struct lsi_umts)) {
netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
__func__, datalen,
sizeof(struct lsi_umts));
if (datalen < sizeof(struct lsi_umts_single)) {
netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
__func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length),
(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
return -1;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
/* Validate the link type */
if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
lsi->link_type);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
|| lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
/* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n",
lsi->session_state);
lsi->session_state);
return 0;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
/* Validate the link type */
if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
single->link_type);
return -1;
}
expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
} else {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
if (be16_to_cpu(lsi->length) != expected_length) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length), expected_length);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
u8 numendpoints;
u16 fwattr = 0;
int status;
struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
/* we will have to manufacture ethernet headers, prepare template */
eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
eth->h_proto = cpu_to_be16(ETH_P_IP);
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, hh.hdrlen);
/* We are going to accept this packet, prepare it */
memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
ETH_HLEN);
/* We are going to accept this packet, prepare it.
* In case protocol is IPv6, keep it, otherwise force IPv4.
*/
skb_reset_mac_header(skb);
if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
eth_zero_addr(eth_hdr(skb)->h_source);
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)

@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
RING_IDX req_prod = queue->rx.req_prod_pvt;
int notify;
int err = 0;
if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
if (!skb)
if (!skb) {
err = -ENOMEM;
break;
}
id = xennet_rxidx(req_prod);
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
/* Try again later if there are not enough requests or skb allocation
* failed.
* Enough requests is quantified as the sum of newly created slots and
* the unconsumed slots at the backend.
*/
if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
unlikely(err)) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
@@ -1377,6 +1385,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
del_timer_sync(&queue->rx_refill_timer);
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1731,7 +1741,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
if (netif_running(info->netdev))
napi_disable(&queue->napi);
del_timer_sync(&queue->rx_refill_timer);
netif_napi_del(&queue->napi);
}
@@ -1820,27 +1829,19 @@ static int talk_to_netback(struct xenbus_device *dev,
xennet_destroy_queues(info);
err = xennet_create_queues(info, &num_queues);
if (err < 0)
goto destroy_ring;
if (err < 0) {
xenbus_dev_fatal(dev, err, "creating queues");
kfree(info->queues);
info->queues = NULL;
goto out;
}
/* Create shared ring, alloc event channel -- for each queue */
for (i = 0; i < num_queues; ++i) {
queue = &info->queues[i];
err = setup_netfront(dev, queue, feature_split_evtchn);
if (err) {
/* setup_netfront() will tidy up the current
* queue on error, but we need to clean up
* those already allocated.
*/
if (i > 0) {
rtnl_lock();
netif_set_real_num_tx_queues(info->netdev, i);
rtnl_unlock();
goto destroy_ring;
} else {
goto out;
}
}
if (err)
goto destroy_ring;
}
again:
@@ -1930,9 +1931,10 @@ abort_transaction_no_dev_fatal:
xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
kfree(info->queues);
info->queues = NULL;
xennet_destroy_queues(info);
out:
unregister_netdev(info->netdev);
xennet_free_netdev(info->netdev);
return err;
}

@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot)
pciehp_green_led_blink(p_slot);
/* Check link training status */
pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot)
if (retval != -EEXIST)
goto err_exit;
}
pm_runtime_put(&ctrl->pcie->port->dev);
pciehp_green_led_on(p_slot);
pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
pm_runtime_put(&ctrl->pcie->port->dev);
set_slot_off(ctrl, p_slot);
return retval;
}
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot)
int retval;
struct controller *ctrl = p_slot->ctrl;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_unconfigure_device(p_slot);
pm_runtime_put(&ctrl->pcie->port->dev);
if (retval)
return retval;

@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
affd = &msi_default_affd;
if (affd->pre_vectors + affd->post_vectors > min_vecs)
return -EINVAL;
/*
* If there aren't any vectors left after applying the pre/post
* vectors don't bother with assigning affinity.
*/
if (affd->pre_vectors + affd->post_vectors == min_vecs)
affd = NULL;
} else {
if (WARN_ON(affd))
affd = NULL;
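
The two new guards above enforce the reserved-vector accounting: pre_vectors and post_vectors are taken off the ends of the range and never affinity-spread, so together they may not exceed min_vecs, and if they consume exactly min_vecs there is nothing left to spread and affd is dropped. A hedged sketch of that arithmetic only (the struct mirrors the field names in the hunk, not the full allocation path):

#include <stdio.h>

struct irq_affinity {
	int pre_vectors;  /* reserved at the front, not spread */
	int post_vectors; /* reserved at the back, not spread */
};

/* Vectors left for affinity spreading; -1 models the -EINVAL case. */
static int spreadable(int min_vecs, const struct irq_affinity *affd)
{
	int reserved = affd->pre_vectors + affd->post_vectors;

	if (reserved > min_vecs)
		return -1;          /* -EINVAL */
	return min_vecs - reserved; /* 0 means: skip affinity, affd = NULL */
}

int main(void)
{
	struct irq_affinity affd = { .pre_vectors = 1, .post_vectors = 1 };

	printf("%d\n", spreadable(4, &affd)); /* 2 left to spread */
	printf("%d\n", spreadable(2, &affd)); /* 0: affinity skipped */
	printf("%d\n", spreadable(1, &affd)); /* -1: rejected */
	return 0;
}
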

@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
* Hotplug ports handled by firmware in System Management Mode
* Hotplug interrupts cannot be delivered if the link is down,
* so parents of a hotplug port must stay awake. In addition,
* hotplug ports handled by firmware in System Management Mode
* may not be put into D3 by the OS (Thunderbolt on non-Macs).
* For simplicity, disallow in general for now.
*/
if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
if (bridge->is_hotplug_bridge)
return false;
if (pci_bridge_d3_force)
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
!pci_pme_capable(dev, PCI_D3cold)) ||
/* If it is a bridge it must be allowed to go to D3. */
!pci_power_manageable(dev) ||
/* Hotplug interrupts cannot be delivered if the link is down. */
dev->is_hotplug_bridge)
!pci_power_manageable(dev))
*d3cold_ok = false;

@@ -3816,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
.name = "ibmvscsis",
.max_data_sg_nents = MAX_TXU / PAGE_SIZE,
.get_fabric_name = ibmvscsis_get_fabric_name,
.tpg_get_wwn = ibmvscsis_get_fabric_wwn,
.tpg_get_tag = ibmvscsis_get_tag,

@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = VM_FAULT_LOCKED;
break;
case -ENODATA:
case -EAGAIN:
case -EFAULT:
result = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
result = VM_FAULT_OOM;
break;
case -EAGAIN:
result = VM_FAULT_RETRY;
break;
default:
result = VM_FAULT_SIGBUS;
break;

@@ -352,7 +352,15 @@ int core_enable_device_list_for_node(
kfree(new);
return -EINVAL;
}
BUG_ON(orig->se_lun_acl != NULL);
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
" se_lun_acl->se_lun_group reference for %s"
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
kfree(new);
return -EINVAL;
}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);

@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
sense_reason_t ret = TCM_NO_SENSE;
/*
* Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
* sent to the backend driver.
*/
spin_lock_irq(&cmd->t_state_lock);
if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
if (cmd->transport_state & CMD_T_SENT) {
cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_unlock_irq(&cmd->t_state_lock);
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
*/
up(&dev->caw_sem);
return TCM_NO_SENSE;
return ret;
}
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,

@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
struct se_node_acl, acl_kref);
struct se_portal_group *se_tpg = nacl->se_tpg;
complete(&nacl->acl_free_comp);
if (!nacl->dynamic_stop) {
complete(&nacl->acl_free_comp);
return;
}
mutex_lock(&se_tpg->acl_node_mutex);
list_del(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
kfree(nacl);
}
void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
struct se_node_acl *se_nacl = se_sess->se_node_acl;
/*
* Drop the se_node_acl->nacl_kref obtained from within
* core_tpg_get_initiator_node_acl().
*/
if (se_nacl) {
struct se_portal_group *se_tpg = se_nacl->se_tpg;
const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
unsigned long flags;
se_sess->se_node_acl = NULL;
/*
* Also determine if we need to drop the extra ->cmd_kref if
* it had been previously dynamically generated, and
* the endpoint is not caching dynamic ACLs.
*/
mutex_lock(&se_tpg->acl_node_mutex);
if (se_nacl->dynamic_node_acl &&
!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
if (list_empty(&se_nacl->acl_sess_list))
se_nacl->dynamic_stop = true;
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
list_del(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
if (se_nacl->dynamic_stop)
target_put_nacl(se_nacl);
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
const struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
bool drop_nacl = false;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
se_tfo = se_tpg->se_tpg_tfo;
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
/*
* Determine if we need to do extra work for this initiator node's
* struct se_node_acl if it had been previously dynamically generated.
*/
se_nacl = se_sess->se_node_acl;
mutex_lock(&se_tpg->acl_node_mutex);
if (se_nacl && se_nacl->dynamic_node_acl) {
if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
list_del(&se_nacl->acl_list);
drop_nacl = true;
}
}
mutex_unlock(&se_tpg->acl_node_mutex);
if (drop_nacl) {
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
se_sess->se_node_acl = NULL;
kfree(se_nacl);
}
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
* If the last kref is dropping now for an explicit NodeACL, wake the
* sleeping ->acl_free_comp caller so the configfs se_node_acl->acl_group
* removal context can proceed from within transport_free_session() code.
*
* For dynamic ACL, target_put_nacl() uses target_complete_nacl()
* to release all remaining generate_node_acl=1 created ACL resources.
*/
transport_free_session(se_sess);
@ -3110,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_stop;
}
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
cmd->se_tfo->queue_tm_rsp(cmd);
@ -3123,11 +3139,25 @@ int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
unsigned long flags;
bool aborted = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->transport_state |= CMD_T_ACTIVE;
if (cmd->transport_state & CMD_T_ABORTED) {
aborted = true;
} else {
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (aborted) {
pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
cmd->se_tmr_req->ref_task_tag, cmd->tag);
transport_cmd_check_stop_to_fabric(cmd);
return 0;
}
INIT_WORK(&cmd->work, target_tmr_work);
queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
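
The hunk above decides under t_state_lock whether the TMR was already
aborted before marking it active, and only acts on that decision after
dropping the lock. A minimal userspace sketch of the same
decide-under-lock, act-outside-lock shape (hypothetical flag values, a
pthread mutex standing in for the spinlock; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define CMD_T_ABORTED (1 << 0)
#define CMD_T_ACTIVE  (1 << 1)

struct cmd {
    pthread_mutex_t lock;
    unsigned int transport_state;
};

static int handle_tmr(struct cmd *cmd)
{
    bool aborted = false;

    /* Decide under the lock... */
    pthread_mutex_lock(&cmd->lock);
    if (cmd->transport_state & CMD_T_ABORTED)
        aborted = true;
    else
        cmd->transport_state |= CMD_T_ACTIVE;
    pthread_mutex_unlock(&cmd->lock);

    /* ...act outside it. */
    if (aborted) {
        fprintf(stderr, "TMR already aborted, not queueing\n");
        return 0; /* transport_cmd_check_stop_to_fabric() would run here */
    }
    printf("queueing TMR work\n"); /* queue_work() stand-in */
    return 0;
}

int main(void)
{
    struct cmd c = { PTHREAD_MUTEX_INITIALIZER, CMD_T_ABORTED };

    return handle_tmr(&c);
}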

View File

@ -864,7 +864,7 @@ out:
" CHECK_CONDITION -> sending response\n", rc);
ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
}
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
}
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)

View File

@ -1245,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
long i, ret = 0;
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@ -1253,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
/* Set all windows to the new group */
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbl = container->tables[i];
if (!tbl)
continue;
ret = table_group->ops->set_window(table_group, i, tbl);
if (ret)
goto release_exit;
}
return 0;
release_exit:
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
table_group->ops->unset_window(table_group, i);
table_group->ops->release_ownership(table_group);
return ret;
}
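
The release_exit label gives tce_iommu_take_ownership_ddw() all-or-nothing
semantics: if any set_window() call fails, every window is unset and
ownership is released. A self-contained sketch of that setup-with-unwind
idiom, with toy set/unset functions standing in for the table_group ops:

#include <stdio.h>

#define MAX_TABLES 2

static int set_window(int idx, int tbl)
{
    if (tbl < 0)
        return -1; /* simulate a set_window() failure */
    printf("window %d set\n", idx);
    return 0;
}

static void unset_window(int idx)
{
    printf("window %d unset\n", idx);
}

/* Apply every window, or roll all of them back on the first failure. */
static int take_ownership(const int tables[MAX_TABLES])
{
    int i, ret = 0;

    for (i = 0; i < MAX_TABLES; ++i) {
        ret = set_window(i, tables[i]);
        if (ret)
            goto release_exit;
    }
    return 0;

release_exit:
    /* Unset unconditionally, mirroring the hunk: unsetting a window
     * that was never set is treated as a no-op. */
    for (i = 0; i < MAX_TABLES; ++i)
        unset_window(i);
    return ret;
}

int main(void)
{
    const int tables[MAX_TABLES] = { 1, -1 };

    return take_ownership(tables) ? 1 : 0;
}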
static int tce_iommu_attach_group(void *iommu_data,

View File

@ -332,6 +332,37 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
}
}
static __be32
nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct iattr *iap)
{
struct inode *inode = d_inode(fhp->fh_dentry);
int host_err;
if (iap->ia_size < inode->i_size) {
__be32 err;
err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
if (err)
return err;
}
host_err = get_write_access(inode);
if (host_err)
goto out_nfserrno;
host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
if (host_err)
goto out_put_write_access;
return 0;
out_put_write_access:
put_write_access(inode);
out_nfserrno:
return nfserrno(host_err);
}
/*
* Set various file attributes. After this call fhp needs an fh_put.
*/
@ -346,6 +377,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
__be32 err;
int host_err;
bool get_write_count;
int size_change = 0;
if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
@ -358,11 +390,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
/* Get inode */
err = fh_verify(rqstp, fhp, ftype, accmode);
if (err)
return err;
goto out;
if (get_write_count) {
host_err = fh_want_write(fhp);
if (host_err)
goto out_host_err;
return nfserrno(host_err);
}
dentry = fhp->fh_dentry;
@ -373,59 +405,50 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
iap->ia_valid &= ~ATTR_MODE;
if (!iap->ia_valid)
return 0;
goto out;
nfsd_sanitize_attrs(inode, iap);
if (check_guard && guardtime != inode->i_ctime.tv_sec)
return nfserr_notsync;
/*
* The size case is special, it changes the file in addition to the
* attributes, and file systems don't expect it to be mixed with
* "random" attribute changes. We thus split out the size change
* into a separate call for vfs_truncate, and do the rest as a
* separate setattr call.
* attributes.
*/
if (iap->ia_valid & ATTR_SIZE) {
struct path path = {
.mnt = fhp->fh_export->ex_path.mnt,
.dentry = dentry,
};
bool implicit_mtime = false;
err = nfsd_get_write_access(rqstp, fhp, iap);
if (err)
goto out;
size_change = 1;
/*
* vfs_truncate implicitly updates the mtime IFF the file size
* actually changes. Avoid the additional setattr call below if
* the only other attribute that the client sends is the mtime.
* RFC5661, Section 18.30.4:
* Changing the size of a file with SETATTR indirectly
* changes the time_modify and change attributes.
*
* (and similar for the older RFCs)
*/
if (iap->ia_size != i_size_read(inode) &&
((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
implicit_mtime = true;
host_err = vfs_truncate(&path, iap->ia_size);
if (host_err)
goto out_host_err;
iap->ia_valid &= ~ATTR_SIZE;
if (implicit_mtime)
iap->ia_valid &= ~ATTR_MTIME;
if (!iap->ia_valid)
goto done;
if (iap->ia_size != i_size_read(inode))
iap->ia_valid |= ATTR_MTIME;
}
iap->ia_valid |= ATTR_CTIME;
if (check_guard && guardtime != inode->i_ctime.tv_sec) {
err = nfserr_notsync;
goto out_put_write_access;
}
fh_lock(fhp);
host_err = notify_change(dentry, iap, NULL);
fh_unlock(fhp);
if (host_err)
goto out_host_err;
err = nfserrno(host_err);
done:
host_err = commit_metadata(fhp);
out_host_err:
return nfserrno(host_err);
out_put_write_access:
if (size_change)
put_write_access(inode);
if (!err)
err = nfserrno(commit_metadata(fhp));
out:
return err;
}
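
The rewritten size path adds ATTR_MTIME only when the truncate will
actually change i_size, per the RFC 5661 rule quoted above, and every
setattr gains ATTR_CTIME. The mask fix-up is easy to check in isolation;
the ATTR_* bit values below are illustrative, not the kernel's:

#include <stdio.h>

#define ATTR_SIZE  (1 << 0) /* hypothetical bit assignments */
#define ATTR_MTIME (1 << 1)
#define ATTR_CTIME (1 << 2)

/* Mirror of the nfsd_setattr() mask fix-up for a size change. */
static unsigned int fix_mask(unsigned int ia_valid, long ia_size,
                             long i_size)
{
    if ((ia_valid & ATTR_SIZE) && ia_size != i_size)
        ia_valid |= ATTR_MTIME; /* size really changes */
    ia_valid |= ATTR_CTIME;     /* every setattr bumps ctime */
    return ia_valid;
}

int main(void)
{
    /* Truncate 1000 -> 500: expect SIZE|MTIME|CTIME = 0x7. */
    printf("mask=0x%x\n", fix_mask(ATTR_SIZE, 500, 1000));
    /* "Truncate" to the current size: no implicit mtime, 0x5. */
    printf("mask=0x%x\n", fix_mask(ATTR_SIZE, 1000, 1000));
    return 0;
}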
#if defined(CONFIG_NFSD_V4)

View File

@ -280,7 +280,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
1, id, type, PSTORE_TYPE_PMSG, 0);
/* ftrace is last since it may want to dynamically allocate memory. */
if (!prz_ok(prz)) {
if (!prz_ok(prz) && cxt->fprzs) {
if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
prz = ramoops_get_next_prz(cxt->fprzs,
&cxt->ftrace_read_cnt, 1, id, type,

View File

@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
if (err == -EFAULT)
if (err == -EFAULT || err == -EAGAIN)
return VM_FAULT_NOPAGE;
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err == -EAGAIN)
return VM_FAULT_RETRY;
/* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
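
After this change -EAGAIN shares the -EFAULT branch and yields
VM_FAULT_NOPAGE rather than VM_FAULT_RETRY. The mapping is small enough
to exercise standalone; the VM_FAULT_* values here are stand-ins for the
kernel's:

#include <errno.h>
#include <stdio.h>

enum {
    VM_FAULT_LOCKED = 1, /* illustrative values only */
    VM_FAULT_NOPAGE,
    VM_FAULT_OOM,
    VM_FAULT_SIGBUS,
};

static int block_page_mkwrite_return(int err)
{
    if (err == 0)
        return VM_FAULT_LOCKED;
    if (err == -EFAULT || err == -EAGAIN)
        return VM_FAULT_NOPAGE;
    if (err == -ENOMEM)
        return VM_FAULT_OOM;
    /* -ENOSPC, -EDQUOT, -EIO ... */
    return VM_FAULT_SIGBUS;
}

int main(void)
{
    printf("-EAGAIN -> %d (NOPAGE)\n", block_page_mkwrite_return(-EAGAIN));
    printf("-EIO    -> %d (SIGBUS)\n", block_page_mkwrite_return(-EIO));
    return 0;
}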

View File

@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpu_ids);
nr_cpumask_bits);
}
/**
@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
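
All four helpers now parse against nr_cpumask_bits, the allocated width
of a cpumask, rather than nr_cpu_ids, the possibly smaller runtime CPU
count, so parsing and formatting agree on one width. The toy parser
below (hex-only; not the kernel's bitmap_parse) shows how the width
argument gates which bits a mask may set:

#include <stdio.h>

/* Toy stand-in for bitmap_parse(): accept a hex mask, reject any bit
 * at or above nbits. Returns 0 on success, -1 on overflow. */
static int parse_mask(const char *buf, unsigned long *dst, int nbits)
{
    unsigned long val;

    if (sscanf(buf, "%lx", &val) != 1)
        return -1;
    if (nbits < 64 && (val >> nbits))
        return -1; /* bits beyond the advertised width */
    *dst = val;
    return 0;
}

int main(void)
{
    unsigned long mask;

    /* "20" sets bit 5: rejected at width 4, accepted at width 8. */
    printf("width 4: %d\n", parse_mask("20", &mask, 4));
    printf("width 8: %d\n", parse_mask("20", &mask, 8));
    return 0;
}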
/**

View File

@ -1508,6 +1508,7 @@ enum netdev_priv_flags {
* @max_mtu: Interface Maximum MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
* @min_header_len: Minimum hardware header length.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@ -1724,6 +1725,7 @@ struct net_device {
unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
unsigned short min_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
@ -2688,6 +2690,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
{
if (likely(len >= dev->hard_header_len))
return true;
if (len < dev->min_header_len)
return false;
if (capable(CAP_SYS_RAWIO)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
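
With min_header_len in place, dev_validate_header() has three outcomes:
long enough, too short to ever form a valid header, or short but
paddable by a CAP_SYS_RAWIO sender. A userspace rendering of that
decision, with the capability check reduced to a boolean and
illustrative header lengths:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define HARD_HLEN 18 /* illustrative device header length */
#define MIN_HLEN  14 /* e.g. ETH_HLEN, as ether_setup() now sets */

static bool validate_header(char *hdr, int len, bool rawio)
{
    if (len >= HARD_HLEN)
        return true;
    if (len < MIN_HLEN)
        return false; /* can never form a valid header */
    if (rawio) {
        memset(hdr + len, 0, HARD_HLEN - len); /* zero-pad the rest */
        return true;
    }
    return false;
}

int main(void)
{
    char hdr[HARD_HLEN] = { 0 };

    printf("%d\n", validate_header(hdr, 18, false)); /* 1: full header */
    printf("%d\n", validate_header(hdr, 6, true));   /* 0: below minimum */
    printf("%d\n", validate_header(hdr, 15, true));  /* 1: padded */
    return 0;
}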

View File

@ -178,7 +178,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type)
}
static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
{
return -EOPNOTSUPP;
/* return 0 since we are not walking attr looking for
* RTA_ENCAP_TYPE attribute on nexthops.
*/
return 0;
}
static inline int lwtunnel_build_state(u16 encap_type,

View File

@ -538,6 +538,7 @@ struct se_node_acl {
char initiatorname[TRANSPORT_IQN_LEN];
/* Used to signal demo mode created ACL, disabled by default */
bool dynamic_node_acl;
bool dynamic_stop;
u32 queue_depth;
u32 acl_index;
enum target_prot_type saved_prot_type;

View File

@ -37,7 +37,6 @@
#define IB_USER_VERBS_H
#include <linux/types.h>
#include <rdma/ib_verbs.h>
/*
* Increment this value if any changes that break userspace ABI
@ -548,11 +547,17 @@ enum {
};
enum {
IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
/*
* This value is equal to IB_QP_DEST_QPN.
*/
IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
};
enum {
IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
/*
* This value is equal to IB_QP_RATE_LIMIT.
*/
IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
};
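
The uapi header now spells the masks out as shifts (1ULL << 20,
1ULL << 25) because it can no longer include rdma/ib_verbs.h for the
IB_QP_* enum. When a constant is duplicated across headers like this, a
compile-time assertion on the kernel side is one way to keep the copies
honest; a sketch, with the enum values assumed from the comments above:

#include <assert.h>

/* Kernel-internal values, assumed from the comments in the hunk. */
enum ib_qp_attr_mask {
    IB_QP_DEST_QPN   = 1 << 20,
    IB_QP_RATE_LIMIT = 1 << 25,
};

/* uapi-side duplicates, written out as plain shifts. */
#define IB_USER_LEGACY_LAST_QP_ATTR_MASK (1ULL << 20)
#define IB_USER_LAST_QP_ATTR_MASK        (1ULL << 25)

/* Fail the build if the duplicated constants ever drift apart. */
static_assert(IB_USER_LEGACY_LAST_QP_ATTR_MASK == IB_QP_DEST_QPN,
              "legacy QP attr mask out of sync");
static_assert(IB_USER_LAST_QP_ATTR_MASK == IB_QP_RATE_LIMIT,
              "QP attr mask out of sync");

int main(void) { return 0; }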
struct ib_uverbs_ex_create_qp {

View File

@ -227,11 +227,10 @@ static __init int user_namespace_sysctl_init(void)
* properly.
*/
user_header = register_sysctl("user", empty);
kmemleak_ignore(user_header);
BUG_ON(!user_header);
BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
return 0;
}
subsys_initcall(user_namespace_sysctl_init);

View File

@ -1422,6 +1422,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
int err;
unsigned long i, count = oo_objects(s->oo);
/* Bailout if already initialised */
if (s->random_seq)
return 0;
err = cache_random_seq_create(s, count, GFP_KERNEL);
if (err) {
pr_err("SLUB: Unable to initialize free list for %s\n",

View File

@ -271,6 +271,7 @@ static int dsa_user_port_apply(struct dsa_port *port, u32 index,
if (err) {
dev_warn(ds->dev, "Failed to create slave %d: %d\n",
index, err);
ds->ports[index].netdev = NULL;
return err;
}

View File

@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
dev->header_ops = &eth_header_ops;
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
dev->min_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_DATA_LEN;

View File

@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
kfree(pmc);
}
spin_unlock_bh(&im->lock);
}

View File

@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
{
struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
if (!skb)
return 0;
pfh->wcheck = csum_partial((char *)&pfh->icmph,
sizeof(struct icmphdr), pfh->wcheck);
pfh->icmph.checksum = csum_fold(pfh->wcheck);

View File

@ -4022,6 +4022,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
if (bump_id)
rt_genid_bump_ipv6(dev_net(dev));
/* Make sure that a new temporary address will be created
* before this temporary address becomes deprecated.
*/
if (ifp->flags & IFA_F_TEMPORARY)
addrconf_verify_rtnl();
}
static void addrconf_dad_run(struct inet6_dev *idev)

View File

@ -779,6 +779,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
kfree(pmc);
}
spin_unlock_bh(&im->mca_lock);
}

View File

@ -1380,6 +1380,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
dev->tstats = NULL;
return err;
}
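
Clearing dev->tstats right after free_percpu() matters because the
tunnel's destructor will free it again later; a stale pointer here is a
double free. A compact illustration of the idiom, with plain malloc/free
standing in for the per-cpu allocator:

#include <stdlib.h>

struct dev {
    long *tstats;
};

static int init_fails(struct dev *d)
{
    d->tstats = malloc(sizeof(*d->tstats));
    if (!d->tstats)
        return -1;
    /* ... a later init step fails, so undo the allocation ... */
    free(d->tstats);
    d->tstats = NULL; /* the fix: leave no stale pointer behind */
    return -1;
}

static void destroy(struct dev *d)
{
    free(d->tstats); /* safe: free(NULL) is a no-op */
    d->tstats = NULL;
}

int main(void)
{
    struct dev d = { 0 };

    init_fails(&d);
    destroy(&d); /* would double-free without the NULL-ing above */
    return 0;
}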

View File

@ -929,24 +929,26 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
goto out_error;
}
/* New message, alloc head skb */
head = alloc_skb(0, sk->sk_allocation);
while (!head) {
kcm_push(kcm);
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto out_error;
if (msg_data_left(msg)) {
/* New message, alloc head skb */
head = alloc_skb(0, sk->sk_allocation);
while (!head) {
kcm_push(kcm);
err = sk_stream_wait_memory(sk, &timeo);
if (err)
goto out_error;
head = alloc_skb(0, sk->sk_allocation);
}
skb = head;
/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
* csum_and_copy_from_iter from skb_do_copy_data_nocache.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
skb = head;
/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
* csum_and_copy_from_iter from skb_do_copy_data_nocache.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
start:
while (msg_data_left(msg)) {
bool merge = true;
@ -1018,10 +1020,12 @@ wait_for_memory:
if (eor) {
bool not_busy = skb_queue_empty(&sk->sk_write_queue);
/* Message complete, queue it on send buffer */
__skb_queue_tail(&sk->sk_write_queue, head);
kcm->seq_skb = NULL;
KCM_STATS_INCR(kcm->stats.tx_msgs);
if (head) {
/* Message complete, queue it on send buffer */
__skb_queue_tail(&sk->sk_write_queue, head);
kcm->seq_skb = NULL;
KCM_STATS_INCR(kcm->stats.tx_msgs);
}
if (msg->msg_flags & MSG_BATCH) {
kcm->tx_wait_more = true;

View File

@ -263,6 +263,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
const struct l2tp_nl_cmd_ops *ops);
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
/* Session reference counts. Incremented when code obtains a reference
* to a session.

View File

@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@ -560,6 +561,30 @@ out:
return err ? err : copied;
}
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct sk_buff *skb;
int amount;
switch (cmd) {
case SIOCOUTQ:
amount = sk_wmem_alloc_get(sk);
break;
case SIOCINQ:
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
spin_unlock_bh(&sk->sk_receive_queue.lock);
break;
default:
return -ENOIOCTLCMD;
}
return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL(l2tp_ioctl);
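
With the l2tp sockets switched from udp_ioctl to l2tp_ioctl, SIOCINQ and
SIOCOUTQ report the L2TP socket's own queue depths. From userspace the
calls look the same as on any other socket; a hedged example against a
plain UDP socket, which answers the same two ioctls:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sockios.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int inq = 0, outq = 0;

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* Bytes in the next pending datagram (0 if the queue is empty). */
    if (ioctl(fd, SIOCINQ, &inq) < 0)
        perror("SIOCINQ");
    /* Bytes still sitting in the send queue. */
    if (ioctl(fd, SIOCOUTQ, &outq) < 0)
        perror("SIOCOUTQ");
    printf("inq=%d outq=%d\n", inq, outq);
    close(fd);
    return 0;
}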
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
@ -568,7 +593,7 @@ static struct proto l2tp_ip_prot = {
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = l2tp_ip_disconnect,
.ioctl = udp_ioctl,
.ioctl = l2tp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,

View File

@ -731,7 +731,7 @@ static struct proto l2tp_ip6_prot = {
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
.disconnect = l2tp_ip6_disconnect,
.ioctl = udp_ioctl,
.ioctl = l2tp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,

View File

@ -2776,7 +2776,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
struct packet_sock *po = pkt_sk(sk);
int hlen, tlen;
int hlen, tlen, linear;
int extra_len = 0;
/*
@ -2837,8 +2837,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
linear = max(linear, min_t(int, len, dev->hard_header_len));
skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
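
The new linear computation keeps the skb's linear area at least as large
as the link-layer header even when a hostile vnet_hdr.hdr_len of 0 is
supplied, where previously the header could land in paged data. Worked
through with concrete numbers (illustrative helper, not the kernel's
allocator):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* linear = max(vnet_hdr.hdr_len, min(len, dev->hard_header_len)) */
static int linear_len(int vnet_hdr_len, int len, int hard_header_len)
{
    return max_int(vnet_hdr_len, min_int(len, hard_header_len));
}

int main(void)
{
    /* hdr_len=0 from a hostile header, 1500-byte packet, 14-byte
     * Ethernet header: the linear area still covers those 14 bytes. */
    printf("linear=%d\n", linear_len(0, 1500, 14)); /* 14 */
    /* Tiny 8-byte packet: don't reserve more than the data. */
    printf("linear=%d\n", linear_len(0, 8, 14));    /* 8 */
    return 0;
}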

View File

@ -5888,7 +5888,7 @@ static int selinux_setprocattr(struct task_struct *p,
return error;
/* Obtain a SID for the context, if one was specified. */
if (size && str[1] && str[1] != '\n') {
if (size && str[0] && str[0] != '\n') {
if (str[size-1] == '\n') {
str[size-1] = 0;
size--;

View File

@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
{
unsigned long flags;
struct snd_seq_event_cell *ptr;
int max_count = 5 * HZ;
if (snd_BUG_ON(!pool))
return -EINVAL;
@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
while (atomic_read(&pool->counter) > 0) {
if (max_count == 0) {
pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
break;
}
while (atomic_read(&pool->counter) > 0)
schedule_timeout_uninterruptible(1);
max_count--;
}
/* release all resources */
spin_lock_irqsave(&pool->lock, flags);

View File

@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
}
}
static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue -
* return queue index value or negative value for error
*/
@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
if (q == NULL)
return -ENOMEM;
q->info_flags = info_flags;
queue_use(q, client, 1);
if (queue_list_add(q) < 0) {
queue_delete(q);
return -ENOMEM;
}
snd_seq_queue_use(q->queue, client, 1); /* use this queue */
return q->queue;
}
@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
return result;
}
/* use or unuse this queue -
* if it is the first client, start the timer.
* if it is no longer used by any client, stop the timer.
*/
int snd_seq_queue_use(int queueid, int client, int use)
/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
struct snd_seq_queue *queue;
queue = queueptr(queueid);
if (queue == NULL)
return -EINVAL;
mutex_lock(&queue->timer_mutex);
if (use) {
if (!test_and_set_bit(client, queue->clients_bitmap))
queue->clients++;
@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
} else {
snd_seq_timer_close(queue);
}
}
/* use or unuse this queue -
* if it is the first client, start the timer.
* if it is no longer used by any client, stop the timer.
*/
int snd_seq_queue_use(int queueid, int client, int use)
{
struct snd_seq_queue *queue;
queue = queueptr(queueid);
if (queue == NULL)
return -EINVAL;
mutex_lock(&queue->timer_mutex);
queue_use(queue, client, use);
mutex_unlock(&queue->timer_mutex);
queuefree(queue);
return 0;

View File

@ -3639,6 +3639,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),

View File

@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
goto error;
}
line6_get_interval(line6);
if (properties->capabilities & LINE6_CAP_CONTROL) {
line6_get_interval(line6);
ret = line6_init_cap_control(line6);
if (ret < 0)
goto error;