Merge branch 'linus' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-12-11 13:05:59 +01:00
commit 6de75a37b8
118 changed files with 1074 additions and 399 deletions

View file

@ -643,9 +643,8 @@ lcdif: lcdif@30730000 {
reg = <0x30730000 0x10000>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
<&clks IMX7D_CLK_DUMMY>,
<&clks IMX7D_CLK_DUMMY>;
clock-names = "pix", "axi", "disp_axi";
<&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
clock-names = "pix", "axi";
status = "disabled";
};
};

View file

@ -82,6 +82,10 @@ &usb_power {
gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
};
&sata {
nr-ports = <2>;
};
&ehci1 {
status = "okay";
};

View file

@ -410,7 +410,7 @@ uart2_pins: uart2 {
};
uart3_pins: uart3 {
allwinner,pins = "PG13", "PG14";
allwinner,pins = "PA13", "PA14";
allwinner,function = "uart3";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;

View file

@ -95,9 +95,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -105,8 +106,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -366,6 +369,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_ARC is not set

View file

@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -347,6 +350,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set

View file

@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -356,6 +359,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -346,6 +349,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set

View file

@ -93,9 +93,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -103,8 +104,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -347,6 +350,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -92,9 +92,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -102,8 +103,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -363,6 +366,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -102,9 +102,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -112,8 +113,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -397,6 +400,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
CONFIG_ATARILANCE=y

View file

@ -90,9 +90,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -100,8 +101,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -345,6 +348,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -346,6 +349,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set

View file

@ -91,9 +91,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -101,8 +102,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -353,6 +356,7 @@ CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -88,9 +88,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -98,8 +99,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -343,6 +346,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -88,9 +88,10 @@ CONFIG_NF_TABLES_INET=m
CONFIG_NF_TABLES_NETDEV=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_RBTREE=m
CONFIG_NFT_HASH=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@ -98,8 +99,10 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
@ -343,6 +346,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set

View file

@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
*/
#define HZSCALE (268435456 / (1000000 / HZ))
#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
#endif /* defined(_M68K_DELAY_H) */
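The change above only drops the trailing semicolon from the ndelay() macro body. A minimal standalone sketch (stub __delay() and a hypothetical caller, not from this commit) of why that semicolon breaks if/else callers:

static void __delay(unsigned long loops) { (void)loops; }

#define ndelay_old(n)   __delay(n);     /* old form: statement plus stray ";" */
#define ndelay_new(n)   __delay(n)      /* fixed form: plain expression */

void demo(int cond)
{
        if (cond)
                ndelay_new(5);          /* fine */
        else
                __delay(0);
        /*
         * With ndelay_old(5); the if body expands to "__delay(5);;": the if
         * statement ends at the first semicolon, the second one is a stray
         * empty statement, and the following "else" has no matching if, so
         * the code no longer compiles.
         */
}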

View file

@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
old_pte = *ptep; \
set_pte(ptep, pteval); \
if (pte_inserted(old_pte)) \
purge_tlb_entries(mm, addr); \
set_pte(ptep, pteval); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
set_pte(ptep, pte_mkold(pte));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
set_pte(ptep, __pte(0));
if (pte_inserted(old_pte))
purge_tlb_entries(mm, addr);
set_pte(ptep, __pte(0));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
set_pte(ptep, pte_wrprotect(*ptep));
spin_unlock_irqrestore(&pa_tlb_lock, flags);
}

View file

@ -393,6 +393,15 @@ void __init parisc_setup_cache_timing(void)
/* calculate TLB flush threshold */
/* On SMP machines, skip the TLB measure of kernel text which
* has been mapped as huge pages. */
if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
threshold = max(cache_info.it_size, cache_info.dt_size);
threshold *= PAGE_SIZE;
threshold /= num_online_cpus();
goto set_tlb_threshold;
}
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
@ -411,6 +420,8 @@ void __init parisc_setup_cache_timing(void)
alltime, size, rangetime);
threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
set_tlb_threshold:
if (threshold)
parisc_tlb_flush_threshold = threshold;
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",

View file

@ -892,19 +892,10 @@ ENTRY_CFI(flush_dcache_page_asm)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
cmpb,COND(<<) %r28, %r25,1b
cmpb,COND(<<) %r28, %r25,1b
fdc,m r31(%r28)
sync
#ifdef CONFIG_PA20
pdtlb,l %r0(%r25)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r25)
tlb_unlock %r20,%r21,%r22
#endif
bv %r0(%r2)
nop
.exit
@ -979,17 +970,6 @@ ENTRY_CFI(flush_icache_page_asm)
fic,m %r31(%sr4,%r28)
sync
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r25)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pitlb %r0(%sr4,%r25)
tlb_unlock %r20,%r21,%r22
#endif
bv %r0(%r2)
nop
.exit

View file

@ -69,7 +69,7 @@ u64 x86_perf_event_update(struct perf_event *event)
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
u64 delta;
if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
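x86_perf_event_update() computes the counter delta by shifting the raw difference up by `shift` and back down, so the s64 -> u64 change above decides whether that final right shift sign-extends. A standalone sketch (hypothetical raw counts, 48-bit counters assumed) of one case where the two types disagree:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int shift = 64 - 48;                    /* e.g. 48-bit counters */
        uint64_t prev = 0x800000000000ULL;      /* top counter bit set */
        uint64_t curr = 0x000000000010ULL;      /* counter wrapped around */

        int64_t  s = (int64_t)((curr << shift) - (prev << shift)) >> shift;
        uint64_t u = ((curr << shift) - (prev << shift)) >> shift;

        /* the signed (arithmetic) shift sign-extends into a large negative
         * delta, the unsigned shift yields the expected positive wrap delta */
        printf("signed:   %lld\n", (long long)s);
        printf("unsigned: %llu\n", (unsigned long long)u);
        return 0;
}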

View file

@ -4034,7 +4034,7 @@ __init int intel_pmu_init(void)
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;
x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
x86_pmu.perfctr = MSR_IA32_PMC0;
pr_cont("full-width counters, ");
}

View file

@ -540,6 +540,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

View file

@ -272,7 +272,6 @@ struct compat_shmid64_ds {
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
#ifdef CONFIG_X86_X32_ABI
typedef struct user_regs_struct compat_elf_gregset_t;
/* Full regset -- prstatus on x32, otherwise on ia32 */
@ -281,10 +280,9 @@ typedef struct user_regs_struct compat_elf_gregset_t;
do { *(int *) (((void *) &((S)->pr_reg)) + R) = (V); } \
while (0)
#ifdef CONFIG_X86_X32_ABI
#define COMPAT_USE_64BIT_TIME \
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
#else
typedef struct user_regs_struct32 compat_elf_gregset_t;
#endif
/*

View file

@ -815,9 +815,9 @@ static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
l = li;
}
addr1 = (base << shift) +
f * (unsigned long)(1 << m_io);
f * (1ULL << m_io);
addr2 = (base << shift) +
(l + 1) * (unsigned long)(1 << m_io);
(l + 1) * (1ULL << m_io);
pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
id, fi, li, lnasid, addr1, addr2);
if (max_io < l)
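The two changed lines above replace a cast applied after a 32-bit shift with a shift performed in 64 bits. A standalone sketch (hypothetical m_io value) of why casting the result cannot repair the overflow:

#include <stdio.h>

int main(void)
{
        int m_io = 36;  /* hypothetical: any shift count >= 31 overflows an int */

        /*
         * (unsigned long)(1 << m_io) evaluates the shift as a 32-bit int
         * first, which is undefined behaviour for m_io >= 31, so widening the
         * already-broken result afterwards comes too late.
         */
        unsigned long long fixed = 1ULL << m_io;        /* shift done in 64 bits */

        printf("0x%llx\n", fixed);
        return 0;
}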

View file

@ -7,11 +7,13 @@
unsigned long unwind_get_return_address(struct unwind_state *state)
{
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
unsigned long addr;
if (unwind_done(state))
return 0;
addr = READ_ONCE_NOCHECK(*state->sp);
return ftrace_graph_ret_addr(state->task, &state->graph_idx,
addr, state->sp);
}
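The hunk above defers the read of *state->sp until after the unwind_done() check. A minimal sketch (hypothetical names, not the kernel API) of the same validate-before-dereference shape:

unsigned long read_return_address(const unsigned long *sp, int done)
{
        unsigned long addr;

        if (done)
                return 0;       /* nothing left to unwind: never touch sp */

        addr = *sp;             /* dereferenced only once the state is known valid */
        return addr;
}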
@ -25,11 +27,12 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
do {
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
for (state->sp++; state->sp < info->end; state->sp++) {
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
for (state->sp++; state->sp < info->end; state->sp++)
if (__kernel_text_address(addr))
return true;
}
state->sp = info->next_sp;

View file

@ -196,6 +196,7 @@ static int xo15_sci_remove(struct acpi_device *device)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int xo15_sci_resume(struct device *dev)
{
/* Enable all EC events */
@ -207,6 +208,7 @@ static int xo15_sci_resume(struct device *dev)
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);

View file

@ -16,7 +16,7 @@
#include <regex.h>
#include <tools/le_byteshift.h>
void die(char *fmt, ...);
void die(char *fmt, ...) __attribute__((noreturn));
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

View file

@ -118,6 +118,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct iov_iter i;
int ret;
if (!iter_is_iovec(iter))
goto fail;
if (map_data)
copy = true;
else if (iov_iter_alignment(iter) & align)
@ -140,6 +143,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unmap_rq:
__blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return -EINVAL;
}

View file

@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
return ctx->used >= ctx->aead_assoclen + as;
/*
* The minimum amount of memory needed for an AEAD cipher is
* the AAD and in case of decryption the tag.
*/
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
static void aead_reset_ctx(struct aead_ctx *ctx)
@ -416,7 +420,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
unsigned int i, reqlen = GET_REQ_SIZE(tfm);
int err = -ENOMEM;
unsigned long used;
size_t outlen;
size_t outlen = 0;
size_t usedpages = 0;
lock_sock(sk);
@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
goto unlock;
}
used = ctx->used;
outlen = used;
if (!aead_sufficient_data(ctx))
goto unlock;
used = ctx->used;
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;
@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
aead_async_cb, sk);
used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
areq->tsgls = sgl->cur;
/* create rx sgls */
while (iov_iter_count(&msg->msg_iter)) {
while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
last_rsgl = rsgl;
/* we do not need more iovecs as we have sufficient memory */
if (outlen <= usedpages)
break;
iov_iter_advance(&msg->msg_iter, err);
}
err = -EINVAL;
/* ensure output buffer is sufficiently large */
if (usedpages < outlen)
goto free;
if (usedpages < outlen) {
err = -EINVAL;
goto unlock;
}
aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
areq->iv);
@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
goto unlock;
}
/* data length provided by caller via sendmsg/sendpage */
used = ctx->used;
/*
@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
if (!aead_sufficient_data(ctx))
goto unlock;
outlen = used;
/*
* Calculate the minimum output buffer size holding the result of the
* cipher operation. When encrypting data, the receiving buffer is
* larger by the tag length compared to the input buffer as the
* encryption operation generates the tag. For decryption, the input
* buffer provides the tag which is consumed resulting in only the
* plaintext without a buffer for the tag returned to the caller.
*/
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
used -= ctx->aead_assoclen;
/* convert iovecs of output buffers into scatterlists */
while (iov_iter_count(&msg->msg_iter)) {
while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
last_rsgl = rsgl;
/* we do not need more iovecs as we have sufficient memory */
if (outlen <= usedpages)
break;
iov_iter_advance(&msg->msg_iter, err);
}
err = -EINVAL;
/* ensure output buffer is sufficiently large */
if (usedpages < outlen)
if (usedpages < outlen) {
err = -EINVAL;
goto unlock;
}
sg_mark_end(sgl->sg + sgl->cur - 1);
aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,

View file

@ -254,18 +254,22 @@ static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
goto out;
}
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
u32 *mask)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return;
if ((algt->type & CRYPTO_ALG_INTERNAL))
*type |= CRYPTO_ALG_INTERNAL;
if ((algt->mask & CRYPTO_ALG_INTERNAL))
*mask |= CRYPTO_ALG_INTERNAL;
return false;
*type |= algt->type & CRYPTO_ALG_INTERNAL;
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
if (*type & *mask & CRYPTO_ALG_INTERNAL)
return true;
else
return false;
}
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
u32 mask = 0;
int err;
mcryptd_check_internal(tb, &type, &mask);
if (!mcryptd_check_internal(tb, &type, &mask))
return -EINVAL;
halg = ahash_attr_alg(tb[1], type, mask);
if (IS_ERR(halg))

View file

@ -94,7 +94,7 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
return to_acpi_device(acpi_desc->dev);
}
static int xlat_status(void *buf, unsigned int cmd, u32 status)
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
struct nd_cmd_clear_error *clear_err;
struct nd_cmd_ars_status *ars_status;
@ -113,7 +113,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
if ((status >> 16 & flags) == 0)
return -ENOTTY;
break;
return 0;
case ND_CMD_ARS_START:
/* ARS is in progress */
if ((status & 0xffff) == NFIT_ARS_START_BUSY)
@ -122,7 +122,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
/* Command failed */
if (status & 0xffff)
return -EIO;
break;
return 0;
case ND_CMD_ARS_STATUS:
ars_status = buf;
/* Command failed */
@ -146,7 +146,8 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
* then just continue with the returned results.
*/
if (status == NFIT_ARS_STATUS_INTR) {
if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
if (ars_status->out_length >= 40 && (ars_status->flags
& NFIT_ARS_F_OVERFLOW))
return -ENOSPC;
return 0;
}
@ -154,7 +155,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
/* Unknown status */
if (status >> 16)
return -EIO;
break;
return 0;
case ND_CMD_CLEAR_ERROR:
clear_err = buf;
if (status & 0xffff)
@ -163,7 +164,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
return -EIO;
if (clear_err->length > clear_err->cleared)
return clear_err->cleared;
break;
return 0;
default:
break;
}
@ -174,9 +175,18 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
return 0;
}
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)
static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
u32 status)
{
if (!nvdimm)
return xlat_bus_status(buf, cmd, status);
if (status)
return -EIO;
return 0;
}
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
union acpi_object in_obj, in_buf, *out_obj;
@ -298,7 +308,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
for (i = 0, offset = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
(u32 *) out_obj->buffer.pointer);
(u32 *) out_obj->buffer.pointer,
out_obj->buffer.length - offset);
if (offset + out_size > out_obj->buffer.length) {
dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
@ -333,7 +344,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
*/
rc = buf_len - offset - in_buf.buffer.length;
if (cmd_rc)
*cmd_rc = xlat_status(buf, cmd, fw_status);
*cmd_rc = xlat_status(nvdimm, buf, cmd,
fw_status);
} else {
dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
__func__, dimm_name, cmd_name, buf_len,
@ -343,7 +355,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
} else {
rc = 0;
if (cmd_rc)
*cmd_rc = xlat_status(buf, cmd, fw_status);
*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
}
out:
@ -351,6 +363,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
static const char *spa_type_name(u16 type)
{
@ -2001,19 +2014,32 @@ static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
return cmd_rc;
}
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_status *ars_status)
{
struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
int rc;
u32 i;
/*
* First record starts at 44 byte offset from the start of the
* payload.
*/
if (ars_status->out_length < 44)
return 0;
for (i = 0; i < ars_status->num_records; i++) {
/* only process full records */
if (ars_status->out_length
< 44 + sizeof(struct nd_ars_record) * (i + 1))
break;
rc = nvdimm_bus_add_poison(nvdimm_bus,
ars_status->records[i].err_address,
ars_status->records[i].length);
if (rc)
return rc;
}
if (i < ars_status->num_records)
dev_warn(acpi_desc->dev, "detected truncated ars results\n");
return 0;
}
@ -2266,8 +2292,7 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
if (rc < 0 && rc != -ENOSPC)
return rc;
if (ars_status_process_records(acpi_desc->nvdimm_bus,
acpi_desc->ars_status))
if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
return -ENOMEM;
return 0;

View file

@ -240,5 +240,7 @@ const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
void __acpi_nvdimm_notify(struct device *dev, u32 event);
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
#endif /* __NFIT_H__ */

View file

@ -1159,6 +1159,7 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to

View file

@ -4090,7 +4090,20 @@ static int mv_platform_probe(struct platform_device *pdev)
/* allocate host */
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
&n_ports);
if (rc) {
dev_err(&pdev->dev,
"error parsing nr-ports property: %d\n", rc);
return rc;
}
if (n_ports <= 0) {
dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
n_ports);
return -EINVAL;
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
} else {
mv_platform_data = dev_get_platdata(&pdev->dev);

View file

@ -1727,7 +1727,7 @@ static int eni_do_init(struct atm_dev *dev)
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
return error;
return -ENOMEM;
}
eni_dev->ioaddr = base;
eni_dev->base_diff = real_base - (unsigned long) base;

View file

@ -2143,6 +2143,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
lanai->base = (bus_addr_t) ioremap(raw_base, LANAI_MAPPING_SIZE);
if (lanai->base == NULL) {
printk(KERN_ERR DEV_LABEL ": couldn't remap I/O space\n");
result = -ENOMEM;
goto error_pci;
}
/* 3.3: Reset lanai and PHY */

View file

@ -1413,8 +1413,14 @@ static ssize_t hot_remove_store(struct class *class,
return ret ? ret : count;
}
/*
* NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
* sense that reading from this file does alter the state of your system -- it
* creates a new un-initialized zram device and returns back this device's
* device_id (or an error code if it fails to create a new device).
*/
static struct class_attribute zram_control_class_attrs[] = {
__ATTR_RO(hot_add),
__ATTR(hot_add, 0400, hot_add_show, NULL),
__ATTR_WO(hot_remove),
__ATTR_NULL,
};

View file

@ -558,8 +558,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*

View file

@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_adjust_op(engine, &creq->op_tmpl);
memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
mv_cesa_adjust_op(engine, &creq->op_tmpl);
memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}
if (creq->cache_ptr)
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,

View file

@ -271,7 +271,7 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
return -ENXIO;
/* prevent private mappings from being established */
if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info(dev, "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;

View file

@ -2472,6 +2472,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
int amdgpu_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);

View file

@ -1493,7 +1493,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return 0;
}
static int amdgpu_suspend(struct amdgpu_device *adev)
int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;

View file

@ -479,12 +479,15 @@ amdgpu_pci_remove(struct pci_dev *pdev)
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
amdgpu_pci_remove(pdev);
amdgpu_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)

View file

@ -755,8 +755,10 @@ static int __init ser_gigaset_init(void)
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
if (!driver)
if (!driver) {
rc = -ENOMEM;
goto error;
}
rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
if (rc != 0) {

View file

@ -1499,6 +1499,7 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO
"HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
err = -EBUSY;
goto out;
}
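This hunk, like the ser_gigaset one above it, assigns an error code before jumping to the shared exit label. A standalone user-space sketch (hypothetical probe function) of the pattern being fixed:

#include <errno.h>
#include <stdlib.h>

int probe(void)
{
        int err = 0;
        void *buf;

        buf = malloc(128);
        if (!buf) {
                err = -ENOMEM;  /* without this, the function returns 0 on failure */
                goto out;
        }

        /* ... use buf ... */
        free(buf);
out:
        return err;
}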

View file

@ -870,23 +870,25 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
static void peak_usb_disconnect(struct usb_interface *intf)
{
struct peak_usb_device *dev;
struct peak_usb_device *dev_prev_siblings;
/* unregister as many netdev devices as siblings */
for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
struct net_device *netdev = dev->netdev;
char name[IFNAMSIZ];
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
strncpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
free_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
if (dev->adapter->dev_free)
dev->adapter->dev_free(dev);
free_candev(netdev);
dev_info(&intf->dev, "%s removed\n", name);
}
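The loop above now caches dev->prev_siblings before the device is torn down. A minimal sketch (hypothetical node type) of iterating a linked structure while releasing each element:

struct node {
        struct node *prev;
};

void free_all(struct node *head, void (*release)(struct node *))
{
        struct node *n, *prev;

        for (n = head; n; n = prev) {
                prev = n->prev;         /* save the link before n is released */
                release(n);             /* n must not be touched after this */
        }
}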

View file

@ -1872,8 +1872,16 @@ static void bnx2x_get_ringparam(struct net_device *dev,
ering->rx_max_pending = MAX_RX_AVAIL;
/* If size isn't already set, we give an estimation of the number
* of buffers we'll have. We're neglecting some possible conditions
* [we couldn't know for certain at this point if number of queues
* might shrink] but the number would be correct for the likely
* scenario.
*/
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
else if (BNX2X_NUM_RX_QUEUES(bp))
ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
else
ering->rx_pending = MAX_RX_AVAIL;

View file

@ -10138,7 +10138,7 @@ static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
{
struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
if (!netif_running(bp->dev) || !IS_PF(bp))
if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
return;
if (udp_port->count && udp_port->dst_port == port) {
@ -10163,7 +10163,7 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
{
struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
if (!IS_PF(bp))
if (!IS_PF(bp) || CHIP_IS_E1x(bp))
return;
if (!udp_port->count || udp_port->dst_port != port) {
@ -13505,6 +13505,7 @@ static int bnx2x_init_firmware(struct bnx2x *bp)
/* Initialize the pointers to the init arrays */
/* Blob */
rc = -ENOMEM;
BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
/* Opcodes */

View file

@ -4120,7 +4120,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)

View file

@ -4931,6 +4931,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
adapter->port[i]->dev_port = pi->lport;
netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

View file

@ -7851,7 +7851,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
return ret;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
j++;
}
return 0;

View file

@ -2969,6 +2969,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->netdev_ops = &cxgb4vf_netdev_ops;
netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
netdev->dev_port = pi->port_id;
/*
* Initialize the hardware/software state for the port.

View file

@ -468,6 +468,9 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
struct device *dev = ep->dev->dev.parent;
int i;
if (!ep->descs)
return;
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
@ -490,6 +493,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
ep->descs = NULL;
}
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)

View file

@ -90,7 +90,8 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
{
OPCODE_COMMON_SET_HSW_CONFIG,
CMD_SUBSYSTEM_COMMON,
BE_PRIV_DEVCFG | BE_PRIV_VHADM
BE_PRIV_DEVCFG | BE_PRIV_VHADM |
BE_PRIV_DEVSEC
},
{
OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,

View file

@ -2313,6 +2313,8 @@ static const struct fec_stat {
{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@ -2330,7 +2332,7 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
fec_enet_update_ethtool_stats(dev);
memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64));
memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}
static void fec_enet_get_strings(struct net_device *netdev,
@ -2355,6 +2357,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
}
#else /* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE 0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
static int fec_enet_nway_reset(struct net_device *dev)
@ -3293,8 +3301,7 @@ fec_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
ARRAY_SIZE(fec_stats) * sizeof(u64),
num_tx_qs, num_rx_qs);
FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
if (!ndev)
return -ENOMEM;

View file

@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"
#define ibmveth_driver_version "1.06"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
@ -1174,6 +1179,45 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
int offset = 0;
/* only TCP packets will be aggregated */
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)skb->data;
if (iph->protocol == IPPROTO_TCP) {
offset = iph->ihl * 4;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
return;
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
if (iph6->nexthdr == IPPROTO_TCP) {
offset = sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
} else {
return;
}
} else {
return;
}
/* if mss is not set through Large Packet bit/mss in rx buffer,
* expect that the mss will be written to the tcp header checksum.
*/
if (lrg_pkt) {
skb_shinfo(skb)->gso_size = mss;
} else if (offset) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
skb_shinfo(skb)->gso_size = ntohs(tcph->check);
tcph->check = 0;
}
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@ -1182,6 +1226,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int frames_processed = 0;
unsigned long lpar_rc;
struct iphdr *iph;
u16 mss = 0;
restart_poll:
while (frames_processed < budget) {
@ -1199,9 +1244,21 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
* bytes from the start of the rx buffer, which is
* skb->data at this stage
*/
if (lrg_pkt) {
__be64 *rxmss = (__be64 *)(skb->data + 8);
mss = (u16)be64_to_cpu(*rxmss);
}
new_skb = NULL;
if (length < rx_copybreak)
new_skb = netdev_alloc_skb(netdev, length);
@ -1235,11 +1292,15 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
if (iph->check == 0xffff) {
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
adapter->rx_large_packets++;
}
}
}
if (length > netdev->mtu + ETH_HLEN) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;

View file

@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
#define IBMVETH_RXQ_LRG_PKT 0x04000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF

View file

@ -704,6 +704,7 @@ ltq_etop_probe(struct platform_device *pdev)
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
spin_lock_init(&priv->lock);
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))

View file

@ -18,8 +18,6 @@ config MLX5_CORE_EN
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
Ethernet and Infiniband support in ConnectX-4 are currently mutually
exclusive.
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"

View file

@ -268,11 +268,6 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
pr_debug("\n");
}
enum {
MLX5_DRIVER_STATUS_ABORTED = 0xfe,
MLX5_DRIVER_SYND = 0xbadd00de,
};
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
u32 *synd, u8 *status)
{

View file

@ -241,7 +241,7 @@ struct mlx5e_tstamp {
};
enum {
MLX5E_RQ_STATE_FLUSH,
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_AM,
};
@ -394,7 +394,7 @@ struct mlx5e_sq_dma {
};
enum {
MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_ENABLED,
MLX5E_SQ_STATE_BF_ENABLE,
};

View file

@ -759,6 +759,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
@ -773,6 +774,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
return 0;
err_disable_rq:
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
@ -782,7 +784,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
cancel_work_sync(&rq->am.work);
@ -1006,7 +1008,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
@ -1083,6 +1084,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_destroy_sq;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
false, 0);
if (err)
@ -1096,6 +1098,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
return 0;
err_disable_sq:
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
mlx5e_disable_sq(sq);
err_destroy_sq:
mlx5e_destroy_sq(sq);
@ -1112,7 +1115,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
napi_synchronize(&sq->channel->napi);
@ -3092,7 +3095,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@ -3147,13 +3150,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
for (i = 0; i < priv->params.num_channels; i++) {
struct mlx5e_channel *c = priv->channel[i];
set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
napi_synchronize(&c->napi);
/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
old_prog = xchg(&c->rq.xdp_prog, prog);
clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
/* napi_schedule in case we have missed anything */
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
napi_schedule(&c->napi);

View file

@ -340,7 +340,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true);
mlx5e_send_nop(sq, false);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@ -412,7 +412,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
@ -445,7 +445,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
}
#define RQ_CANNOT_POST(rq) \
(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@ -924,7 +924,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
if (cq->decmprs_left)

View file

@ -409,7 +409,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
npkts = 0;

View file

@ -56,7 +56,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
struct mlx5_cqe64 *cqe;
u16 sqcc;
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5e_get_cqe(cq);
@ -113,7 +113,7 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),

View file

@ -62,13 +62,13 @@ MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF 2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
enum {
@ -732,13 +732,15 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
u8 status;
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
if (!status || syndrome == MLX5_DRIVER_SYND) {
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
err, status, syndrome);
return err;
}
pr_err("failed to query ISSI err(%d)\n", err);
return err;
mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
dev->issi = 0;
return 0;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
@ -752,7 +754,8 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
set_out, sizeof(set_out));
if (err) {
pr_err("failed to set ISSI=1 err(%d)\n", err);
mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
err);
return err;
}
@ -1227,13 +1230,6 @@ static int init_one(struct pci_dev *pdev,
dev->pdev = pdev;
dev->event = mlx5_core_event;
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
mlx5_core_warn(dev,
"selected profile out of range, selecting default (%d)\n",
MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
INIT_LIST_HEAD(&priv->ctx_list);
@ -1450,10 +1446,22 @@ static struct pci_driver mlx5_core_driver = {
.sriov_configure = mlx5_core_sriov_configure,
};
static void mlx5_core_verify_params(void)
{
if (prof_sel >= ARRAY_SIZE(profile)) {
pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
prof_sel,
ARRAY_SIZE(profile) - 1,
MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
}
static int __init init(void)
{
int err;
mlx5_core_verify_params();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);

View file

@ -44,11 +44,11 @@
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
extern int mlx5_core_debug_mask;
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
(__dev)->priv.name, __func__, __LINE__, current->pid, \
dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
@ -63,8 +63,8 @@ do { \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
(__dev)->priv.name, __func__, __LINE__, current->pid, \
dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \
@ -75,6 +75,11 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
enum {
MLX5_DRIVER_STATUS_ABORTED = 0xfe,
MLX5_DRIVER_SYND = 0xbadd00de,
};
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);

View file

@ -1730,6 +1730,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
mapping))) {
DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n");
rc = -ENOMEM;
goto err;
}
} else {

View file

@ -438,9 +438,16 @@ static int smsc911x_request_resources(struct platform_device *pdev)
ret = regulator_bulk_get(&pdev->dev,
ARRAY_SIZE(pdata->supplies),
pdata->supplies);
if (ret)
if (ret) {
/*
* Retry on deferrals, else just report the error
* and try to continue.
*/
if (ret == -EPROBE_DEFER)
return ret;
netdev_err(ndev, "couldn't get regulators %d\n",
ret);
}
/* Request optional RESET GPIO */
pdata->reset_gpiod = devm_gpiod_get_optional(&pdev->dev,

View file

@ -43,9 +43,11 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
DMA_AXI_WR_OSR_LMT_SHIFT;
value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
DMA_AXI_RD_OSR_LMT_SHIFT;

View file

@ -30,9 +30,11 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (axi->axi_xit_frm)
value |= DMA_AXI_LPI_XIT_FRM;
value &= ~DMA_AXI_WR_OSR_LMT;
value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_WR_OSR_LMT_SHIFT;
value &= ~DMA_AXI_RD_OSR_LMT;
value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
DMA_AXI_RD_OSR_LMT_SHIFT;

View file

@ -126,8 +126,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
axi->axi_wr_osr_lmt = 1;
if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
axi->axi_rd_osr_lmt = 1;
of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
of_node_put(np);

View file

@ -1113,6 +1113,7 @@ static int cpmac_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
priv = netdev_priv(dev);

View file

@ -81,6 +81,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
};
mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mask |= BIT(slave + 4);
mode <<= slave * 2;
if (priv->rmii_clock_external) {

View file

@ -47,6 +47,10 @@
NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_HW_CSUM)
/* Restrict GSO size to account for NVGRE */
#define NETVSC_GSO_MAX_SIZE 62768
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@ -1400,6 +1404,7 @@ static int netvsc_probe(struct hv_device *dev,
nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
ret = register_netdev(net);
if (ret != 0) {

View file

@ -546,13 +546,15 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
}
err = ipvlan_set_port_mode(port, mode);
if (err) {
goto unregister_netdev;
goto unlink_netdev;
}
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
unregister_netdev:
unregister_netdevice(dev);
destroy_ipvlan_port:

View file

@ -1723,6 +1723,7 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Don't change this buffer size and allocation without doing
* some heavy and complete testing. Don't ask why :-(
* Jean II */
ret = -ENOMEM;
self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
if (!self->speed_buff)
goto err_out_3;

View file

@ -857,11 +857,17 @@ EXPORT_SYMBOL(phy_attached_print);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
int err;
if (!try_module_get(bus->owner)) {
/* For Ethernet device drivers that register their own MDIO bus, we
* will have bus->owner match ndev_mod, so we do not want to increment
* our own module->refcnt here, otherwise we would not be able to
* unload later on.
*/
if (ndev_owner != bus->owner && !try_module_get(bus->owner)) {
dev_err(&dev->dev, "failed to get the bus module\n");
return -EIO;
}
@ -921,7 +927,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
error:
put_device(d);
module_put(bus->owner);
if (ndev_owner != bus->owner)
module_put(bus->owner);
return err;
}
EXPORT_SYMBOL(phy_attach_direct);
@ -971,6 +978,8 @@ EXPORT_SYMBOL(phy_attach);
*/
void phy_detach(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus;
int i;
@ -998,7 +1007,8 @@ void phy_detach(struct phy_device *phydev)
bus = phydev->mdio.bus;
put_device(&phydev->mdio.dev);
module_put(bus->owner);
if (ndev_owner != bus->owner)
module_put(bus->owner);
}
EXPORT_SYMBOL(phy_detach);

View file

@ -602,6 +602,21 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
.data = CDC_NCM_FLAG_NDP_TO_END,
};
/* Some modems (e.g. Telit LE922A6) do not work properly with altsetting
* toggle done in cdc_ncm_bind_common. CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE
* flag is used to avoid this procedure.
*/
static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
.description = "CDC MBIM",
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
.bind = cdc_mbim_bind,
.unbind = cdc_mbim_unbind,
.manage_power = cdc_mbim_manage_power,
.rx_fixup = cdc_mbim_rx_fixup,
.tx_fixup = cdc_mbim_tx_fixup,
.data = CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE,
};
static const struct usb_device_id mbim_devs[] = {
/* This duplicate NCM entry is intentional. MBIM devices can
* be disguised as NCM by default, and this is necessary to
@@ -626,6 +641,12 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* Telit LE922A6 in MBIM composition */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1041, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,

View file

@@ -839,11 +839,18 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* Device-specific flags */
ctx->drvflags = drvflags;
/* Reset data interface. Some devices will not reset properly
* unless they are configured first. Toggle the altsetting to
* force a reset
* force a reset.
* Some other devices do not work properly with this procedure
* that can be avoided using quirk CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE
*/
usb_set_interface(dev->udev, iface_no, data_altsetting);
if (!(ctx->drvflags & CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE))
usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
@@ -890,9 +897,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
/* Device-specific flags */
ctx->drvflags = drvflags;
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);

View file

@@ -3395,6 +3395,7 @@ static int lan78xx_probe(struct usb_interface *intf,
if (buf) {
dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_intr) {
ret = -ENOMEM;
kfree(buf);
goto out3;
} else {

View file

@@ -969,12 +969,17 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
struct sockaddr *addr = p;
struct sockaddr *addr;
struct scatterlist sg;
ret = eth_prepare_mac_addr_change(dev, p);
addr = kmalloc(sizeof(*addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
memcpy(addr, p, sizeof(*addr));
ret = eth_prepare_mac_addr_change(dev, addr);
if (ret)
return ret;
goto out;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
sg_init_one(&sg, addr->sa_data, dev->addr_len);
@@ -982,7 +987,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
ret = -EINVAL;
goto out;
}
} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
@@ -996,8 +1002,11 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
}
eth_commit_mac_addr_change(dev, p);
ret = 0;
return 0;
out:
kfree(addr);
return ret;
}
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
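
The virtnet_set_mac_address() rework above copies the caller-supplied sockaddr into a kmalloc'd buffer before pointing a scatterlist at it, and routes every exit through a single path that frees the copy. A hedged sketch of that general pattern follows (names invented; the usual motivation, that the source data may live in storage such as the caller's stack which scatterlist/DMA code must not reference, is stated here as an assumption rather than taken from the patch).

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* 'submit' stands in for whatever consumes the scatterlist, e.g. a
 * virtqueue control command in the hunk above. */
static int example_send_copy(const void *data, size_t len,
			     int (*submit)(struct scatterlist *sg))
{
	struct scatterlist sg;
	void *buf;
	int ret;

	buf = kmemdup(data, len, GFP_KERNEL);	/* private, kmalloc'd copy */
	if (!buf)
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	ret = submit(&sg);

	kfree(buf);				/* single exit path frees the copy */
	return ret;
}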

View file

@@ -715,7 +715,7 @@ EXPORT_SYMBOL_GPL(nd_cmd_in_size);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
const u32 *out_field)
const u32 *out_field, unsigned long remainder)
{
if (idx >= desc->out_num)
return UINT_MAX;
@@ -727,9 +727,24 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return in_field[1];
else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
return out_field[1] - 8;
else if (cmd == ND_CMD_CALL) {
else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
/*
* Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
* "Size of Output Buffer in bytes, including this
* field."
*/
if (out_field[1] < 4)
return 0;
/*
* ACPI 6.1 is ambiguous if 'status' is included in the
* output size. If we encounter an output size that
* overshoots the remainder by 4 bytes, assume it was
* including 'status'.
*/
if (out_field[1] - 8 == remainder)
return remainder;
return out_field[1] - 4;
} else if (cmd == ND_CMD_CALL) {
struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
return pkg->nd_size_out;
@@ -876,7 +891,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
/* process an output envelope */
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env);
(u32 *) in_env, (u32 *) out_env, 0);
u32 copy;
if (out_size == UINT_MAX) {

View file

@@ -1323,18 +1323,20 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
lockdep_assert_held(&phba->hbalock);
BUG_ON(!piocb || !piocb->vport);
BUG_ON(!piocb);
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
(!(piocb->vport->load_flag & FC_UNLOADING)))
mod_timer(&piocb->vport->els_tmofunc,
jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
if (!(piocb->vport->load_flag & FC_UNLOADING))
mod_timer(&piocb->vport->els_tmofunc,
jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
}
return 0;
}

View file

@@ -506,7 +506,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
sock_set_flag(sk, SOCK_DONE);
vsk->peer_shutdown = SHUTDOWN_MASK;
sk->sk_state = SS_UNCONNECTED;

View file

@@ -1261,26 +1261,30 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_GETATTR;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
if (!IS_ERR(req)) {
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_num_caps = op == CEPH_MDS_OP_GETATTR ? 1 : 2;
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.getattr.mask = mask;
req->r_locked_dir = dir;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == 0 || err == -ENOENT) {
if (dentry == req->r_dentry) {
valid = !d_unhashed(dentry);
} else {
d_invalidate(req->r_dentry);
err = -EAGAIN;
}
switch (err) {
case 0:
if (d_really_is_positive(dentry) &&
d_inode(dentry) == req->r_target_inode)
valid = 1;
break;
case -ENOENT:
if (d_really_is_negative(dentry))
valid = 1;
/* Fallthrough */
default:
break;
}
ceph_mdsc_put_request(req);
dout("d_revalidate %p lookup result=%d\n",

View file

@@ -1739,8 +1739,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
* This should be done on write(), truncate() and chown().
*/
if (!fc->handle_killpriv) {
int kill;
/*
* ia_mode calculation may have used stale i_mode.
* Refresh and recalculate.
@@ -1750,12 +1748,11 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
return ret;
attr->ia_mode = inode->i_mode;
kill = should_remove_suid(entry);
if (kill & ATTR_KILL_SUID) {
if (inode->i_mode & S_ISUID) {
attr->ia_valid |= ATTR_MODE;
attr->ia_mode &= ~S_ISUID;
}
if (kill & ATTR_KILL_SGID) {
if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
attr->ia_valid |= ATTR_MODE;
attr->ia_mode &= ~S_ISGID;
}

View file

@@ -54,7 +54,6 @@ KSYM(__kstrtab_\name):
KSYM(__kcrctab_\name):
__put KSYM(__crc_\name)
.weak KSYM(__crc_\name)
.set KSYM(__crc_\name), 0
.previous
#endif
#endif

View file

@@ -143,7 +143,7 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, void *buf);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
const u32 *out_field);
const u32 *out_field, unsigned long remainder);
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);

View file

@@ -81,7 +81,8 @@
#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)

View file

@@ -196,5 +196,6 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */
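
The new CAN_RAW_FILTER_MAX above caps how many struct can_filter entries a single setsockopt(CAN_RAW_FILTER) call may install; the enforcement itself lives in the raw socket code, which is not shown in this excerpt, so that part is an assumption here. A hedged user-space sketch of checking the limit before installing a filter list:

#include <stddef.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Returns 0 on success, -1 on error, mirroring setsockopt(). */
static int example_install_filters(int sock, const struct can_filter *filters,
				   size_t count)
{
	if (count > CAN_RAW_FILTER_MAX)
		return -1;	/* oversized lists are expected to be rejected */

	return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_FILTER,
			  filters, count * sizeof(*filters));
}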

View file

@@ -31,7 +31,7 @@
#include <linux/hdlc/ioctl.h>
/* For glibc compatibility. An empty enum does not compile. */
#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 || \
__UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
@@ -99,7 +99,7 @@ enum net_device_flags {
IFF_ECHO = 1<<18, /* volatile */
#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 || __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
/* for compatibility with glibc net/if.h */
#if __UAPI_DEF_IF_NET_DEVICE_FLAGS

View file

@@ -5,6 +5,7 @@ header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
header-y += nf_conntrack_tcp.h
header-y += nf_conntrack_tuple_common.h
header-y += nf_log.h
header-y += nf_tables.h
header-y += nf_tables_compat.h
header-y += nf_nat.h

View file

@@ -12,3 +12,4 @@ header-y += tc_bpf.h
header-y += tc_connmark.h
header-y += tc_ife.h
header-y += tc_tunnel_key.h
header-y += tc_skbmod.h

View file

@@ -903,17 +903,14 @@ list_update_cgroup_event(struct perf_event *event,
*/
cpuctx = __get_cpu_context(ctx);
/* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
/*
* We are removing the last cpu event in this context.
* If that event is not active in this cpu, cpuctx->cgrp
* should've been cleared by perf_cgroup_switch.
*/
WARN_ON_ONCE(!add && cpuctx->cgrp);
return;
}
cpuctx->cgrp = add ? event->cgrp : NULL;
/*
* cpuctx->cgrp is NULL until a cgroup event is sched in or
* ctx->nr_cgroup == 0 .
*/
if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
cpuctx->cgrp = event->cgrp;
else if (!add)
cpuctx->cgrp = NULL;
}
#else /* !CONFIG_CGROUP_PERF */

View file

@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

View file

@@ -506,13 +506,13 @@ static void __print_lock_name(struct lock_class *class)
name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
printk("%s", name);
printk(KERN_CONT "%s", name);
} else {
printk("%s", name);
printk(KERN_CONT "%s", name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk(KERN_CONT "#%d", class->name_version);
if (class->subclass)
printk("/%d", class->subclass);
printk(KERN_CONT "/%d", class->subclass);
}
}
@@ -522,9 +522,9 @@ static void print_lock_name(struct lock_class *class)
get_usage_chars(class, usage);
printk(" (");
printk(KERN_CONT " (");
__print_lock_name(class);
printk("){%s}", usage);
printk(KERN_CONT "){%s}", usage);
}
static void print_lockdep_cache(struct lockdep_map *lock)
@@ -536,7 +536,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
if (!name)
name = __get_key_name(lock->key->subkeys, str);
printk("%s", name);
printk(KERN_CONT "%s", name);
}
static void print_lock(struct held_lock *hlock)
@@ -551,13 +551,13 @@ static void print_lock(struct held_lock *hlock)
barrier();
if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
printk("<RELEASED>\n");
printk(KERN_CONT "<RELEASED>\n");
return;
}
print_lock_name(lock_classes + class_idx - 1);
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
printk(KERN_CONT ", at: [<%p>] %pS\n",
(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *curr)
@@ -792,8 +792,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
if (!graph_lock()) {
@@ -1071,7 +1071,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
return 0;
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(":\n");
printk(KERN_CONT ":\n");
print_stack_trace(&target->trace, 6);
return 0;
@@ -1102,11 +1102,11 @@ print_circular_lock_scenario(struct held_lock *src,
if (parent != source) {
printk("Chain exists of:\n ");
__print_lock_name(source);
printk(" --> ");
printk(KERN_CONT " --> ");
__print_lock_name(parent);
printk(" --> ");
printk(KERN_CONT " --> ");
__print_lock_name(target);
printk("\n\n");
printk(KERN_CONT "\n\n");
}
printk(" Possible unsafe locking scenario:\n\n");
@@ -1114,16 +1114,16 @@ print_circular_lock_scenario(struct held_lock *src,
printk(" ---- ----\n");
printk(" lock(");
__print_lock_name(target);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(parent);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(target);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(source);
printk(");\n");
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
@@ -1359,22 +1359,22 @@ static void print_lock_class_header(struct lock_class *class, int depth)
printk("%*s->", depth, "");
print_lock_name(class);
printk(" ops: %lu", class->ops);
printk(" {\n");
printk(KERN_CONT " ops: %lu", class->ops);
printk(KERN_CONT " {\n");
for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
if (class->usage_mask & (1 << bit)) {
int len = depth;
len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(" at:\n");
len += printk(KERN_CONT " at:\n");
print_stack_trace(class->usage_traces + bit, len);
}
}
printk("%*s }\n", depth, "");
printk("%*s ... key at: ",depth,"");
print_ip_sym((unsigned long)class->key);
printk("%*s ... key at: [<%p>] %pS\n",
depth, "", class->key, class->key);
}
/*
@@ -1437,11 +1437,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
if (middle_class != unsafe_class) {
printk("Chain exists of:\n ");
__print_lock_name(safe_class);
printk(" --> ");
printk(KERN_CONT " --> ");
__print_lock_name(middle_class);
printk(" --> ");
printk(KERN_CONT " --> ");
__print_lock_name(unsafe_class);
printk("\n\n");
printk(KERN_CONT "\n\n");
}
printk(" Possible interrupt unsafe locking scenario:\n\n");
@@ -1449,18 +1449,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
printk(" ---- ----\n");
printk(" lock(");
__print_lock_name(unsafe_class);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" local_irq_disable();\n");
printk(" lock(");
__print_lock_name(safe_class);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(middle_class);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" <Interrupt>\n");
printk(" lock(");
__print_lock_name(safe_class);
printk(");\n");
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
@@ -1497,9 +1497,9 @@ print_bad_irq_dependency(struct task_struct *curr,
print_lock(prev);
printk("which would create a new lock dependency:\n");
print_lock_name(hlock_class(prev));
printk(" ->");
printk(KERN_CONT " ->");
print_lock_name(hlock_class(next));
printk("\n");
printk(KERN_CONT "\n");
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
@@ -1521,8 +1521,7 @@ print_bad_irq_dependency(struct task_struct *curr,
lockdep_print_held_locks(curr);
printk("\nthe dependencies between %s-irq-safe lock", irqclass);
printk(" and the holding lock:\n");
printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
if (!save_trace(&prev_root->trace))
return 0;
print_shortest_lock_dependencies(backwards_entry, prev_root);
@@ -1694,10 +1693,10 @@ print_deadlock_scenario(struct held_lock *nxt,
printk(" ----\n");
printk(" lock(");
__print_lock_name(prev);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" lock(");
__print_lock_name(next);
printk(");\n");
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
printk(" May be due to missing lock nesting notation\n\n");
}
@@ -1891,9 +1890,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
graph_unlock();
printk("\n new dependency: ");
print_lock_name(hlock_class(prev));
printk(" => ");
printk(KERN_CONT " => ");
print_lock_name(hlock_class(next));
printk("\n");
printk(KERN_CONT "\n");
dump_stack();
return graph_lock();
}
@@ -2343,11 +2342,11 @@ print_usage_bug_scenario(struct held_lock *lock)
printk(" ----\n");
printk(" lock(");
__print_lock_name(class);
printk(");\n");
printk(KERN_CONT ");\n");
printk(" <Interrupt>\n");
printk(" lock(");
__print_lock_name(class);
printk(");\n");
printk(KERN_CONT ");\n");
printk("\n *** DEADLOCK ***\n\n");
}
@@ -2522,14 +2521,18 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
print_ip_sym(curr->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
print_ip_sym(curr->hardirq_disable_ip);
printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
print_ip_sym(curr->softirq_enable_ip);
printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
print_ip_sym(curr->softirq_disable_ip);
printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
(void *)curr->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
(void *)curr->hardirq_disable_ip);
printk("softirqs last enabled at (%u): [<%p>] %pS\n",
curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
(void *)curr->softirq_enable_ip);
printk("softirqs last disabled at (%u): [<%p>] %pS\n",
curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
(void *)curr->softirq_disable_ip);
}
static int HARDIRQ_verbose(struct lock_class *class)
@@ -3235,8 +3238,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (very_verbose(class)) {
printk("\nacquire class [%p] %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
dump_stack();
}
@@ -3378,7 +3381,7 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
printk(KERN_CONT ") at:\n");
print_ip_sym(ip);
printk("but there are no more locks to release!\n");
printk("\nother info that might help us debug this:\n");
@@ -3871,7 +3874,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
printk(KERN_CONT ") at:\n");
print_ip_sym(ip);
printk("but there are no locks held!\n");
printk("\nother info that might help us debug this:\n");

View file

@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
if (!rt_mutex_has_waiters(lock))
clear_rt_mutex_waiters(lock);
unsigned long owner, *p = (unsigned long *) &lock->owner;
if (rt_mutex_has_waiters(lock))
return;
/*
* The rbtree has no waiters enqueued, now make sure that the
* lock->owner still has the waiters bit set, otherwise the
* following can happen:
*
* CPU 0 CPU 1 CPU2
* l->owner=T1
* rt_mutex_lock(l)
* lock(l->lock)
* l->owner = T1 | HAS_WAITERS;
* enqueue(T2)
* boost()
* unlock(l->lock)
* block()
*
* rt_mutex_lock(l)
* lock(l->lock)
* l->owner = T1 | HAS_WAITERS;
* enqueue(T3)
* boost()
* unlock(l->lock)
* block()
* signal(->T2) signal(->T3)
* lock(l->lock)
* dequeue(T2)
* deboost()
* unlock(l->lock)
* lock(l->lock)
* dequeue(T3)
* ==> wait list is empty
* deboost()
* unlock(l->lock)
* lock(l->lock)
* fixup_rt_mutex_waiters()
* if (wait_list_empty(l) {
* l->owner = owner
* owner = l->owner & ~HAS_WAITERS;
* ==> l->owner = T1
* }
* lock(l->lock)
* rt_mutex_unlock(l) fixup_rt_mutex_waiters()
* if (wait_list_empty(l) {
* owner = l->owner & ~HAS_WAITERS;
* cmpxchg(l->owner, T1, NULL)
* ===> Success (l->owner = NULL)
*
* l->owner = owner
* ==> l->owner = T1
* }
*
* With the check for the waiter bit in place T3 on CPU2 will not
* overwrite. All tasks fiddling with the waiters bit are
* serialized by l->lock, so nothing else can modify the waiters
* bit. If the bit is set then nothing can change l->owner either
* so the simple RMW is safe. The cmpxchg() will simply fail if it
* happens in the middle of the RMW because the waiters bit is
* still set.
*/
owner = READ_ONCE(*p);
if (owner & RT_MUTEX_HAS_WAITERS)
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
/*

View file

@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
return (struct task_struct *)
((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
}
/*

View file

@@ -212,6 +212,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
static unsigned long next = INITIAL_JIFFIES;
struct autogroup *ag;
unsigned long shares;
int err;
if (nice < MIN_NICE || nice > MAX_NICE)
@@ -230,9 +231,10 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
next = HZ / 10 + jiffies;
ag = autogroup_task_get(p);
shares = scale_load(sched_prio_to_weight[nice + 20]);
down_write(&ag->lock);
err = sched_group_set_shares(ag->tg, sched_prio_to_weight[nice + 20]);
err = sched_group_set_shares(ag->tg, shares);
if (!err)
ag->nice = nice;
up_write(&ag->lock);

Some files were not shown because too many files have changed in this diff.