Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Build failure in drivers/net/wwan/mhi_wwan_mbim.c:
add missing parameter (0, assuming we don't want buffer pre-alloc).
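
The mhi_prepare_for_transfer() prototype gained an extra "flags" argument in
this merge (see the drivers/bus/mhi/core hunks below), so the fixup in
mhi_wwan_mbim.c presumably boils down to passing 0 at its call site, along the
lines of this sketch (variable names are illustrative; the mhi_wwan_mbim.c call
site itself is not among the hunks shown here):

	/* 0: do not ask MHI to pre-allocate inbound buffers */
	ret = mhi_prepare_for_transfer(mhi_dev, 0);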

Conflict in drivers/net/dsa/sja1105/sja1105_main.c between:
  589918df93 ("net: dsa: sja1105: be stateless with FDB entries on SJA1105P/Q/R/S/SJA1110 too")
  0fac6aa098 ("net: dsa: sja1105: delete the best_effort_vlan_filtering mode")

Followed the instructions from the commit message of the former commit
and removed the if conditions. When looking at commit 589918df93 ("net:
dsa: sja1105: be stateless with FDB entries on SJA1105P/Q/R/S/SJA1110 too")
note that the mask_iotag fields get removed by the following patch.
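
For reference, the resolved lookup-mask setup in sja1105pqrs_fdb_add() (and
its _del() counterpart) simply keeps the unconditional masks, as the hunks in
drivers/net/dsa/sja1105/sja1105_main.c below show:

	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.destports = BIT(port);

The if (priv->vlan_aware) { ... } else { ... } blocks and the iotag/mask_iotag
assignments are gone entirely.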

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0ca8d3ca45 (Jakub Kicinski, 2021-08-05 14:59:40 -07:00)
113 changed files with 1260 additions and 490 deletions

@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
 gets overloaded very easily and netdev@vger really doesn't need more
 traffic if we can help it.
 
+netdevsim is great, can I extend it for my out-of-tree tests?
+-------------------------------------------------------------
+No, `netdevsim` is a test vehicle solely for upstream tests.
+(Please add your tests under tools/testing/selftests/.)
+
+We also give no guarantees that `netdevsim` won't change in the future
+in a way which would break what would normally be considered uAPI.
+
+Is netdevsim considered a "user" of an API?
+-------------------------------------------
+Linux kernel has a long standing rule that no API should be added unless
+it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
+strongly encouraged when adding new APIs, but `netdevsim` in itself
+is **not** considered a use case/user.
+
 Any other tips to help ensure my net/net-next patch gets OK'd?
 --------------------------------------------------------------
 Attention to detail. Re-read your own work as if you were the

@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
 state (f.e. VLAN).
 IF_OPER_TESTING (4):
-Unused in current kernel.
+Interface is in testing mode, for example executing driver self-tests
+or media (cable) test. It can't be used for normal traffic until tests
+complete.
 IF_OPER_DORMANT (5):
 Interface is L1 up, but waiting for an external event, f.e. for a
@@ -111,7 +113,7 @@ it as lower layer.
 Note that for certain kind of soft-devices, which are not managing any
 real hardware, it is possible to set this bit from userspace. One
-should use TVL IFLA_CARRIER to do so.
+should use TLV IFLA_CARRIER to do so.
 netif_carrier_ok() can be used to query that bit.

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Opossums on Parade
 # *DOCUMENTATION*
@@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 PHONY += scripts_basic
 scripts_basic:
 $(Q)$(MAKE) $(build)=scripts/basic
-$(Q)rm -f .tmp_quiet_recordmcount
 PHONY += outputmakefile
 ifdef building_out_of_srctree

@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
 ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
 -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code
+# generation is minimal, it will just use r29 instead.
+ccflags-y += $(call cc-option, -ffixed-r30)
 asflags-y := -D__VDSO64__ -s
 targets += vdso64.lds

@@ -77,7 +77,7 @@
 #include "../../../../drivers/pci/pci.h"
 DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;

@@ -11,6 +11,7 @@ UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
 obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
 obj-all := $(obj-y) piggy.o syms.o
 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4

@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../../lib/clz_ctz.c"

@@ -335,7 +335,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y

@@ -325,7 +325,7 @@ CONFIG_L2TP_DEBUGFS=m
 CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
-CONFIG_BRIDGE=m
+CONFIG_BRIDGE=y
 CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y

@@ -51,6 +51,7 @@ SECTIONS
 .rela.dyn ALIGN(8) : { *(.rela.dyn) }
 .got ALIGN(8) : { *(.got .toc) }
+.got.plt ALIGN(8) : { *(.got.plt) }
 _end = .;
 PROVIDE(end = .);

@@ -51,6 +51,7 @@ SECTIONS
 .rela.dyn ALIGN(8) : { *(.rela.dyn) }
 .got ALIGN(8) : { *(.got .toc) }
+.got.plt ALIGN(8) : { *(.got.plt) }
 _end = .;
 PROVIDE(end = .);

@@ -2016,6 +2016,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
 {
+trace_kvm_hv_hypercall_done(result);
 kvm_hv_hypercall_set_result(vcpu, result);
 ++vcpu->stat.hypercalls;
 return kvm_skip_emulated_instruction(vcpu);
@@ -2139,6 +2140,7 @@ static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 {
+struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 struct kvm_hv_hcall hc;
 u64 ret = HV_STATUS_SUCCESS;
@@ -2173,17 +2175,25 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
 hc.rep = !!(hc.rep_cnt || hc.rep_idx);
-if (hc.fast && is_xmm_fast_hypercall(&hc))
-kvm_hv_hypercall_read_xmm(&hc);
 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
 hc.ingpa, hc.outgpa);
-if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
 ret = HV_STATUS_ACCESS_DENIED;
 goto hypercall_complete;
 }
+if (hc.fast && is_xmm_fast_hypercall(&hc)) {
+if (unlikely(hv_vcpu->enforce_cpuid &&
+!(hv_vcpu->cpuid_cache.features_edx &
+HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
+kvm_queue_exception(vcpu, UD_VECTOR);
+return 1;
+}
+kvm_hv_hypercall_read_xmm(&hc);
+}
 switch (hc.code) {
 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
 if (unlikely(hc.rep)) {

@@ -1644,7 +1644,7 @@ static int is_empty_shadow_page(u64 *spt)
 * aggregate version in order to make the slab shrinker
 * faster
 */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 {
 kvm->arch.n_used_mmu_pages += nr;
 percpu_counter_add(&kvm_total_used_mmu_pages, nr);

@@ -64,6 +64,7 @@ static DEFINE_MUTEX(sev_bitmap_lock);
 unsigned int max_sev_asid;
 static unsigned int min_sev_asid;
 static unsigned long sev_me_mask;
+static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
@@ -78,11 +79,11 @@ struct enc_region {
 /* Called with the sev_bitmap_lock held, or on shutdown */
 static int sev_flush_asids(int min_asid, int max_asid)
 {
-int ret, pos, error = 0;
+int ret, asid, error = 0;
 /* Check if there are any ASIDs to reclaim before performing a flush */
-pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
-if (pos >= max_asid)
+asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+if (asid > max_asid)
 return -EBUSY;
 /*
@@ -115,15 +116,15 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
-max_sev_asid);
-bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
+nr_asids);
+bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
 return true;
 }
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-int pos, min_asid, max_asid, ret;
+int asid, min_asid, max_asid, ret;
 bool retry = true;
 enum misc_res_type type;
@@ -143,11 +144,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
 */
-min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+min_asid = sev->es_active ? 1 : min_sev_asid;
 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
-pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
-if (pos >= max_asid) {
+asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+if (asid > max_asid) {
 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
 retry = false;
 goto again;
@@ -157,11 +158,11 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 goto e_uncharge;
 }
-__set_bit(pos, sev_asid_bitmap);
+__set_bit(asid, sev_asid_bitmap);
 mutex_unlock(&sev_bitmap_lock);
-return pos + 1;
+return asid;
 e_uncharge:
 misc_cg_uncharge(type, sev->misc_cg, 1);
 put_misc_cg(sev->misc_cg);
@@ -179,17 +180,16 @@ static int sev_get_asid(struct kvm *kvm)
 static void sev_asid_free(struct kvm_sev_info *sev)
 {
 struct svm_cpu_data *sd;
-int cpu, pos;
+int cpu;
 enum misc_res_type type;
 mutex_lock(&sev_bitmap_lock);
-pos = sev->asid - 1;
-__set_bit(pos, sev_reclaim_asid_bitmap);
+__set_bit(sev->asid, sev_reclaim_asid_bitmap);
 for_each_possible_cpu(cpu) {
 sd = per_cpu(svm_data, cpu);
-sd->sev_vmcbs[pos] = NULL;
+sd->sev_vmcbs[sev->asid] = NULL;
 }
 mutex_unlock(&sev_bitmap_lock);
@@ -1857,12 +1857,17 @@ void __init sev_hardware_setup(void)
 min_sev_asid = edx;
 sev_me_mask = 1UL << (ebx & 0x3f);
-/* Initialize SEV ASID bitmaps */
-sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+/*
+ * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
+ * even though it's never used, so that the bitmap is indexed by the
+ * actual ASID.
+ */
+nr_asids = max_sev_asid + 1;
+sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
 if (!sev_asid_bitmap)
 goto out;
-sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
 if (!sev_reclaim_asid_bitmap) {
 bitmap_free(sev_asid_bitmap);
 sev_asid_bitmap = NULL;
@@ -1907,7 +1912,7 @@ void sev_hardware_teardown(void)
 return;
 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
-sev_flush_asids(0, max_sev_asid);
+sev_flush_asids(1, max_sev_asid);
 bitmap_free(sev_asid_bitmap);
 bitmap_free(sev_reclaim_asid_bitmap);
@@ -1921,7 +1926,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
 if (!sev_enabled)
 return 0;
-sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
 if (!sd->sev_vmcbs)
 return -ENOMEM;

@@ -92,6 +92,21 @@ TRACE_EVENT(kvm_hv_hypercall,
 __entry->outgpa)
 );
+TRACE_EVENT(kvm_hv_hypercall_done,
+TP_PROTO(u64 result),
+TP_ARGS(result),
+TP_STRUCT__entry(
+__field(__u64, result)
+),
+TP_fast_assign(
+__entry->result = result;
+),
+TP_printk("result 0x%llx", __entry->result)
+);
 /*
 * Tracepoint for Xen hypercall.
 */

@@ -4358,8 +4358,17 @@ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
 {
-return kvm_arch_interrupt_allowed(vcpu) &&
-kvm_cpu_accept_dm_intr(vcpu);
+/*
+ * Do not cause an interrupt window exit if an exception
+ * is pending or an event needs reinjection; userspace
+ * might want to inject the interrupt manually using KVM_SET_REGS
+ * or KVM_SET_SREGS. For that to work, we must be at an
+ * instruction boundary and with no events half-injected.
+ */
+return (kvm_arch_interrupt_allowed(vcpu) &&
+kvm_cpu_accept_dm_intr(vcpu) &&
+!kvm_event_needs_reinjection(vcpu) &&
+!vcpu->arch.exception.pending);
 }
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,

@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-struct mhi_chan *mhi_chan);
+struct mhi_chan *mhi_chan, unsigned int flags);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
 struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,

@@ -1430,7 +1430,7 @@ static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 }
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-struct mhi_chan *mhi_chan)
+struct mhi_chan *mhi_chan, unsigned int flags)
 {
 int ret = 0;
 struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 if (ret)
 goto error_pm_state;
+if (mhi_chan->dir == DMA_FROM_DEVICE)
+mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
 /* Pre-allocate buffer for xfer ring */
 if (mhi_chan->pre_alloc) {
 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1610,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 }
 /* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
 {
 int ret, dir;
 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
 if (!mhi_chan)
 continue;
-ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
 if (ret)
 goto error_open_chan;
 }

@@ -92,13 +92,20 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
 }
 EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
+static void devm_clk_bulk_release_all(struct device *dev, void *res)
+{
+struct clk_bulk_devres *devres = res;
+clk_bulk_put_all(devres->num_clks, devres->clks);
+}
 int __must_check devm_clk_bulk_get_all(struct device *dev,
 struct clk_bulk_data **clks)
 {
 struct clk_bulk_devres *devres;
 int ret;
-devres = devres_alloc(devm_clk_bulk_release,
+devres = devres_alloc(devm_clk_bulk_release_all,
 sizeof(*devres), GFP_KERNEL);
 if (!devres)
 return -ENOMEM;

@@ -526,7 +526,7 @@ struct stm32f4_pll {
 struct stm32f4_pll_post_div_data {
 int idx;
-u8 pll_num;
+int pll_idx;
 const char *name;
 const char *parent;
 u8 flag;
@@ -557,13 +557,13 @@ static const struct clk_div_table post_divr_table[] = {
 #define MAX_POST_DIV 3
 static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = {
-{ CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q",
+{ CLK_I2SQ_PDIV, PLL_VCO_I2S, "plli2s-q-div", "plli2s-q",
 CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL},
-{ CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q",
+{ CLK_SAIQ_PDIV, PLL_VCO_SAI, "pllsai-q-div", "pllsai-q",
 CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL },
-{ NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
+{ NO_IDX, PLL_VCO_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT,
 STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table },
 };
@@ -1774,7 +1774,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
 post_div->width,
 post_div->flag_div,
 post_div->div_table,
-clks[post_div->pll_num],
+clks[post_div->pll_idx],
 &stm32f4_clk_lock);
 if (post_div->idx != NO_IDX)

@@ -18,6 +18,7 @@ config COMMON_CLK_HI3519
 config COMMON_CLK_HI3559A
 bool "Hi3559A Clock Driver"
 depends on ARCH_HISI || COMPILE_TEST
+select RESET_HISI
 default ARCH_HISI
 help
 Build the clock driver for hi3559a.

@@ -467,7 +467,7 @@ DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
 static struct clk_smd_rpm *msm8936_clks[] = {
 [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
-[RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_clk,
+[RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
 [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
 [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
 [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,

@@ -194,6 +194,15 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
 gate_ops->disable(gate_hw);
 }
+static void clk_sdmmc_mux_disable_unused(struct clk_hw *hw)
+{
+struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+gate_ops->disable_unused(gate_hw);
+}
 static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
 {
 struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -218,6 +227,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
 .is_enabled = clk_sdmmc_mux_is_enabled,
 .enable = clk_sdmmc_mux_enable,
 .disable = clk_sdmmc_mux_disable,
+.disable_unused = clk_sdmmc_mux_disable_unused,
 .restore_context = clk_sdmmc_mux_restore_context,
 };

@@ -405,7 +405,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
 mpc8xxx_gpio_irq_cascade,
-IRQF_SHARED, "gpio-cascade",
+IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
 mpc8xxx_gc);
 if (ret) {
 dev_err(&pdev->dev,

@@ -238,8 +238,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 struct resource *res;
 int ret, irq;
-irq = platform_get_irq(pdev, 0);
-if (irq < 0)
+irq = platform_get_irq_optional(pdev, 0);
+if (irq < 0 && irq != -ENXIO)
 return irq;
 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -278,7 +278,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
 pm_runtime_enable(&pdev->dev);
-if (irq) {
+if (irq > 0) {
 struct irq_chip *irq_chip = &gpio->irq_chip;
 u8 irq_status;

@@ -1573,6 +1573,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
 struct media_request *req)
 {
 struct vb2_buffer *vb;
+enum vb2_buffer_state orig_state;
 int ret;
 if (q->error) {
@@ -1673,6 +1674,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
 * Add to the queued buffers list, a buffer will stay on it until
 * dequeued in dqbuf.
 */
+orig_state = vb->state;
 list_add_tail(&vb->queued_entry, &q->queued_list);
 q->queued_count++;
 q->waiting_for_buffers = false;
@@ -1703,8 +1705,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
 if (q->streaming && !q->start_streaming_called &&
 q->queued_count >= q->min_buffers_needed) {
 ret = vb2_start_streaming(q);
-if (ret)
+if (ret) {
+/*
+ * Since vb2_core_qbuf will return with an error,
+ * we should return it to state DEQUEUED since
+ * the error indicates that the buffer wasn't queued.
+ */
+list_del(&vb->queued_entry);
+q->queued_count--;
+vb->state = orig_state;
 return ret;
+}
 }
 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);

@@ -8,6 +8,7 @@ config VIDEO_ATMEL_ISC
 select VIDEOBUF2_DMA_CONTIG
 select REGMAP_MMIO
 select V4L2_FWNODE
+select VIDEO_ATMEL_ISC_BASE
 help
 This module makes the ATMEL Image Sensor Controller available
 as a v4l2 device.
@@ -19,10 +20,17 @@ config VIDEO_ATMEL_XISC
 select VIDEOBUF2_DMA_CONTIG
 select REGMAP_MMIO
 select V4L2_FWNODE
+select VIDEO_ATMEL_ISC_BASE
 help
 This module makes the ATMEL eXtended Image Sensor Controller
 available as a v4l2 device.
+config VIDEO_ATMEL_ISC_BASE
+tristate
+default n
+help
+ATMEL ISC and XISC common code base.
 config VIDEO_ATMEL_ISI
 tristate "ATMEL Image Sensor Interface (ISI) support"
 depends on VIDEO_V4L2 && OF

@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
-atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
-atmel-xisc-objs = atmel-sama7g5-isc.o atmel-isc-base.o
+atmel-isc-objs = atmel-sama5d2-isc.o
+atmel-xisc-objs = atmel-sama7g5-isc.o
 obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC_BASE) += atmel-isc-base.o
 obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
 obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel-xisc.o

@@ -378,6 +378,7 @@ int isc_clk_init(struct isc_device *isc)
 return 0;
 }
+EXPORT_SYMBOL_GPL(isc_clk_init);
 void isc_clk_cleanup(struct isc_device *isc)
 {
@@ -392,6 +393,7 @@ void isc_clk_cleanup(struct isc_device *isc)
 clk_unregister(isc_clk->clk);
 }
 }
+EXPORT_SYMBOL_GPL(isc_clk_cleanup);
 static int isc_queue_setup(struct vb2_queue *vq,
 unsigned int *nbuffers, unsigned int *nplanes,
@@ -1578,6 +1580,7 @@ irqreturn_t isc_interrupt(int irq, void *dev_id)
 return ret;
 }
+EXPORT_SYMBOL_GPL(isc_interrupt);
 static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
 {
@@ -2212,6 +2215,7 @@ const struct v4l2_async_notifier_operations isc_async_ops = {
 .unbind = isc_async_unbind,
 .complete = isc_async_complete,
 };
+EXPORT_SYMBOL_GPL(isc_async_ops);
 void isc_subdev_cleanup(struct isc_device *isc)
 {
@@ -2224,6 +2228,7 @@ void isc_subdev_cleanup(struct isc_device *isc)
 INIT_LIST_HEAD(&isc->subdev_entities);
 }
+EXPORT_SYMBOL_GPL(isc_subdev_cleanup);
 int isc_pipeline_init(struct isc_device *isc)
 {
@@ -2264,6 +2269,7 @@ int isc_pipeline_init(struct isc_device *isc)
 return 0;
 }
+EXPORT_SYMBOL_GPL(isc_pipeline_init);
 /* regmap configuration */
 #define ATMEL_ISC_REG_MAX 0xd5c
@@ -2273,4 +2279,9 @@ const struct regmap_config isc_regmap_config = {
 .val_bits = 32,
 .max_register = ATMEL_ISC_REG_MAX,
 };
+EXPORT_SYMBOL_GPL(isc_regmap_config);
+MODULE_AUTHOR("Songjun Wu");
+MODULE_AUTHOR("Eugen Hristev");
+MODULE_DESCRIPTION("Atmel ISC common code base");
+MODULE_LICENSE("GPL v2");

@@ -37,7 +37,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
 } else {
 /* read */
 requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
-pipe = usb_rcvctrlpipe(d->udev, 0);
+/*
+ * Zero-length transfers must use usb_sndctrlpipe() and
+ * rtl28xxu_identify_state() uses a zero-length i2c read
+ * command to determine the chip type.
+ */
+if (req->size)
+pipe = usb_rcvctrlpipe(d->udev, 0);
+else
+pipe = usb_sndctrlpipe(d->udev, 0);
 }
 ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value,
@@ -612,9 +621,8 @@ static int rtl28xxu_read_config(struct dvb_usb_device *d)
 static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
 {
 struct rtl28xxu_dev *dev = d_to_priv(d);
-u8 buf[1];
 int ret;
-struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 1, buf};
+struct rtl28xxu_req req_demod_i2c = {0x0020, CMD_I2C_DA_RD, 0, NULL};
 dev_dbg(&d->intf->dev, "\n");

@@ -837,16 +837,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
 return 0;
 }
-ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
-if (ret < 0)
-goto error;
+/* In case of this switch we work with 32bit registers on top of 16bit
+ * bus. Some registers (for example access to forwarding database) have
+ * trigger bit on the first 16bit half of request, the result and
+ * configuration of request in the second half.
+ * To make it work properly, we should do the second part of transfer
+ * before the first one is done.
+ */
 ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
 val >> 16);
 if (ret < 0)
 goto error;
+ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+if (ret < 0)
+goto error;
 return 0;
 error:
 dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
 return ret;

@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
 hostcmd = SJA1105_HOSTCMD_INVALIDATE;
 }
 sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+enum packing_op op)
+{
+int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
 /* Hack - The hardware takes the 'index' field within
 * struct sja1105_l2_lookup_entry as the index on which this command
@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
 * such that our API doesn't need to ask for a full-blown entry
 * structure when e.g. a delete is requested.
 */
-sja1105_packing(buf, &cmd->index, 15, 6,
-SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
-}
-
-static void
-sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
-enum packing_op op)
-{
-int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
-
-return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
 }
 
 static void
 sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
 enum packing_op op)
 {
-int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
-return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
+sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
+sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
 }
 /* The switch is so retarded that it makes our command/entry abstraction

@@ -1484,10 +1484,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
 const unsigned char *addr, u16 vid)
 {
-struct sja1105_l2_lookup_entry l2_lookup = {0};
+struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
 struct sja1105_private *priv = ds->priv;
 struct device *dev = ds->dev;
 int last_unused = -1;
+int start, end, i;
 int bin, way, rc;
 bin = sja1105et_fdb_hash(priv, addr, vid);
@@ -1499,7 +1500,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
 * mask? If yes, we need to do nothing. If not, we need
 * to rewrite the entry by adding this port to it.
 */
-if (l2_lookup.destports & BIT(port))
+if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
 return 0;
 l2_lookup.destports |= BIT(port);
 } else {
@@ -1530,6 +1531,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
 index, NULL, false);
 }
 }
+l2_lookup.lockeds = true;
 l2_lookup.index = sja1105et_fdb_index(bin, way);
 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
@@ -1538,6 +1540,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
 if (rc < 0)
 return rc;
+/* Invalidate a dynamically learned entry if that exists */
+start = sja1105et_fdb_index(bin, 0);
+end = sja1105et_fdb_index(bin, way);
+for (i = start; i < end; i++) {
+rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+i, &tmp);
+if (rc == -ENOENT)
+continue;
+if (rc)
+return rc;
+if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+continue;
+rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+i, NULL, false);
+if (rc)
+return rc;
+break;
+}
 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
@@ -1579,32 +1604,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
 const unsigned char *addr, u16 vid)
 {
-struct sja1105_l2_lookup_entry l2_lookup = {0};
+struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
 struct sja1105_private *priv = ds->priv;
 int rc, i;
 /* Search for an existing entry in the FDB table */
 l2_lookup.macaddr = ether_addr_to_u64(addr);
 l2_lookup.vlanid = vid;
-l2_lookup.iotag = SJA1105_S_TAG;
 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-if (priv->vlan_aware) {
-l2_lookup.mask_vlanid = VLAN_VID_MASK;
-l2_lookup.mask_iotag = BIT(0);
-} else {
-l2_lookup.mask_vlanid = 0;
-l2_lookup.mask_iotag = 0;
-}
+l2_lookup.mask_vlanid = VLAN_VID_MASK;
 l2_lookup.destports = BIT(port);
+tmp = l2_lookup;
 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
-SJA1105_SEARCH, &l2_lookup);
-if (rc == 0) {
-/* Found and this port is already in the entry's
+SJA1105_SEARCH, &tmp);
+if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+/* Found a static entry and this port is already in the entry's
 * port mask => job done
 */
-if (l2_lookup.destports & BIT(port))
+if ((tmp.destports & BIT(port)) && tmp.lockeds)
 return 0;
+l2_lookup = tmp;
 /* l2_lookup.index is populated by the switch in case it
 * found something.
 */
@@ -1626,16 +1649,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
 return -EINVAL;
 }
-l2_lookup.lockeds = true;
 l2_lookup.index = i;
 skip_finding_an_index:
+l2_lookup.lockeds = true;
 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
 l2_lookup.index, &l2_lookup,
 true);
 if (rc < 0)
 return rc;
+/* The switch learns dynamic entries and looks up the FDB left to
+ * right. It is possible that our addition was concurrent with the
+ * dynamic learning of the same address, so now that the static entry
+ * has been installed, we are certain that address learning for this
+ * particular address has been turned off, so the dynamic entry either
+ * is in the FDB at an index smaller than the static one, or isn't (it
+ * can also be at a larger index, but in that case it is inactive
+ * because the static FDB entry will match first, and the dynamic one
+ * will eventually age out). Search for a dynamically learned address
+ * prior to our static one and invalidate it.
+ */
+tmp = l2_lookup;
+rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+SJA1105_SEARCH, &tmp);
+if (rc < 0) {
+dev_err(ds->dev,
+"port %d failed to read back entry for %pM vid %d: %pe\n",
+port, addr, vid, ERR_PTR(rc));
+return rc;
+}
+if (tmp.index < l2_lookup.index) {
+rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+tmp.index, NULL, false);
+if (rc < 0)
+return rc;
+}
 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
 }
@@ -1649,15 +1702,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
 l2_lookup.macaddr = ether_addr_to_u64(addr);
 l2_lookup.vlanid = vid;
-l2_lookup.iotag = SJA1105_S_TAG;
 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-if (priv->vlan_aware) {
-l2_lookup.mask_vlanid = VLAN_VID_MASK;
-l2_lookup.mask_iotag = BIT(0);
-} else {
-l2_lookup.mask_vlanid = 0;
-l2_lookup.mask_iotag = 0;
-}
+l2_lookup.mask_vlanid = VLAN_VID_MASK;
 l2_lookup.destports = BIT(port);
 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,

@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 }
 /* Allocated memory for FW statistics */
-if (bnx2x_alloc_fw_stats_mem(bp))
+rc = bnx2x_alloc_fw_stats_mem(bp);
+if (rc)
 LOAD_ERROR_EXIT(bp, load_error0);
 /* request pf to initialize status blocks */

@@ -4017,13 +4017,13 @@ fec_drv_remove(struct platform_device *pdev)
 if (of_phy_is_fixed_link(np))
 of_phy_deregister_fixed_link(np);
 of_node_put(fep->phy_node);
-free_netdev(ndev);
 clk_disable_unprepare(fep->clk_ahb);
 clk_disable_unprepare(fep->clk_ipg);
 pm_runtime_put_noidle(&pdev->dev);
 pm_runtime_disable(&pdev->dev);
+free_netdev(ndev);
 return 0;
 }

@@ -530,6 +530,8 @@ static int prestera_devlink_traps_register(struct prestera_switch *sw)
 prestera_trap = &prestera_trap_items_arr[i];
 devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
 }
+devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
+groups_count);
 err_groups_register:
 kfree(trap_data->trap_items_arr);
 err_trap_items_alloc:

@@ -13,19 +13,26 @@
 */
 #define VSTAX 73
-static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+#define ifh_encode_bitfield(ifh, value, pos, _width)	\
+({	\
+u32 width = (_width);	\
+	\
+/* Max width is 5 bytes - 40 bits. In worst case this will
+ * spread over 6 bytes - 48 bits
+ */	\
+compiletime_assert(width <= 40,	\
+"Unsupported width, must be <= 40");	\
+__ifh_encode_bitfield((ifh), (value), (pos), width);	\
+})
+
+static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
 {
 u8 *ifh_hdr = ifh;
 /* Calculate the Start IFH byte position of this IFH bit position */
 u32 byte = (35 - (pos / 8));
 /* Calculate the Start bit position in the Start IFH byte */
 u32 bit = (pos % 8);
-u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
-/* Max width is 5 bytes - 40 bits. In worst case this will
- * spread over 6 bytes - 48 bits
- */
-compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);
 /* The b0-b7 goes into the start IFH byte */
 if (encode & 0xFF)

@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 printk(version);
 #endif
-i = pci_enable_device(pdev);
+i = pcim_enable_device(pdev);
 if (i) return i;
 /* natsemi has a non-standard PM control register
@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 ioaddr = ioremap(iostart, iosize);
 if (!ioaddr) {
 i = -ENOMEM;
-goto err_ioremap;
+goto err_pci_request_regions;
 }
 /* Work around the dropped serial bit. */
@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_register_netdev:
 iounmap(ioaddr);
-err_ioremap:
-pci_release_regions(pdev);
 err_pci_request_regions:
 free_netdev(dev);
 return i;
@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
 NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
 unregister_netdev (dev);
-pci_release_regions (pdev);
 iounmap(ioaddr);
 free_netdev (dev);
 }

@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 kfree(vdev->vpaths);
-/* we are safe to free it now */
-free_netdev(dev);
 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
 buf);
 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
 __func__, __LINE__);
+/* we are safe to free it now */
+free_netdev(dev);
 }
 /*

@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 /* Init to unknowns */
 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
 cmd->base.port = PORT_OTHER;
 cmd->base.speed = SPEED_UNKNOWN;
 cmd->base.duplex = DUPLEX_UNKNOWN;

@@ -492,6 +492,7 @@ struct qede_fastpath {
 #define QEDE_SP_HW_ERR 4
 #define QEDE_SP_ARFS_CONFIG 5
 #define QEDE_SP_AER 7
+#define QEDE_SP_DISABLE 8
 #ifdef CONFIG_RFS_ACCEL
 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,

@@ -1005,6 +1005,13 @@ static void qede_sp_task(struct work_struct *work)
 struct qede_dev *edev = container_of(work, struct qede_dev,
 sp_task.work);
+/* Disable execution of this deferred work once
+ * qede removal is in progress, this stop any future
+ * scheduling of sp_task.
+ */
+if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+return;
 /* The locking scheme depends on the specific flag:
 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
 * ensure that ongoing flows are ended and new ones are not started.
@@ -1292,6 +1299,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
 if (mode != QEDE_REMOVE_RECOVERY) {
+set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
 unregister_netdev(ndev);
 cancel_delayed_work_sync(&edev->sp_task);

@@ -2061,8 +2061,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm
 for (i = 1; i <= common->port_num; i++) {
 struct am65_cpsw_port *port = am65_common_get_port(common, i);
-struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+struct am65_cpsw_ndev_priv *priv;
+if (!port->ndev)
+continue;
+priv = am65_ndev_to_priv(port->ndev);
 priv->offload_fwd_mark = set_val;
 }
 }

@@ -319,7 +319,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
 u64_stats_init(&mhi_netdev->stats.tx_syncp);
 /* Start MHI channels */
-err = mhi_prepare_for_transfer(mhi_dev);
+err = mhi_prepare_for_transfer(mhi_dev, 0);
 if (err)
 goto out_err;

@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
 }
 static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
-const u32 ksz_phy_id)
+const bool ksz_8051)
 {
 int ret;
-if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
 return 0;
 ret = phy_read(phydev, MII_BMSR);
@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
 * the switch does not.
 */
 ret &= BMSR_ERCAP;
-if (ksz_phy_id == PHY_ID_KSZ8051)
+if (ksz_8051)
 return ret;
 else
 return !ret;
@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
 static int ksz8051_match_phy_device(struct phy_device *phydev)
 {
-return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+return ksz8051_ksz8795_match_phy_device(phydev, true);
 }
 static int ksz8081_config_init(struct phy_device *phydev)
@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)
 static int ksz8795_match_phy_device(struct phy_device *phydev)
 {
-return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+return ksz8051_ksz8795_match_phy_device(phydev, false);
 }
 static int ksz9021_load_values_from_of(struct phy_device *phydev,

@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
 struct phy_device *phydev = dev->net->phydev;
 struct ethtool_link_ksettings ecmd;
-int ladv, radv, ret;
+int ladv, radv, ret, link;
 u32 buf;
 /* clear LAN78xx interrupt status */
@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 if (unlikely(ret < 0))
 return -EIO;
+mutex_lock(&phydev->lock);
 phy_read_status(phydev);
+link = phydev->link;
+mutex_unlock(&phydev->lock);
-if (!phydev->link && dev->link_on) {
+if (!link && dev->link_on) {
 dev->link_on = false;
 /* reset MAC */
@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 return -EIO;
 del_timer(&dev->stat_monitor);
-} else if (phydev->link && !dev->link_on) {
+} else if (link && !dev->link_on) {
 dev->link_on = true;
 phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
 static u32 lan78xx_get_link(struct net_device *net)
 {
-phy_read_status(net->phydev);
+u32 link;
-return net->phydev->link;
+mutex_lock(&net->phydev->lock);
+phy_read_status(net->phydev);
+link = net->phydev->link;
+mutex_unlock(&net->phydev->lock);
+return link;
 }
 static void lan78xx_get_drvinfo(struct net_device *net,

@@ -1,31 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
+ *  Copyright (c) 1999-2021 Petko Manolov (petkan@nucleusys.com)
 *
- *	ChangeLog:
- *		....	Most of the time spent on reading sources & docs.
- *		v0.2.x	First official release for the Linux kernel.
- *		v0.3.0	Beutified and structured, some bugs fixed.
- *		v0.3.x	URBifying bulk requests and bugfixing. First relatively
- *			stable release. Still can touch device's registers only
- *			from top-halves.
- *		v0.4.0	Control messages remained unurbified are now URBs.
- *			Now we can touch the HW at any time.
- *		v0.4.9	Control urbs again use process context to wait. Argh...
- *			Some long standing bugs (enable_net_traffic) fixed.
- *			Also nasty trick about resubmiting control urb from
- *			interrupt context used. Please let me know how it
- *			behaves. Pegasus II support added since this version.
- *			TODO: suppressing HCD warnings spewage on disconnect.
- *		v0.4.13	Ethernet address is now set at probe(), not at open()
- *			time as this seems to break dhcpd.
- *		v0.5.0	branch to 2.5.x kernels
- *		v0.5.1	ethtool support added
- *		v0.5.5	rx socket buffers are in a pool and the their allocation
- *			is out of the interrupt routine.
- *		...
- *		v0.9.3	simplified [get|set]_register(s), async update registers
- *			logic revisited, receive skb_pool removed.
 */
 #include <linux/sched.h>
@@ -45,7 +21,6 @@
 /*
 * Version Information
 */
-#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
 #define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
 #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
@@ -132,9 +107,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 const void *data)
 {
-return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+int ret;
+ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
 PEGASUS_REQT_WRITE, 0, indx, data, size,
 1000, GFP_NOIO);
+if (ret < 0)
+netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+return ret;
 }
 /*
@@ -145,10 +126,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
 void *buf = &data;
+int ret;
-return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
 PEGASUS_REQT_WRITE, data, indx, buf, 1,
 1000, GFP_NOIO);
+if (ret < 0)
+netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
+return ret;
 }
 static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +174,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
 static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
 {
-int i;
-__u8 data[4] = { phy, 0, 0, indx };
+int i, ret;
 __le16 regdi;
-int ret = -ETIMEDOUT;
+__u8 data[4] = { phy, 0, 0, indx };
 if (cmd & PHY_WRITE) {
 __le16 *t = (__le16 *) & data[1];
@@ -207,12 +192,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
 if (data[0] & PHY_DONE)
 break;
 }
-if (i >= REG_TIMEOUT)
+if (i >= REG_TIMEOUT) {
+ret = -ETIMEDOUT;
 goto fail;
+}
 if (cmd & PHY_READ) {
 ret = get_registers(p, PhyData, 2, &regdi);
+if (ret < 0)
+goto fail;
 *regd = le16_to_cpu(regdi);
-return ret;
 }
 return 0;
 fail:
@@ -235,9 +223,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
 pegasus_t *pegasus = netdev_priv(dev);
+int ret;
 u16 res;
-read_mii_word(pegasus, phy_id, loc, &res);
+ret = read_mii_word(pegasus, phy_id, loc, &res);
+if (ret < 0)
+return ret;
 return (int)res;
 }
@@ -251,10 +243,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
-int i;
-__u8 tmp = 0;
+int ret, i;
 __le16 retdatai;
-int ret;
+__u8 tmp = 0;
 set_register(pegasus, EpromCtrl, 0);
 set_register(pegasus, EpromOffset, index);
@@ -262,21 +253,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 for (i = 0; i < REG_TIMEOUT; i++) {
 ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
+if (ret < 0)
+goto fail;
 if (tmp & EPROM_DONE)
 break;
-if (ret == -ESHUTDOWN)
-goto fail;
 }
-if (i >= REG_TIMEOUT)
+if (i >= REG_TIMEOUT) {
+ret = -ETIMEDOUT;
 goto fail;
+}
 ret = get_registers(pegasus, EpromData, 2, &retdatai);
+if (ret < 0)
+goto fail;
 *retdata = le16_to_cpu(retdatai);
 return ret;
 fail:
-netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
-return -ETIMEDOUT;
+netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+return ret;
 }
 #ifdef PEGASUS_WRITE_EEPROM
@@ -324,10 +319,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
 return ret;
 fail:
-netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
 return -ETIMEDOUT;
 }
 #endif /* PEGASUS_WRITE_EEPROM */
 static inline int get_node_id(pegasus_t *pegasus, u8 *id)
 {
@@ -367,19 +362,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
 return;
 err:
 eth_hw_addr_random(pegasus->net);
-dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
 return;
 }
 static inline int reset_mac(pegasus_t *pegasus)
 {
+int ret, i;
 __u8 data = 0x8;
-int i;
 set_register(pegasus, EthCtrl1, data);
 for (i = 0; i < REG_TIMEOUT; i++) {
-get_registers(pegasus, EthCtrl1, 1, &data);
+ret = get_registers(pegasus, EthCtrl1, 1, &data);
+if (ret < 0)
+goto fail;
 if (~data & 0x08) {
 if (loopback)
 break;
@@ -402,22 +399,29 @@ static inline int reset_mac(pegasus_t *pegasus)
 }
 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
 __u16 auxmode;
-read_mii_word(pegasus, 3, 0x1b, &auxmode);
+ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
+if (ret < 0)
+goto fail;
 auxmode |= 4;
 write_mii_word(pegasus, 3, 0x1b, &auxmode);
 }
 return 0;
+fail:
+netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
+return ret;
 }
 static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
 {
-__u16 linkpart;
-__u8 data[4];
 pegasus_t *pegasus = netdev_priv(dev);
 int ret;
+__u16 linkpart;
+__u8 data[4];
-read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
+ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
if (ret < 0)
goto fail;
data[0] = 0xc8; /* TX & RX enable, append status, no CRC */ data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0; data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@ -435,11 +439,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 || usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
u16 auxmode; u16 auxmode;
read_mii_word(pegasus, 0, 0x1b, &auxmode); ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
if (ret < 0)
goto fail;
auxmode |= 4; auxmode |= 4;
write_mii_word(pegasus, 0, 0x1b, &auxmode); write_mii_word(pegasus, 0, 0x1b, &auxmode);
} }
return 0;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret; return ret;
} }
@ -447,9 +456,9 @@ static void read_bulk_callback(struct urb *urb)
{ {
pegasus_t *pegasus = urb->context; pegasus_t *pegasus = urb->context;
struct net_device *net; struct net_device *net;
u8 *buf = urb->transfer_buffer;
int rx_status, count = urb->actual_length; int rx_status, count = urb->actual_length;
int status = urb->status; int status = urb->status;
u8 *buf = urb->transfer_buffer;
__u16 pkt_len; __u16 pkt_len;
if (!pegasus) if (!pegasus)
@ -735,12 +744,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus)
set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
} }
static inline void get_interrupt_interval(pegasus_t *pegasus) static inline int get_interrupt_interval(pegasus_t *pegasus)
{ {
u16 data; u16 data;
u8 interval; u8 interval;
int ret;
ret = read_eprom_word(pegasus, 4, &data);
if (ret < 0)
return ret;
read_eprom_word(pegasus, 4, &data);
interval = data >> 8; interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) { if (pegasus->usb->speed != USB_SPEED_HIGH) {
if (interval < 0x80) { if (interval < 0x80) {
@ -755,6 +768,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus)
} }
} }
pegasus->intr_interval = interval; pegasus->intr_interval = interval;
return 0;
} }
static void set_carrier(struct net_device *net) static void set_carrier(struct net_device *net)
@ -880,7 +895,6 @@ static void pegasus_get_drvinfo(struct net_device *dev,
pegasus_t *pegasus = netdev_priv(dev); pegasus_t *pegasus = netdev_priv(dev);
strlcpy(info->driver, driver_name, sizeof(info->driver)); strlcpy(info->driver, driver_name, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
} }
@ -999,8 +1013,7 @@ static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
data[0] = pegasus->phy; data[0] = pegasus->phy;
fallthrough; fallthrough;
case SIOCDEVPRIVATE + 1: case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
break; break;
case SIOCDEVPRIVATE + 2: case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
@ -1034,22 +1047,25 @@ static void pegasus_set_multicast(struct net_device *net)
static __u8 mii_phy_probe(pegasus_t *pegasus) static __u8 mii_phy_probe(pegasus_t *pegasus)
{ {
int i; int i, ret;
__u16 tmp; __u16 tmp;
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
read_mii_word(pegasus, i, MII_BMSR, &tmp); ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
if (ret < 0)
goto fail;
if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0) if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
continue; continue;
else else
return i; return i;
} }
fail:
return 0xff; return 0xff;
} }
static inline void setup_pegasus_II(pegasus_t *pegasus) static inline void setup_pegasus_II(pegasus_t *pegasus)
{ {
int ret;
__u8 data = 0xa5; __u8 data = 0xa5;
set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg1d, 0);
@ -1061,7 +1077,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg7b, 2); set_register(pegasus, Reg7b, 2);
set_register(pegasus, 0x83, data); set_register(pegasus, 0x83, data);
get_registers(pegasus, 0x83, 1, &data); ret = get_registers(pegasus, 0x83, 1, &data);
if (ret < 0)
goto fail;
if (data == 0xa5) if (data == 0xa5)
pegasus->chip = 0x8513; pegasus->chip = 0x8513;
@ -1076,6 +1094,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 6); set_register(pegasus, Reg81, 6);
else else
set_register(pegasus, Reg81, 2); set_register(pegasus, Reg81, 2);
return;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
} }
static void check_carrier(struct work_struct *work) static void check_carrier(struct work_struct *work)
@ -1150,7 +1172,9 @@ static int pegasus_probe(struct usb_interface *intf,
| NETIF_MSG_PROBE | NETIF_MSG_LINK); | NETIF_MSG_PROBE | NETIF_MSG_LINK);
pegasus->features = usb_dev_id[dev_index].private; pegasus->features = usb_dev_id[dev_index].private;
get_interrupt_interval(pegasus); res = get_interrupt_interval(pegasus);
if (res)
goto out2;
if (reset_mac(pegasus)) { if (reset_mac(pegasus)) {
dev_err(&intf->dev, "can't reset MAC\n"); dev_err(&intf->dev, "can't reset MAC\n");
res = -EIO; res = -EIO;
@ -1297,7 +1321,7 @@ static void __init parse_id(char *id)
static int __init pegasus_init(void) static int __init pegasus_init(void)
{ {
pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION); pr_info("%s: " DRIVER_DESC "\n", driver_name);
if (devid) if (devid)
parse_id(devid); parse_id(devid);
return usb_register(&pegasus_driver); return usb_register(&pegasus_driver);
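A note on the pattern used throughout the pegasus rework above: every register/MII accessor now returns the transport status, emits a debug message on failure, and callers stop using the (possibly stale) output buffer when the call fails. A minimal sketch of that shape, not taken from the driver (the request code, register index and link bit below are made-up placeholders):

#include <linux/usb.h>
#include <linux/bits.h>

/* Illustration only: an accessor that propagates the USB status. */
static int example_reg_read(struct usb_device *udev, u16 indx, u16 size,
			    void *data)
{
	int ret;

	ret = usb_control_msg_recv(udev, 0, 0x10 /* placeholder request */,
				   USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				   0, indx, data, size, 1000, GFP_NOIO);
	if (ret < 0)
		pr_debug("%s failed with %d\n", __func__, ret);
	return ret;
}

/* ... and a caller that refuses to interpret data it never received. */
static int example_check_link(struct usb_device *udev)
{
	__le16 raw;
	int ret;

	ret = example_reg_read(udev, 0x84 /* placeholder register */,
			       sizeof(raw), &raw);
	if (ret < 0)
		return ret;
	return !!(le16_to_cpu(raw) & BIT(2));	/* placeholder link bit */
}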

View file

@ -10,10 +10,10 @@
#define IOSM_CP_VERSION 0x0100UL #define IOSM_CP_VERSION 0x0100UL
/* DL dir Aggregation support mask */ /* DL dir Aggregation support mask */
#define DL_AGGR BIT(23) #define DL_AGGR BIT(9)
/* UL dir Aggregation support mask */ /* UL dir Aggregation support mask */
#define UL_AGGR BIT(22) #define UL_AGGR BIT(8)
/* UL flow credit support mask */ /* UL flow credit support mask */
#define UL_FLOW_CREDIT BIT(21) #define UL_FLOW_CREDIT BIT(21)

View file

@ -320,7 +320,7 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
return; return;
} }
ul_credits = fct->vfl.nr_of_bytes; ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d", dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits); if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
@ -586,7 +586,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
qlt->reserved[0] = 0; qlt->reserved[0] = 0;
qlt->reserved[1] = 0; qlt->reserved[1] = 0;
qlt->vfl.nr_of_bytes = session->ul_list.qlen; qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
/* Add QLT to the transfer list. */ /* Add QLT to the transfer list. */
skb_queue_tail(&ipc_mux->channel->ul_list, skb_queue_tail(&ipc_mux->channel->ul_list,

View file

@ -106,7 +106,7 @@ struct mux_lite_cmdh {
* @nr_of_bytes: Number of bytes available to transmit in the queue. * @nr_of_bytes: Number of bytes available to transmit in the queue.
*/ */
struct mux_lite_vfl { struct mux_lite_vfl {
u32 nr_of_bytes; __le32 nr_of_bytes;
}; };
/** /**
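The two iosm hunks above fix a byte-order bug: nr_of_bytes is a wire-format field, so it is declared __le32 and every producer/consumer now converts with cpu_to_le32()/le32_to_cpu(). A generic sketch of that convention follows; the structure and field names are invented for illustration, not the iosm layout. With the __le32 annotation in place, a sparse endian check flags any direct host-order assignment:

#include <linux/types.h>
#include <asm/byteorder.h>

/* A wire-format descriptor: every multi-byte field carries its
 * on-the-wire byte order in its type (hypothetical layout).
 */
struct example_wire_hdr {
	__le32 nr_of_bytes;
	__le16 if_id;
	__le16 reserved;
};

static void example_fill_hdr(struct example_wire_hdr *hdr, u32 qlen, u16 if_id)
{
	hdr->nr_of_bytes = cpu_to_le32(qlen);	/* host -> wire */
	hdr->if_id = cpu_to_le16(if_id);
	hdr->reserved = 0;
}

static u32 example_read_credits(const struct example_wire_hdr *hdr)
{
	return le32_to_cpu(hdr->nr_of_bytes);	/* wire -> host */
}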

View file

@ -412,8 +412,8 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
} }
if (p_td->buffer.address != IPC_CB(skb)->mapping) { if (p_td->buffer.address != IPC_CB(skb)->mapping) {
dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p", dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
(void *)p_td->buffer.address, skb->data); (unsigned long long)p_td->buffer.address, skb->data);
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL; skb = NULL;
goto ret; goto ret;

View file

@ -228,7 +228,7 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL); RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
/* unregistering includes synchronize_net() */ /* unregistering includes synchronize_net() */
unregister_netdevice(dev); unregister_netdevice_queue(dev, head);
unlock: unlock:
mutex_unlock(&ipc_wwan->if_mutex); mutex_unlock(&ipc_wwan->if_mutex);

View file

@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port)
int ret; int ret;
/* Start mhi device's channel(s) */ /* Start mhi device's channel(s) */
ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0);
if (ret) if (ret)
return ret; return ret;

View file

@ -608,7 +608,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);
/* Start MHI channels */ /* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev); err = mhi_prepare_for_transfer(mhi_dev, 0);
if (err) if (err)
return err; return err;

View file

@ -112,6 +112,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev,
for (i = 0; i < socket_count; i++) { for (i = 0; i < socket_count; i++) {
sockets[i].card_state = 1; /* 1 = present but empty */ sockets[i].card_state = 1; /* 1 = present but empty */
sockets[i].io_base = pci_resource_start(dev, 0); sockets[i].io_base = pci_resource_start(dev, 0);
sockets[i].dev = dev;
sockets[i].socket.features |= SS_CAP_PCCARD; sockets[i].socket.features |= SS_CAP_PCCARD;
sockets[i].socket.map_size = 0x1000; sockets[i].socket.map_size = 0x1000;
sockets[i].socket.irq_mask = 0; sockets[i].socket.irq_mask = 0;

View file

@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr) if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL; err = SCSI_DH_RES_TEMP_UNAVAIL;
else { else {
list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev; h->sdev = sdev;
list_add_rcu(&h->node, &h->ctlr->dh_list);
} }
spin_unlock(&list_lock); spin_unlock(&list_lock);
err = SCSI_DH_OK; err = SCSI_DH_OK;
@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock); spin_lock(&list_lock);
if (h->ctlr) { if (h->ctlr) {
list_del_rcu(&h->node); list_del_rcu(&h->node);
h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller); kref_put(&h->ctlr->kref, release_controller);
} }
spin_unlock(&list_lock); spin_unlock(&list_lock);
sdev->handler_data = NULL; sdev->handler_data = NULL;
synchronize_rcu();
kfree(h); kfree(h);
} }
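The rdac hunks above tighten the RCU discipline around dh_list: the node is fully initialised before it is published with list_add_rcu(), its fields are left valid after list_del_rcu() so readers that already found it stay safe, and synchronize_rcu() runs before kfree(). A generic publish/retire sketch of that pattern (the example_* names are invented, not the scsi_dh_rdac types):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_node {
	struct list_head node;
	void *payload;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void example_publish(struct example_node *n, void *payload)
{
	n->payload = payload;			/* fully initialise first ... */
	spin_lock(&example_lock);
	list_add_rcu(&n->node, &example_list);	/* ... then publish */
	spin_unlock(&example_lock);
}

static void example_retire(struct example_node *n)
{
	spin_lock(&example_lock);
	list_del_rcu(&n->node);		/* unpublish; keep fields valid */
	spin_unlock(&example_lock);
	synchronize_rcu();		/* wait out existing readers */
	kfree(n);			/* now nobody can still see it */
}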

View file

@ -807,6 +807,13 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
struct ibmvfc_event *evt = &pool->events[i]; struct ibmvfc_event *evt = &pool->events[i];
/*
* evt->active states
* 1 = in flight
* 0 = being completed
* -1 = free/freed
*/
atomic_set(&evt->active, -1);
atomic_set(&evt->free, 1); atomic_set(&evt->free, 1);
evt->crq.valid = 0x80; evt->crq.valid = 0x80;
evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
@ -1017,6 +1024,7 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
BUG_ON(!ibmvfc_valid_event(pool, evt)); BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1); BUG_ON(atomic_inc_return(&evt->free) != 1);
BUG_ON(atomic_dec_and_test(&evt->active));
spin_lock_irqsave(&evt->queue->l_lock, flags); spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->free); list_add_tail(&evt->queue_list, &evt->queue->free);
@ -1072,6 +1080,12 @@ static void ibmvfc_complete_purge(struct list_head *purge_list)
**/ **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{ {
/*
* Anything we are failing should still be active. Otherwise, it
* implies we already got a response for the command and are doing
* something bad like double completing it.
*/
BUG_ON(!atomic_dec_and_test(&evt->active));
if (evt->cmnd) { if (evt->cmnd) {
evt->cmnd->result = (error_code << 16); evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done; evt->done = ibmvfc_scsi_eh_done;
@ -1723,6 +1737,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->done(evt); evt->done(evt);
} else { } else {
atomic_set(&evt->active, 1);
spin_unlock_irqrestore(&evt->queue->l_lock, flags); spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt); ibmvfc_trc_start(evt);
} }
@ -3251,7 +3266,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
return; return;
} }
if (unlikely(atomic_read(&evt->free))) { if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba); crq->ioba);
return; return;
@ -3778,7 +3793,7 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
return; return;
} }
if (unlikely(atomic_read(&evt->free))) { if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba); crq->ioba);
return; return;

View file

@ -745,6 +745,7 @@ struct ibmvfc_event {
struct ibmvfc_target *tgt; struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd; struct scsi_cmnd *cmnd;
atomic_t free; atomic_t free;
atomic_t active;
union ibmvfc_iu *xfer_iu; union ibmvfc_iu *xfer_iu;
void (*done)(struct ibmvfc_event *evt); void (*done)(struct ibmvfc_event *evt);
void (*_done)(struct ibmvfc_event *evt); void (*_done)(struct ibmvfc_event *evt);
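The ibmvfc change above replaces a racy atomic_read(&evt->free) test with a tri-state evt->active counter (-1 free, 1 in flight, 0 being completed) and claims events in the response path with atomic_dec_if_positive(), which can only succeed once per send. A stripped-down sketch of that claim-once idea (the example_* names are invented, not the ibmvfc structures):

#include <linux/atomic.h>
#include <linux/types.h>

struct example_evt {
	atomic_t active;	/* -1 = free, 1 = in flight, 0 = completing */
};

static void example_evt_init(struct example_evt *evt)
{
	atomic_set(&evt->active, -1);
}

static void example_evt_send(struct example_evt *evt)
{
	atomic_set(&evt->active, 1);	/* a response may now claim it */
}

/* Returns true if this caller won the right to complete the event. */
static bool example_evt_claim(struct example_evt *evt)
{
	/* 1 -> 0 succeeds (returns 0); 0 or -1 stays put (returns < 0). */
	return atomic_dec_if_positive(&evt->active) == 0;
}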

View file

@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd; mimd_t mimd;
uint32_t adapno; uint32_t adapno;
int iterator; int iterator;
bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT; *rval = -EFAULT;
@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL; adapter = NULL;
iterator = 0; iterator = 0;
is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) { list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break; if (iterator++ == adapno) {
is_found = true;
break;
}
} }
if (!adapter) { if (!is_found) {
*rval = -ENODEV; *rval = -ENODEV;
return NULL; return NULL;
} }
@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno; uint32_t adapno;
int iterator; int iterator;
mraid_mmadp_t* adapter; mraid_mmadp_t* adapter;
bool is_found;
/* /*
* When the kioc returns from driver, make sure it still doesn't * When the kioc returns from driver, make sure it still doesn't
@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0; iterator = 0;
adapter = NULL; adapter = NULL;
adapno = kioc->adapno; adapno = kioc->adapno;
is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n")); "ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) { list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break; if (iterator++ == adapno) {
is_found = true;
break;
}
} }
kioc->timedout = 0; kioc->timedout = 0;
if (adapter) { if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc ); mraid_mm_dealloc_kioc( adapter, kioc );
}
} }
else { else {
wake_up(&wait_q); wake_up(&wait_q);
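The megaraid_mm fix above works around a common list_for_each_entry() pitfall: if the loop finishes without hitting break, the cursor does not end up NULL, it points at memory computed from the list head, so the old `if (!adapter)` test could never fire. The patch records a hit in a separate is_found flag instead. A small self-contained sketch of the corrected shape (the example_* names are invented):

#include <linux/list.h>
#include <linux/types.h>

struct example_adapter {
	struct list_head list;
	int unique_id;
};

static LIST_HEAD(adapters_list);

static struct example_adapter *example_find(int adapno)
{
	struct example_adapter *adapter;
	bool is_found = false;
	int iterator = 0;

	list_for_each_entry(adapter, &adapters_list, list) {
		if (iterator++ == adapno) {
			is_found = true;
			break;
		}
	}

	/* Wrong: 'adapter' is never NULL here, even if nothing matched. */
	return is_found ? adapter : NULL;
}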

View file

@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task) void pm8001_task_done(struct sas_task *task)
{ {
if (!del_timer(&task->slow_task->timer)) del_timer(&task->slow_task->timer);
return;
complete(&task->slow_task->completion); complete(&task->slow_task->completion);
} }
@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
{ {
struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task; struct sas_task *task = slow->task;
unsigned long flags;
task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_lock_irqsave(&task->task_state_lock, flags);
complete(&task->slow_task->completion); if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
} }
#define PM8001_TASK_TIMEOUT 20 #define PM8001_TASK_TIMEOUT 20
@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
} }
res = -TMF_RESP_FUNC_FAILED; res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */ /* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
pm8001_dbg(pm8001_ha, FAIL, tmf->tmf);
"TMF task[%x]timeout.\n", goto ex_err;
tmf->tmf);
goto ex_err;
}
} }
if (task->task_status.resp == SAS_TASK_COMPLETE && if (task->task_status.resp == SAS_TASK_COMPLETE &&
@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
wait_for_completion(&task->slow_task->completion); wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED; res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */ /* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
pm8001_dbg(pm8001_ha, FAIL, goto ex_err;
"TMF task timeout.\n");
goto ex_err;
}
} }
if (task->task_status.resp == SAS_TASK_COMPLETE && if (task->task_status.resp == SAS_TASK_COMPLETE &&
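The pm8001 hunks above close a race between the TMF timeout timer and normal task completion: the timer handler now takes task_state_lock and only marks the task aborted and completes it if SAS_TASK_STATE_DONE is not already set. The same check-under-lock shape, reduced to a generic sketch (the example_* types and flags are invented, not the libsas ones):

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/bits.h>

#define EX_STATE_DONE		BIT(0)
#define EX_STATE_ABORTED	BIT(1)

struct example_task {
	spinlock_t state_lock;
	unsigned long state_flags;
	struct completion done;
};

/* Timeout path: only abort if the normal completion hasn't run yet. */
static void example_timeout(struct example_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->state_lock, flags);
	if (!(task->state_flags & EX_STATE_DONE)) {
		task->state_flags |= EX_STATE_ABORTED;
		complete(&task->done);
	}
	spin_unlock_irqrestore(&task->state_lock, flags);
}

/* Normal completion path: mark DONE under the same lock, then complete. */
static void example_task_done(struct example_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->state_lock, flags);
	task->state_flags |= EX_STATE_DONE;
	spin_unlock_irqrestore(&task->state_lock, flags);
	complete(&task->done);
}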

View file

@ -475,7 +475,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget); error = shost->hostt->target_alloc(starget);
if(error) { if(error) {
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); if (error != -ENXIO)
dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final /* don't want scsi_target_reap to do the final
* put because it will be under the host lock */ * put because it will be under the host lock */
scsi_target_destroy(starget); scsi_target_destroy(starget);

View file

@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex); mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state); ret = scsi_device_set_state(sdev, state);
/* /*
* If the device state changes to SDEV_RUNNING, we need to run * If the device state changes to SDEV_RUNNING, we need to
* the queue to avoid I/O hang. * rescan the device to revalidate it, and run the queue to
* avoid I/O hang.
*/ */
if (ret == 0 && state == SDEV_RUNNING) if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true); blk_mq_run_hw_queues(sdev->request_queue, true);
}
mutex_unlock(&sdev->state_mutex); mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL; return ret == 0 ? count : -EINVAL;

View file

@ -221,7 +221,7 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
else if (med->media_event_code == 2) else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE; return DISK_EVENT_MEDIA_CHANGE;
else if (med->media_event_code == 3) else if (med->media_event_code == 3)
return DISK_EVENT_EJECT_REQUEST; return DISK_EVENT_MEDIA_CHANGE;
return 0; return 0;
} }

View file

@ -4619,7 +4619,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
static int cifs_readpage(struct file *file, struct page *page) static int cifs_readpage(struct file *file, struct page *page)
{ {
loff_t offset = (loff_t)page->index << PAGE_SHIFT; loff_t offset = page_file_offset(page);
int rc = -EACCES; int rc = -EACCES;
unsigned int xid; unsigned int xid;

View file

@ -925,6 +925,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->cred_uid = uid; ctx->cred_uid = uid;
ctx->cruid_specified = true; ctx->cruid_specified = true;
break; break;
case Opt_backupuid:
uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(uid))
goto cifs_parse_mount_err;
ctx->backupuid = uid;
ctx->backupuid_specified = true;
break;
case Opt_backupgid: case Opt_backupgid:
gid = make_kgid(current_user_ns(), result.uint_32); gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(gid)) if (!gid_valid(gid))

View file

@ -3617,7 +3617,8 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
char *buf) char *buf)
{ {
struct cifs_io_parms io_parms = {0}; struct cifs_io_parms io_parms = {0};
int rc, nbytes; int nbytes;
int rc = 0;
struct kvec iov[2]; struct kvec iov[2];
io_parms.netfid = cfile->fid.netfid; io_parms.netfid = cfile->fid.netfid;

View file

@ -31,6 +31,21 @@
#include "internal.h" #include "internal.h"
/*
* New pipe buffers will be restricted to this size while the user is exceeding
* their pipe buffer quota. The general pipe use case needs at least two
* buffers: one for data yet to be read, and one for new data. If this is less
* than two, then a write to a non-empty pipe may block even if the pipe is not
* full. This can occur with GNU make jobserver or similar uses of pipes as
* semaphores: multiple processes may be waiting to write tokens back to the
* pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
*
* Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
* own risk, namely: pipe writes to non-full pipes may block until the pipe is
* emptied.
*/
#define PIPE_MIN_DEF_BUFFERS 2
/* /*
* The max size that a non-root user is allowed to grow the pipe. Can * The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size * be set by root in /proc/sys/fs/pipe-max-size
@ -781,8 +796,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
user_bufs = account_pipe_buffers(user, 0, pipe_bufs); user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) { if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
user_bufs = account_pipe_buffers(user, pipe_bufs, 1); user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
pipe_bufs = 1; pipe_bufs = PIPE_MIN_DEF_BUFFERS;
} }
if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user()) if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
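PIPE_MIN_DEF_BUFFERS keeps a user who is over the soft pipe-buffer quota at two slots, so jobserver-style token pipes do not block unexpectedly; as the new comment notes, userspace can still shrink a pipe below that with F_SETPIPE_SZ at its own risk. A tiny userspace illustration of shrinking a pipe (the kernel rounds the requested byte count up, to at least one page):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];

	if (pipe(pfd) < 0)
		return 1;

	/* Request the smallest capacity; the kernel rounds up to one page. */
	if (fcntl(pfd[1], F_SETPIPE_SZ, 1) < 0)
		perror("F_SETPIPE_SZ");

	printf("pipe capacity now %d bytes\n", fcntl(pfd[1], F_GETPIPE_SZ));

	close(pfd[0]);
	close(pfd[1]);
	return 0;
}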

View file

@ -411,7 +411,16 @@ struct xfs_log_dinode {
/* start of the extended dinode, writable fields */ /* start of the extended dinode, writable fields */
uint32_t di_crc; /* CRC of the inode */ uint32_t di_crc; /* CRC of the inode */
uint64_t di_changecount; /* number of attribute changes */ uint64_t di_changecount; /* number of attribute changes */
xfs_lsn_t di_lsn; /* flush sequence */
/*
* The LSN we write to this field during formatting is not a reflection
* of the current on-disk LSN. It should never be used for recovery
* sequencing, nor should it be recovered into the on-disk inode at all.
* See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk()
* for details.
*/
xfs_lsn_t di_lsn;
uint64_t di_flags2; /* more random flags */ uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */ uint32_t di_cowextsize; /* basic cow extent size for file */
uint8_t di_pad2[12]; /* more padding for future expansion */ uint8_t di_pad2[12]; /* more padding for future expansion */

View file

@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer(
static xfs_lsn_t static xfs_lsn_t
xlog_recover_get_buf_lsn( xlog_recover_get_buf_lsn(
struct xfs_mount *mp, struct xfs_mount *mp,
struct xfs_buf *bp) struct xfs_buf *bp,
struct xfs_buf_log_format *buf_f)
{ {
uint32_t magic32; uint32_t magic32;
uint16_t magic16; uint16_t magic16;
@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn(
void *blk = bp->b_addr; void *blk = bp->b_addr;
uuid_t *uuid; uuid_t *uuid;
xfs_lsn_t lsn = -1; xfs_lsn_t lsn = -1;
uint16_t blft;
/* v4 filesystems always recover immediately */ /* v4 filesystems always recover immediately */
if (!xfs_sb_version_hascrc(&mp->m_sb)) if (!xfs_sb_version_hascrc(&mp->m_sb))
goto recover_immediately; goto recover_immediately;
/*
* realtime bitmap and summary file blocks do not have magic numbers or
* UUIDs, so we must recover them immediately.
*/
blft = xfs_blft_from_flags(buf_f);
if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
goto recover_immediately;
magic32 = be32_to_cpu(*(__be32 *)blk); magic32 = be32_to_cpu(*(__be32 *)blk);
switch (magic32) { switch (magic32) {
case XFS_ABTB_CRC_MAGIC: case XFS_ABTB_CRC_MAGIC:
@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn(
switch (magicda) { switch (magicda) {
case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAF1_MAGIC:
case XFS_DIR3_LEAFN_MAGIC: case XFS_DIR3_LEAFN_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
case XFS_DA3_NODE_MAGIC: case XFS_DA3_NODE_MAGIC:
lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2(
* the verifier will be reset to match whatever recover turns that * the verifier will be reset to match whatever recover turns that
* buffer into. * buffer into.
*/ */
lsn = xlog_recover_get_buf_lsn(mp, bp); lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
trace_xfs_log_recover_buf_skip(log, buf_f); trace_xfs_log_recover_buf_skip(log, buf_f);
xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN); xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);

View file

@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts(
STATIC void STATIC void
xfs_log_dinode_to_disk( xfs_log_dinode_to_disk(
struct xfs_log_dinode *from, struct xfs_log_dinode *from,
struct xfs_dinode *to) struct xfs_dinode *to,
xfs_lsn_t lsn)
{ {
to->di_magic = cpu_to_be16(from->di_magic); to->di_magic = cpu_to_be16(from->di_magic);
to->di_mode = cpu_to_be16(from->di_mode); to->di_mode = cpu_to_be16(from->di_mode);
@ -182,7 +183,7 @@ xfs_log_dinode_to_disk(
to->di_flags2 = cpu_to_be64(from->di_flags2); to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize); to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(from->di_ino); to->di_ino = cpu_to_be64(from->di_ino);
to->di_lsn = cpu_to_be64(from->di_lsn); to->di_lsn = cpu_to_be64(lsn);
memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2)); memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
uuid_copy(&to->di_uuid, &from->di_uuid); uuid_copy(&to->di_uuid, &from->di_uuid);
to->di_flushiter = 0; to->di_flushiter = 0;
@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2(
} }
/* /*
* If the inode has an LSN in it, recover the inode only if it's less * If the inode has an LSN in it, recover the inode only if the on-disk
* than the lsn of the transaction we are replaying. Note: we still * inode's LSN is older than the lsn of the transaction we are
* need to replay an owner change even though the inode is more recent * replaying. We can have multiple checkpoints with the same start LSN,
* than the transaction as there is no guarantee that all the btree * so the current LSN being equal to the on-disk LSN doesn't necessarily
* blocks are more recent than this transaction, too. * mean that the on-disk inode is more recent than the change being
* replayed.
*
* We must check the current_lsn against the on-disk inode
* here because we can't trust the log dinode to contain a valid LSN
* (see comment below before replaying the log dinode for details).
*
* Note: we still need to replay an owner change even though the inode
* is more recent than the transaction as there is no guarantee that all
* the btree blocks are more recent than this transaction, too.
*/ */
if (dip->di_version >= 3) { if (dip->di_version >= 3) {
xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) {
trace_xfs_log_recover_inode_skip(log, in_f); trace_xfs_log_recover_inode_skip(log, in_f);
error = 0; error = 0;
goto out_owner_change; goto out_owner_change;
@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2(
goto out_release; goto out_release;
} }
/* recover the log dinode inode into the on disk inode */ /*
xfs_log_dinode_to_disk(ldip, dip); * Recover the log dinode inode into the on disk inode.
*
* The LSN in the log dinode is garbage - it can be zero or reflect
* stale in-memory runtime state that isn't coherent with the changes
* logged in this transaction or the changes written to the on-disk
* inode. Hence we write the current LSN into the inode because that
* matches what xfs_iflush() would write into the inode when flushing
* the changes in this transaction.
*/
xfs_log_dinode_to_disk(ldip, dip, current_lsn);
fields = in_f->ilf_fields; fields = in_f->ilf_fields;
if (fields & XFS_ILOG_DEV) if (fields & XFS_ILOG_DEV)

View file

@ -78,13 +78,12 @@ xlog_verify_iclog(
STATIC void STATIC void
xlog_verify_tail_lsn( xlog_verify_tail_lsn(
struct xlog *log, struct xlog *log,
struct xlog_in_core *iclog, struct xlog_in_core *iclog);
xfs_lsn_t tail_lsn);
#else #else
#define xlog_verify_dest_ptr(a,b) #define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a) #define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c) #define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b,c) #define xlog_verify_tail_lsn(a,b)
#endif #endif
STATIC int STATIC int
@ -487,51 +486,80 @@ xfs_log_reserve(
return error; return error;
} }
static bool
__xlog_state_release_iclog(
struct xlog *log,
struct xlog_in_core *iclog)
{
lockdep_assert_held(&log->l_icloglock);
if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
/* update tail before writing to iclog */
xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
iclog->ic_state = XLOG_STATE_SYNCING;
iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
xlog_verify_tail_lsn(log, iclog, tail_lsn);
/* cycle incremented when incrementing curr_block */
trace_xlog_iclog_syncing(iclog, _RET_IP_);
return true;
}
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
return false;
}
/* /*
* Flush iclog to disk if this is the last reference to the given iclog and the * Flush iclog to disk if this is the last reference to the given iclog and the
* it is in the WANT_SYNC state. * it is in the WANT_SYNC state.
*
* If the caller passes in a non-zero @old_tail_lsn and the current log tail
* does not match, there may be metadata on disk that must be persisted before
* this iclog is written. To satisfy that requirement, set the
* XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
* log tail value.
*
* If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
* log tail is updated correctly. NEED_FUA indicates that the iclog will be
* written to stable storage, and implies that a commit record is contained
* within the iclog. We need to ensure that the log tail does not move beyond
* the tail that the first commit record in the iclog ordered against, otherwise
* correct recovery of that checkpoint becomes dependent on future operations
* performed on this iclog.
*
* Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
* current tail into iclog. Once the iclog tail is set, future operations must
* not modify it, otherwise they potentially violate ordering constraints for
* the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
* the iclog will get zeroed on activation of the iclog after sync, so we
* always capture the tail lsn on the iclog on the first NEED_FUA release
* regardless of the number of active reference counts on this iclog.
*/ */
int int
xlog_state_release_iclog( xlog_state_release_iclog(
struct xlog *log, struct xlog *log,
struct xlog_in_core *iclog) struct xlog_in_core *iclog,
xfs_lsn_t old_tail_lsn)
{ {
xfs_lsn_t tail_lsn;
lockdep_assert_held(&log->l_icloglock); lockdep_assert_held(&log->l_icloglock);
trace_xlog_iclog_release(iclog, _RET_IP_); trace_xlog_iclog_release(iclog, _RET_IP_);
if (iclog->ic_state == XLOG_STATE_IOERROR) if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO; return -EIO;
if (atomic_dec_and_test(&iclog->ic_refcnt) && /*
__xlog_state_release_iclog(log, iclog)) { * Grabbing the current log tail needs to be atomic w.r.t. the writing
spin_unlock(&log->l_icloglock); * of the tail LSN into the iclog so we guarantee that the log tail does
xlog_sync(log, iclog); * not move between deciding if a cache flush is required and writing
spin_lock(&log->l_icloglock); * the LSN into the iclog below.
*/
if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
tail_lsn = xlog_assign_tail_lsn(log->l_mp);
if (old_tail_lsn && tail_lsn != old_tail_lsn)
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
!iclog->ic_header.h_tail_lsn)
iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
} }
if (!atomic_dec_and_test(&iclog->ic_refcnt))
return 0;
if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
return 0;
}
iclog->ic_state = XLOG_STATE_SYNCING;
if (!iclog->ic_header.h_tail_lsn)
iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
xlog_verify_tail_lsn(log, iclog);
trace_xlog_iclog_syncing(iclog, _RET_IP_);
spin_unlock(&log->l_icloglock);
xlog_sync(log, iclog);
spin_lock(&log->l_icloglock);
return 0; return 0;
} }
@ -773,6 +801,21 @@ xfs_log_mount_cancel(
xfs_log_unmount(mp); xfs_log_unmount(mp);
} }
/*
* Flush out the iclog to disk ensuring that device caches are flushed and
* the iclog hits stable storage before any completion waiters are woken.
*/
static inline int
xlog_force_iclog(
struct xlog_in_core *iclog)
{
atomic_inc(&iclog->ic_refcnt);
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}
/* /*
* Wait for the iclog and all prior iclogs to be written disk as required by the * Wait for the iclog and all prior iclogs to be written disk as required by the
* log force state machine. Waiting on ic_force_wait ensures iclog completions * log force state machine. Waiting on ic_force_wait ensures iclog completions
@ -827,13 +870,6 @@ xlog_write_unmount_record(
/* account for space used by record data */ /* account for space used by record data */
ticket->t_curr_res -= sizeof(ulf); ticket->t_curr_res -= sizeof(ulf);
/*
* For external log devices, we need to flush the data device cache
* first to ensure all metadata writeback is on stable storage before we
* stamp the tail LSN into the unmount record.
*/
if (log->l_targ != log->l_mp->m_ddev_targp)
blkdev_issue_flush(log->l_targ->bt_bdev);
return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS); return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
} }
@ -865,18 +901,7 @@ xlog_unmount_write(
spin_lock(&log->l_icloglock); spin_lock(&log->l_icloglock);
iclog = log->l_iclog; iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt); error = xlog_force_iclog(iclog);
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(log, iclog, 0);
else
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_state == XLOG_STATE_IOERROR);
/*
* Ensure the journal is fully flushed and on stable storage once the
* iclog containing the unmount record is written.
*/
iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
error = xlog_state_release_iclog(log, iclog);
xlog_wait_on_iclog(iclog); xlog_wait_on_iclog(iclog);
if (tic) { if (tic) {
@ -1796,10 +1821,20 @@ xlog_write_iclog(
* metadata writeback and causing priority inversions. * metadata writeback and causing priority inversions.
*/ */
iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
iclog->ic_bio.bi_opf |= REQ_PREFLUSH; iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
/*
* For external log devices, we also need to flush the data
* device cache first to ensure all metadata writeback covered
* by the LSN in this iclog is on stable storage. This is slow,
* but it *must* complete before we issue the external log IO.
*/
if (log->l_targ != log->l_mp->m_ddev_targp)
blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
}
if (iclog->ic_flags & XLOG_ICL_NEED_FUA) if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
iclog->ic_bio.bi_opf |= REQ_FUA; iclog->ic_bio.bi_opf |= REQ_FUA;
iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
@ -2310,7 +2345,7 @@ xlog_write_copy_finish(
return 0; return 0;
release_iclog: release_iclog:
error = xlog_state_release_iclog(log, iclog); error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock); spin_unlock(&log->l_icloglock);
return error; return error;
} }
@ -2529,7 +2564,7 @@ xlog_write(
ASSERT(optype & XLOG_COMMIT_TRANS); ASSERT(optype & XLOG_COMMIT_TRANS);
*commit_iclog = iclog; *commit_iclog = iclog;
} else { } else {
error = xlog_state_release_iclog(log, iclog); error = xlog_state_release_iclog(log, iclog, 0);
} }
spin_unlock(&log->l_icloglock); spin_unlock(&log->l_icloglock);
@ -2567,6 +2602,7 @@ xlog_state_activate_iclog(
memset(iclog->ic_header.h_cycle_data, 0, memset(iclog->ic_header.h_cycle_data, 0,
sizeof(iclog->ic_header.h_cycle_data)); sizeof(iclog->ic_header.h_cycle_data));
iclog->ic_header.h_lsn = 0; iclog->ic_header.h_lsn = 0;
iclog->ic_header.h_tail_lsn = 0;
} }
/* /*
@ -2967,7 +3003,7 @@ xlog_state_get_iclog_space(
* reference to the iclog. * reference to the iclog.
*/ */
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
error = xlog_state_release_iclog(log, iclog); error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock); spin_unlock(&log->l_icloglock);
if (error) if (error)
return error; return error;
@ -3131,6 +3167,35 @@ xlog_state_switch_iclogs(
log->l_iclog = iclog->ic_next; log->l_iclog = iclog->ic_next;
} }
/*
* Force the iclog to disk and check if the iclog has been completed before
* xlog_force_iclog() returns. This can happen on synchronous (e.g.
* pmem) or fast async storage because we drop the icloglock to issue the IO.
* If completion has already occurred, tell the caller so that it can avoid an
* unnecessary wait on the iclog.
*/
static int
xlog_force_and_check_iclog(
struct xlog_in_core *iclog,
bool *completed)
{
xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
int error;
*completed = false;
error = xlog_force_iclog(iclog);
if (error)
return error;
/*
* If the iclog has already been completed and reused, the header LSN
* will have been rewritten by completion
*/
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
*completed = true;
return 0;
}
/* /*
* Write out all data in the in-core log as of this exact moment in time. * Write out all data in the in-core log as of this exact moment in time.
* *
@ -3165,7 +3230,6 @@ xfs_log_force(
{ {
struct xlog *log = mp->m_log; struct xlog *log = mp->m_log;
struct xlog_in_core *iclog; struct xlog_in_core *iclog;
xfs_lsn_t lsn;
XFS_STATS_INC(mp, xs_log_force); XFS_STATS_INC(mp, xs_log_force);
trace_xfs_log_force(mp, 0, _RET_IP_); trace_xfs_log_force(mp, 0, _RET_IP_);
@ -3193,39 +3257,33 @@ xfs_log_force(
iclog = iclog->ic_prev; iclog = iclog->ic_prev;
} else if (iclog->ic_state == XLOG_STATE_ACTIVE) { } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
if (atomic_read(&iclog->ic_refcnt) == 0) { if (atomic_read(&iclog->ic_refcnt) == 0) {
/* /* We have exclusive access to this iclog. */
* We are the only one with access to this iclog. bool completed;
*
* Flush it out now. There should be a roundoff of zero if (xlog_force_and_check_iclog(iclog, &completed))
* to show that someone has already taken care of the
* roundoff from the previous sync.
*/
atomic_inc(&iclog->ic_refcnt);
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
if (xlog_state_release_iclog(log, iclog))
goto out_error; goto out_error;
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) if (completed)
goto out_unlock; goto out_unlock;
} else { } else {
/* /*
* Someone else is writing to this iclog. * Someone else is still writing to this iclog, so we
* * need to ensure that when they release the iclog it
* Use its call to flush out the data. However, the * gets synced immediately as we may be waiting on it.
* other thread may not force out this LR, so we mark
* it WANT_SYNC.
*/ */
xlog_state_switch_iclogs(log, iclog, 0); xlog_state_switch_iclogs(log, iclog, 0);
} }
} else {
/*
* If the head iclog is not active nor dirty, we just attach
* ourselves to the head and go to sleep if necessary.
*/
;
} }
/*
* The iclog we are about to wait on may contain the checkpoint pushed
* by the above xlog_cil_force() call, but it may not have been pushed
* to disk yet. Like the ACTIVE case above, we need to make sure caches
* are flushed when this iclog is written.
*/
if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
if (flags & XFS_LOG_SYNC) if (flags & XFS_LOG_SYNC)
return xlog_wait_on_iclog(iclog); return xlog_wait_on_iclog(iclog);
out_unlock: out_unlock:
@ -3245,6 +3303,7 @@ xlog_force_lsn(
bool already_slept) bool already_slept)
{ {
struct xlog_in_core *iclog; struct xlog_in_core *iclog;
bool completed;
spin_lock(&log->l_icloglock); spin_lock(&log->l_icloglock);
iclog = log->l_iclog; iclog = log->l_iclog;
@ -3258,7 +3317,8 @@ xlog_force_lsn(
goto out_unlock; goto out_unlock;
} }
if (iclog->ic_state == XLOG_STATE_ACTIVE) { switch (iclog->ic_state) {
case XLOG_STATE_ACTIVE:
/* /*
* We sleep here if we haven't already slept (e.g. this is the * We sleep here if we haven't already slept (e.g. this is the
* first time we've looked at the correct iclog buf) and the * first time we've looked at the correct iclog buf) and the
@ -3281,12 +3341,31 @@ xlog_force_lsn(
&log->l_icloglock); &log->l_icloglock);
return -EAGAIN; return -EAGAIN;
} }
atomic_inc(&iclog->ic_refcnt); if (xlog_force_and_check_iclog(iclog, &completed))
xlog_state_switch_iclogs(log, iclog, 0);
if (xlog_state_release_iclog(log, iclog))
goto out_error; goto out_error;
if (log_flushed) if (log_flushed)
*log_flushed = 1; *log_flushed = 1;
if (completed)
goto out_unlock;
break;
case XLOG_STATE_WANT_SYNC:
/*
* This iclog may contain the checkpoint pushed by the
* xlog_cil_force_seq() call, but there are other writers still
* accessing it so it hasn't been pushed to disk yet. Like the
* ACTIVE case above, we need to make sure caches are flushed
* when this iclog is written.
*/
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
break;
default:
/*
* The entire checkpoint was written by the CIL force and is on
* its way to disk already. It will be stable when it
* completes, so we don't need to manipulate caches here at all.
* We just need to wait for completion if necessary.
*/
break;
} }
if (flags & XFS_LOG_SYNC) if (flags & XFS_LOG_SYNC)
@ -3559,10 +3638,10 @@ xlog_verify_grant_tail(
STATIC void STATIC void
xlog_verify_tail_lsn( xlog_verify_tail_lsn(
struct xlog *log, struct xlog *log,
struct xlog_in_core *iclog, struct xlog_in_core *iclog)
xfs_lsn_t tail_lsn)
{ {
int blocks; xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
int blocks;
if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
blocks = blocks =

View file

@ -654,8 +654,9 @@ xlog_cil_push_work(
struct xfs_trans_header thdr; struct xfs_trans_header thdr;
struct xfs_log_iovec lhdr; struct xfs_log_iovec lhdr;
struct xfs_log_vec lvhdr = { NULL }; struct xfs_log_vec lvhdr = { NULL };
xfs_lsn_t preflush_tail_lsn;
xfs_lsn_t commit_lsn; xfs_lsn_t commit_lsn;
xfs_lsn_t push_seq; xfs_csn_t push_seq;
struct bio bio; struct bio bio;
DECLARE_COMPLETION_ONSTACK(bdev_flush); DECLARE_COMPLETION_ONSTACK(bdev_flush);
@ -730,7 +731,15 @@ xlog_cil_push_work(
* because we hold the flush lock exclusively. Hence we can now issue * because we hold the flush lock exclusively. Hence we can now issue
* a cache flush to ensure all the completed metadata in the journal we * a cache flush to ensure all the completed metadata in the journal we
* are about to overwrite is on stable storage. * are about to overwrite is on stable storage.
*
* Because we are issuing this cache flush before we've written the
* tail lsn to the iclog, we can have metadata IO completions move the
* tail forwards between the completion of this flush and the iclog
* being written. In this case, we need to re-issue the cache flush
* before the iclog write. To detect whether the log tail moves, sample
* the tail LSN *before* we issue the flush.
*/ */
preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev, xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
&bdev_flush); &bdev_flush);
@ -941,7 +950,7 @@ xlog_cil_push_work(
* storage. * storage.
*/ */
commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
xlog_state_release_iclog(log, commit_iclog); xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
spin_unlock(&log->l_icloglock); spin_unlock(&log->l_icloglock);
return; return;

View file

@ -59,6 +59,16 @@ enum xlog_iclog_state {
{ XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \ { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \
{ XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" } { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" }
/*
* In core log flags
*/
#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
#define XLOG_ICL_STRINGS \
{ XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \
{ XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" }
/* /*
* Log ticket flags * Log ticket flags
@ -143,9 +153,6 @@ enum xlog_iclog_state {
#define XLOG_COVER_OPS 5 #define XLOG_COVER_OPS 5
#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
/* Ticket reservation region accounting */ /* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX 15 #define XLOG_TIC_LEN_MAX 15
@ -497,7 +504,8 @@ int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog); int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
xfs_lsn_t log_tail_lsn);
/* /*
* When we crack an atomic LSN, we sample it first so that the value will not * When we crack an atomic LSN, we sample it first so that the value will not

View file

@ -3944,6 +3944,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
__field(uint32_t, state) __field(uint32_t, state)
__field(int32_t, refcount) __field(int32_t, refcount)
__field(uint32_t, offset) __field(uint32_t, offset)
__field(uint32_t, flags)
__field(unsigned long long, lsn) __field(unsigned long long, lsn)
__field(unsigned long, caller_ip) __field(unsigned long, caller_ip)
), ),
@ -3952,15 +3953,17 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
__entry->state = iclog->ic_state; __entry->state = iclog->ic_state;
__entry->refcount = atomic_read(&iclog->ic_refcnt); __entry->refcount = atomic_read(&iclog->ic_refcnt);
__entry->offset = iclog->ic_offset; __entry->offset = iclog->ic_offset;
__entry->flags = iclog->ic_flags;
__entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn); __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
__entry->caller_ip = caller_ip; __entry->caller_ip = caller_ip;
), ),
TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS", TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->state, XLOG_STATE_STRINGS), __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
__entry->refcount, __entry->refcount,
__entry->offset, __entry->offset,
__entry->lsn, __entry->lsn,
__print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
(char *)__entry->caller_ip) (char *)__entry->caller_ip)
); );

View file

@ -721,8 +721,13 @@ void mhi_device_put(struct mhi_device *mhi_dev);
* host and device execution environments match and * host and device execution environments match and
* channels are in a DISABLED state. * channels are in a DISABLED state.
* @mhi_dev: Device associated with the channels * @mhi_dev: Device associated with the channels
* @flags: MHI channel flags
*/ */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); int mhi_prepare_for_transfer(struct mhi_device *mhi_dev,
unsigned int flags);
/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
/** /**
* mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
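mhi_prepare_for_transfer() now takes a flags argument: the callers updated above pass 0, while a client that wants the MHI core to allocate and queue its inbound buffers would pass MHI_CH_INBOUND_ALLOC_BUFS. A hedged usage sketch (example_mhi_probe and the omitted setup are placeholders, not code from this merge):

#include <linux/mhi.h>

static int example_mhi_probe(struct mhi_device *mhi_dev,
			     const struct mhi_device_id *id)
{
	int ret;

	/* Let the MHI core pre-allocate and queue inbound buffers for us. */
	ret = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
	if (ret)
		return ret;

	/* ... register the netdev / wwan port here ... */
	return 0;
}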

View file

@ -1230,6 +1230,7 @@ struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev); void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev);
void hci_cleanup_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev);

View file

@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
} }
/** /**
* flow_action_has_one_action() - check if exactly one action is present * flow_offload_has_one_action() - check if exactly one action is present
* @action: tc filter flow offload action * @action: tc filter flow offload action
* *
* Returns true if exactly one action is present. * Returns true if exactly one action is present.

View file

@ -265,7 +265,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
{ {
int mtu; unsigned int mtu;
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL; inet6_sk(skb->sk) : NULL;

View file

@ -75,6 +75,7 @@ struct netns_xfrm {
#endif #endif
spinlock_t xfrm_state_lock; spinlock_t xfrm_state_lock;
seqcount_spinlock_t xfrm_state_hash_generation; seqcount_spinlock_t xfrm_state_hash_generation;
seqcount_spinlock_t xfrm_policy_hash_generation;
spinlock_t xfrm_policy_lock; spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex; struct mutex xfrm_cfg_mutex;

View file

@ -329,6 +329,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/** /**
* struct tcf_pkt_info - packet information * struct tcf_pkt_info - packet information
*
* @ptr: start of the pkt data
* @nexthdr: offset of the next header
*/ */
struct tcf_pkt_info { struct tcf_pkt_info {
unsigned char * ptr; unsigned char * ptr;
@ -347,6 +350,7 @@ struct tcf_ematch_ops;
* @ops: the operations lookup table of the corresponding ematch module * @ops: the operations lookup table of the corresponding ematch module
* @datalen: length of the ematch specific configuration data * @datalen: length of the ematch specific configuration data
* @data: ematch specific data * @data: ematch specific data
* @net: the network namespace
*/ */
struct tcf_ematch { struct tcf_ematch {
struct tcf_ematch_ops * ops; struct tcf_ematch_ops * ops;

View file

@@ -9135,8 +9135,10 @@ static int trace_array_create_dir(struct trace_array *tr)
 		return -EINVAL;
 
 	ret = event_trace_add_tracer(tr->dir, tr);
-	if (ret)
+	if (ret) {
 		tracefs_remove(tr->dir);
+		return ret;
+	}
 
 	init_tracer_tracefs(tr, tr->dir);
 	__update_tracer_options(tr);

@@ -65,7 +65,8 @@
 	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
 	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
 	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
-	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),
+	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),	\
+	C(INVALID_STR_OPERAND,	"String type can not be an operand in expression"),
 
 #undef C
 #define C(a, b) HIST_ERR_##a
@@ -2156,6 +2157,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
 		ret = PTR_ERR(operand1);
 		goto free;
 	}
+	if (operand1->flags & HIST_FIELD_FL_STRING) {
+		/* String type can not be the operand of unary operator. */
+		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+		destroy_hist_field(operand1, 0);
+		ret = -EINVAL;
+		goto free;
+	}
 
 	expr->flags |= operand1->flags &
 		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
@@ -2257,6 +2265,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 		operand1 = NULL;
 		goto free;
 	}
+	if (operand1->flags & HIST_FIELD_FL_STRING) {
+		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
+		ret = -EINVAL;
+		goto free;
+	}
 
 	/* rest of string could be another expression e.g. b+c in a+b+c */
 	operand_flags = 0;
@@ -2266,6 +2279,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 		operand2 = NULL;
 		goto free;
 	}
+	if (operand2->flags & HIST_FIELD_FL_STRING) {
+		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
+		ret = -EINVAL;
+		goto free;
+	}
 
 	ret = check_expr_operands(file->tr, operand1, operand2);
 	if (ret)
@@ -2287,6 +2305,10 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 	expr->operands[0] = operand1;
 	expr->operands[1] = operand2;
 
+	/* The operand sizes should be the same, so just pick one */
+	expr->size = operand1->size;
+
 	expr->operator = field_op;
 	expr->name = expr_str(expr, 0);
 	expr->type = kstrdup(operand1->type, GFP_KERNEL);

@@ -327,7 +327,7 @@ static void move_to_next_cpu(void)
 	get_online_cpus();
 	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
-	next_cpu = cpumask_next(smp_processor_id(), current_mask);
+	next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
 	put_online_cpus();
 
 	if (next_cpu >= nr_cpu_ids)

@@ -160,6 +160,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
 	struct ucounts *ucounts, *new;
+	long overflow;
 
 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
@@ -184,8 +185,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 			return new;
 		}
 	}
+	overflow = atomic_add_negative(1, &ucounts->count);
 	spin_unlock_irq(&ucounts_lock);
-	ucounts = get_ucounts(ucounts);
+	if (overflow) {
+		put_ucounts(ucounts);
+		return NULL;
+	}
 	return ucounts;
 }
@@ -193,8 +198,7 @@
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
 		hlist_del_init(&ucounts->node);
 		spin_unlock_irqrestore(&ucounts_lock, flags);
 		kfree(ucounts);

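For readers unfamiliar with the pattern in the alloc_ucounts() hunk above, here is a minimal sketch of the same overflow guard applied to a hypothetical refcounted object (the struct and helper names are made up for illustration): atomic_add_negative() both takes the reference and reports whether the counter wrapped into the negative range, in which case the caller undoes the grab and fails the lookup.

struct counted {
	atomic_t count;
};

static struct counted *counted_tryget(struct counted *c)
{
	/* A negative result means the counter overflowed past INT_MAX. */
	if (atomic_add_negative(1, &c->count)) {
		atomic_dec(&c->count);	/* undo the speculative grab */
		return NULL;
	}
	return c;
}
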
@@ -3996,14 +3996,10 @@ EXPORT_SYMBOL(hci_register_dev);
 /* Unregister HCI device */
 void hci_unregister_dev(struct hci_dev *hdev)
 {
-	int id;
-
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
 
-	id = hdev->id;
-
 	write_lock(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock(&hci_dev_list_lock);
@@ -4038,7 +4034,14 @@
 	}
 
 	device_del(&hdev->dev);
+	/* Actual cleanup is deferred until hci_cleanup_dev(). */
+	hci_dev_put(hdev);
+}
+EXPORT_SYMBOL(hci_unregister_dev);
 
+/* Cleanup HCI device */
+void hci_cleanup_dev(struct hci_dev *hdev)
+{
 	debugfs_remove_recursive(hdev->debugfs);
 	kfree_const(hdev->hw_info);
 	kfree_const(hdev->fw_info);
@@ -4063,11 +4066,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
 	hci_blocked_keys_clear(hdev);
 	hci_dev_unlock(hdev);
 
-	hci_dev_put(hdev);
-
-	ida_simple_remove(&hci_index_ida, id);
+	ida_simple_remove(&hci_index_ida, hdev->id);
 }
-EXPORT_SYMBOL(hci_unregister_dev);
 
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)

@@ -59,6 +59,17 @@ struct hci_pinfo {
 	char              comm[TASK_COMM_LEN];
 };
 
+static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
+{
+	struct hci_dev *hdev = hci_pi(sk)->hdev;
+
+	if (!hdev)
+		return ERR_PTR(-EBADFD);
+	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+		return ERR_PTR(-EPIPE);
+	return hdev;
+}
+
 void hci_sock_set_flag(struct sock *sk, int nr)
 {
 	set_bit(nr, &hci_pi(sk)->flags);
@@ -759,19 +770,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
 
-		/* Detach sockets from device */
+		/* Wake up sockets using this dead device */
 		read_lock(&hci_sk_list.lock);
 		sk_for_each(sk, &hci_sk_list.head) {
-			lock_sock(sk);
 			if (hci_pi(sk)->hdev == hdev) {
-				hci_pi(sk)->hdev = NULL;
 				sk->sk_err = EPIPE;
-				sk->sk_state = BT_OPEN;
 				sk->sk_state_change(sk);
-
-				hci_dev_put(hdev);
 			}
-			release_sock(sk);
 		}
 		read_unlock(&hci_sk_list.lock);
 	}
@@ -930,10 +935,10 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
 				unsigned long arg)
 {
-	struct hci_dev *hdev = hci_pi(sk)->hdev;
+	struct hci_dev *hdev = hci_hdev_from_sock(sk);
 
-	if (!hdev)
-		return -EBADFD;
+	if (IS_ERR(hdev))
+		return PTR_ERR(hdev);
 
 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 		return -EBUSY;
@@ -1103,6 +1108,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
 	lock_sock(sk);
 
+	/* Allow detaching from dead device and attaching to alive device, if
+	 * the caller wants to re-bind (instead of close) this socket in
+	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
+	 */
+	hdev = hci_pi(sk)->hdev;
+	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+		hci_pi(sk)->hdev = NULL;
+		sk->sk_state = BT_OPEN;
+		hci_dev_put(hdev);
+	}
+	hdev = NULL;
+
 	if (sk->sk_state == BT_BOUND) {
 		err = -EALREADY;
 		goto done;
@@ -1379,9 +1396,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	lock_sock(sk);
 
-	hdev = hci_pi(sk)->hdev;
-	if (!hdev) {
-		err = -EBADFD;
+	hdev = hci_hdev_from_sock(sk);
+	if (IS_ERR(hdev)) {
+		err = PTR_ERR(hdev);
 		goto done;
 	}
@@ -1743,9 +1760,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		goto done;
 	}
 
-	hdev = hci_pi(sk)->hdev;
-	if (!hdev) {
-		err = -EBADFD;
+	hdev = hci_hdev_from_sock(sk);
+	if (IS_ERR(hdev)) {
+		err = PTR_ERR(hdev);
 		goto done;
 	}

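The bind() hunk above lets a socket that was attached to an unregistered controller be re-bound instead of closed. A rough userspace-side sketch of what that enables, assuming the BlueZ headers and a hypothetical helper name (illustrative only, error handling omitted):

#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* After I/O on the socket starts failing with EPIPE because the bound
 * controller went away, re-bind the same raw HCI socket to another
 * controller rather than closing it.
 */
static int rebind_raw_hci_socket(int sk, int new_dev_id)
{
	struct sockaddr_hci addr;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = new_dev_id;
	addr.hci_channel = HCI_CHANNEL_RAW;

	return bind(sk, (struct sockaddr *)&addr, sizeof(addr));
}
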
@@ -83,6 +83,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 static void bt_host_release(struct device *dev)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
+
+	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+		hci_cleanup_dev(hdev);
 	kfree(hdev);
 	module_put(THIS_MODULE);
 }

@@ -166,7 +166,8 @@ static int br_switchdev_event(struct notifier_block *unused,
 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
 		fdb_info = ptr;
 		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
-						fdb_info->vid, false);
+						fdb_info->vid,
+						fdb_info->is_local, false);
 		if (err) {
 			err = notifier_from_errno(err);
 			break;

@@ -1011,7 +1011,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
 			struct net_bridge_port *p, const unsigned char *addr,
-			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
+			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+			struct netlink_ext_ack *extack)
 {
 	int err = 0;
@@ -1030,7 +1031,15 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
 		rcu_read_unlock();
 		local_bh_enable();
 	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
-		err = br_fdb_external_learn_add(br, p, addr, vid, true);
+		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "FDB entry towards bridge must be permanent");
+			return -EINVAL;
+		}
+
+		err = br_fdb_external_learn_add(br, p, addr, vid,
+						ndm->ndm_state & NUD_PERMANENT,
+						true);
 	} else {
 		spin_lock_bh(&br->hash_lock);
 		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
@@ -1102,9 +1111,11 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		}
 
 		/* VID was specified, so use it. */
-		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+				   extack);
 	} else {
-		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+				   extack);
 		if (err || !vg || !vg->num_vlans)
 			goto out;
@@ -1116,7 +1127,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			if (!br_vlan_should_use(v))
 				continue;
 			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
-					   nfea_tb);
+					   nfea_tb, extack);
 			if (err)
 				goto out;
 		}
@@ -1256,7 +1267,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 }
 
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-			      const unsigned char *addr, u16 vid,
+			      const unsigned char *addr, u16 vid, bool is_local,
 			      bool swdev_notify)
 {
 	struct net_bridge_fdb_entry *fdb;
@@ -1273,6 +1284,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 		if (swdev_notify)
 			flags |= BIT(BR_FDB_ADDED_BY_USER);
 
+		if (is_local)
+			flags |= BIT(BR_FDB_LOCAL);
+
 		fdb = fdb_create(br, p, addr, vid, flags);
 		if (!fdb) {
 			err = -ENOMEM;
@@ -1299,6 +1314,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 		if (swdev_notify)
 			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
+		if (is_local)
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+
 		if (modified)
 			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
 	}

@@ -770,7 +770,7 @@ int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-			      const unsigned char *addr, u16 vid,
+			      const unsigned char *addr, u16 vid, bool is_local,
			      bool swdev_notify);
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 			      const unsigned char *addr, u16 vid,

@@ -298,6 +298,9 @@ int tcp_gro_complete(struct sk_buff *skb)
 	if (th->cwr)
 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
+	if (skb->encapsulation)
+		skb->inner_transport_header = skb->transport_header;
+
 	return 0;
 }
 EXPORT_SYMBOL(tcp_gro_complete);

@@ -624,6 +624,10 @@ static int udp_gro_complete_segment(struct sk_buff *skb)
 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
 
+	if (skb->encapsulation)
+		skb->inner_transport_header = skb->transport_header;
+
 	return 0;
 }

@@ -27,7 +27,6 @@ struct mptcp_pm_addr_entry {
 	struct mptcp_addr_info	addr;
 	u8			flags;
 	int			ifindex;
-	struct rcu_head		rcu;
 	struct socket		*lsk;
 };

@@ -15,6 +15,7 @@ struct qrtr_mhi_dev {
 	struct qrtr_endpoint ep;
 	struct mhi_device *mhi_dev;
 	struct device *dev;
+	struct completion ready;
 };
 
 /* From MHI to QRTR */
@@ -50,6 +51,10 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
 	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
 	int rc;
 
+	rc = wait_for_completion_interruptible(&qdev->ready);
+	if (rc)
+		goto free_skb;
+
 	if (skb->sk)
 		sock_hold(skb->sk);
@@ -79,7 +84,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 	int rc;
 
 	/* start channels */
-	rc = mhi_prepare_for_transfer(mhi_dev);
+	rc = mhi_prepare_for_transfer(mhi_dev, 0);
 	if (rc)
 		return rc;
@@ -96,6 +101,15 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
 	if (rc)
 		return rc;
 
+	/* start channels */
+	rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+	if (rc) {
+		qrtr_endpoint_unregister(&qdev->ep);
+		dev_set_drvdata(&mhi_dev->dev, NULL);
+		return rc;
+	}
+
+	complete_all(&qdev->ready);
+
 	dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
 
 	return 0;

@@ -913,7 +913,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 	spin_lock_init(&sch->seqlock);
-	lockdep_set_class(&sch->busylock,
+	lockdep_set_class(&sch->seqlock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
 	seqcount_init(&sch->running);

@@ -1739,8 +1739,6 @@ static void taprio_attach(struct Qdisc *sch)
 		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
 			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
-			if (ntx < dev->real_num_tx_queues)
-				qdisc_hash_add(qdisc, false);
 		} else {
 			old = dev_graft_qdisc(qdisc->dev_queue, sch);
 			qdisc_refcount_inc(sch);

@@ -857,14 +857,18 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
 	memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
 	cur_key->key = key;
 
-	if (replace) {
-		list_del_init(&shkey->key_list);
-		sctp_auth_shkey_release(shkey);
-		if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
-			sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+	if (!replace) {
+		list_add(&cur_key->key_list, sh_keys);
+		return 0;
 	}
+
+	list_del_init(&shkey->key_list);
+	sctp_auth_shkey_release(shkey);
 	list_add(&cur_key->key_list, sh_keys);
 
+	if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+		sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+
 	return 0;
 }

@@ -1079,6 +1079,9 @@ virtio_transport_recv_connected(struct sock *sk,
 		virtio_transport_recv_enqueue(vsk, pkt);
 		sk->sk_data_ready(sk);
 		return err;
+	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+		virtio_transport_send_credit_update(vsk);
+		break;
 	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
 		sk->sk_write_space(sk);
 		break;

@@ -298,8 +298,16 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
 	len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
 
 	nla_for_each_attr(nla, attrs, len, remaining) {
-		int err = xfrm_xlate64_attr(dst, nla);
+		int err;
 
+		switch (type) {
+		case XFRM_MSG_NEWSPDINFO:
+			err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+			break;
+		default:
+			err = xfrm_xlate64_attr(dst, nla);
+			break;
+		}
 		if (err)
 			return err;
 	}
@@ -341,7 +349,8 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
 /* Calculates len of translated 64-bit message. */
 static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
-					    struct nlattr *attrs[XFRMA_MAX+1])
+					    struct nlattr *attrs[XFRMA_MAX + 1],
+					    int maxtype)
 {
 	size_t len = nlmsg_len(src);
@@ -358,10 +367,20 @@ static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
 	case XFRM_MSG_POLEXPIRE:
 		len += 8;
 		break;
+	case XFRM_MSG_NEWSPDINFO:
+		/* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+		return len;
 	default:
 		break;
 	}
 
+	/* Unexpected for anything, but XFRM_MSG_NEWSPDINFO, please
+	 * correct both 64=>32-bit and 32=>64-bit translators to copy
+	 * new attributes.
+	 */
+	if (WARN_ON_ONCE(maxtype))
+		return len;
+
 	if (attrs[XFRMA_SA])
 		len += 4;
 	if (attrs[XFRMA_POLICY])
@@ -440,7 +459,8 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
 static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
 			struct nlattr *attrs[XFRMA_MAX+1],
-			size_t size, u8 type, struct netlink_ext_ack *extack)
+			size_t size, u8 type, int maxtype,
+			struct netlink_ext_ack *extack)
 {
 	size_t pos;
 	int i;
@@ -520,6 +540,25 @@ static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
 	}
 	pos = dst->nlmsg_len;
 
+	if (maxtype) {
+		/* attirbutes are xfrm_spdattr_type_t, not xfrm_attr_type_t */
+		WARN_ON_ONCE(src->nlmsg_type != XFRM_MSG_NEWSPDINFO);
+
+		for (i = 1; i <= maxtype; i++) {
+			int err;
+
+			if (!attrs[i])
+				continue;
+
+			/* just copy - no need for translation */
+			err = xfrm_attr_cpy32(dst, &pos, attrs[i], size,
+					      nla_len(attrs[i]), nla_len(attrs[i]));
+			if (err)
+				return err;
+		}
+
+		return 0;
+	}
+
 	for (i = 1; i < XFRMA_MAX + 1; i++) {
 		int err;
@@ -564,7 +603,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	len = xfrm_user_rcv_calculate_len64(h32, attrs);
+	len = xfrm_user_rcv_calculate_len64(h32, attrs, maxtype);
 	/* The message doesn't need translation */
 	if (len == nlmsg_len(h32))
 		return NULL;
@@ -574,7 +613,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
 	if (!h64)
 		return ERR_PTR(-ENOMEM);
 
-	err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+	err = xfrm_xlate32(h64, h32, attrs, len, type, maxtype, extack);
 	if (err < 0) {
 		kvfree(h64);
 		return ERR_PTR(err);

@@ -241,7 +241,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
 			break;
 	}
 
-	WARN_ON(!pos);
+	WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
 
 	if (--pos->users)
 		return;

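The WARN_ON() change above relies on how list_for_each_entry() terminates. A generic sketch with a hypothetical struct (not the ipcomp code) shows why the old "!pos" test could never fire:

struct item {
	struct list_head list;
	int key;
};

static struct item *find_item(struct list_head *items, int key)
{
	struct item *pos;

	list_for_each_entry(pos, items, list) {
		if (pos->key == key)
			return pos;
	}

	/* If the loop ran to completion, pos aliases the list head (cast to
	 * struct item) rather than NULL, so a NULL check is meaningless;
	 * list_entry_is_head(pos, items, list) is the correct "not found"
	 * test.
	 */
	return NULL;
}
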

Some files were not shown because too many files have changed in this diff Show more