Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Several nf_flow_table_offload fixes from Pablo Neira Ayuso,
    including adding a missing ipv6 match description.

 2) Several heap overflow fixes in mwifiex from qize wang and Ganapathi
    Bhat.

 3) Fix uninit value in bond_neigh_init(), from Eric Dumazet.

 4) Fix non-ACPI probing of nxp-nci, from Stephan Gerhold.

 5) Fix use after free in tipc_disc_rcv(), from Tuong Lien.

 6) Enforce the 33-tail-call limit in the MIPS and RISC-V BPF JITs, from
    Paul Chaignon. (A small demo of the off-by-one follows the list.)

 7) Multicast MAC limit test is off by one in qede, from Manish Chopra.

 8) Fix established socket lookup race when a socket goes from
    TCP_ESTABLISHED to TCP_LISTEN, because an intervening RCU grace
    period is missing. From Eric Dumazet.

 9) Don't send empty SKBs from tcp_write_xmit(), also from Eric Dumazet.

10) Fix active backup transition after link failure in bonding, from
    Mahesh Bandewar.

11) Avoid zero sized hash table in gtp driver, from Taehee Yoo.

12) Fix wrong interface passed to ->mac_link_up(), from Russell King.

13) Fix DSA egress flooding settings in b53, from Florian Fainelli.

14) Memory leak in gmac_setup_txqs(), from Navid Emamdoost.

15) Fix double free in dpaa2-ptp code, from Ioana Ciornei.

16) Reject invalid MTU values in stmmac, from Jose Abreu.

17) Fix refcount leak in error path of u32 classifier, from Davide
    Caratti.

18) Fix regression causing iwlwifi firmware crashes on boot, from Anders
    Kaseorg.

19) Fix inverted return value logic in llc2 code, from Chan Shu Tak.

20) Disable hardware GRO when XDP is attached to qede, from Manish
    Chopra.

21) Since we encode state in the low pointer bits, dst metrics must be
    at least 4-byte aligned, which is not necessarily true on m68k. Add
    annotations to fix this, from Geert Uytterhoeven. (A second sketch
    after the list illustrates the pointer-tagging constraint.)

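A quick illustration of the off-by-one behind fix #6: the fix makes the
JITs branch on the pre-decrement counter value, so 33 tail calls are
permitted rather than 32, matching the interpreter. This is a hedged,
standalone sketch in plain C (hypothetical names, not the JIT code
itself), assuming the counter starts at MAX_TAIL_CALL_CNT, which was 32
at the time:

    #include <stdio.h>

    /* Count how many "tail calls" each counter test permits. */
    static int count_calls(int tcc, int post_decrement)
    {
            int calls = 0;

            for (;;) {
                    /* "goto out" in the JIT corresponds to break here */
                    if (post_decrement ? (tcc-- < 0) : (--tcc < 0))
                            break;
                    calls++;        /* one more tail call allowed */
            }
            return calls;
    }

    int main(void)
    {
            printf("if (--TCC < 0): %d calls\n", count_calls(32, 0)); /* 32 */
            printf("if (TCC-- < 0): %d calls\n", count_calls(32, 1)); /* 33 */
            return 0;
    }
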
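Similarly, a standalone sketch of the pointer-tagging constraint behind
fix #21: flag bits stored in the two low bits of a pointer are only safe
if the pointee is at least 4-byte aligned, and m68k may align a u32
array to only 2 bytes, hence the explicit annotation. Hypothetical
names; plain userspace C, not the kernel's dst code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define METRICS_FLAGS 0x3UL     /* two low bits reserved for flags */

    struct metrics {
            uint32_t vals[4];
    } __attribute__((aligned(4)));  /* the essence of the fix */

    int main(void)
    {
            static struct metrics m;
            uintptr_t tagged;

            /* Low two bits must be zero before the pointer can be tagged. */
            assert(((uintptr_t)&m & METRICS_FLAGS) == 0);

            tagged = (uintptr_t)&m | 0x1UL;            /* set one flag bit */
            printf("flags=%lu ptr=%p\n",
                   (unsigned long)(tagged & METRICS_FLAGS),
                   (void *)(tagged & ~METRICS_FLAGS)); /* recover pointer */
            return 0;
    }
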
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (160 commits)
  sfc: Include XDP packet headroom in buffer step size.
  sfc: fix channel allocation with brute force
  net: dst: Force 4-byte alignment of dst_metrics
  selftests: pmtu: fix init mtu value in description
  hv_netvsc: Fix unwanted rx_table reset
  net: phy: ensure that phy IDs are correctly typed
  mod_devicetable: fix PHY module format
  qede: Disable hardware gro when xdp prog is installed
  net: ena: fix issues in setting interrupt moderation params in ethtool
  net: ena: fix default tx interrupt moderation interval
  net/smc: unregister ib devices in reboot_event
  net: stmmac: platform: Fix MDIO init for platforms without PHY
  llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c)
  net: hisilicon: Fix a BUG trigered by wrong bytes_compl
  net: dsa: ksz: use common define for tag len
  s390/qeth: don't return -ENOTSUPP to userspace
  s390/qeth: fix promiscuous mode after reset
  s390/qeth: handle error due to unsupported transport mode
  cxgb4: fix refcount init for TC-MQPRIO offload
  tc-testing: initial tdc selftests for cls_u32
  ...
commit 78bac77b52
Author: Linus Torvalds
Date:   2019-12-22 09:54:33 -08:00

176 changed files with 2158 additions and 1025 deletions

@@ -266,6 +266,7 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>

@@ -10,7 +10,6 @@ Required properties:
 - #size-cells: 0
 - spi-max-frequency: Maximum frequency of the SPI bus the chip can
   operate at should be less than or equal to 18 MHz.
-- device-wake-gpios: Wake up GPIO to wake up the TCAN device.
 - interrupt-parent: the phandle to the interrupt controller which provides
   the interrupt.
 - interrupts: interrupt specification for data-ready.
@@ -23,6 +22,7 @@ Optional properties:
   reset.
 - device-state-gpios: Input GPIO that indicates if the device is in
   a sleep state or if the device is active.
+- device-wake-gpios: Wake up GPIO to wake up the TCAN device.
 
 Example:
 tcan4x5x: tcan4x5x@0 {
@@ -36,5 +36,5 @@ tcan4x5x: tcan4x5x@0 {
 	interrupts = <14 GPIO_ACTIVE_LOW>;
 	device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 	device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
-	reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+	reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
 };

@@ -339,7 +339,7 @@ To claim an address following code example can be used:
 		.pgn = J1939_PGN_ADDRESS_CLAIMED,
 		.pgn_mask = J1939_PGN_PDU1_MAX,
 	}, {
-		.pgn = J1939_PGN_ADDRESS_REQUEST,
+		.pgn = J1939_PGN_REQUEST,
 		.pgn_mask = J1939_PGN_PDU1_MAX,
 	}, {
 		.pgn = J1939_PGN_ADDRESS_COMMANDED,

@@ -10110,6 +10110,7 @@ S: Maintained
 F:	drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
+M:	Dan Murphy <dmurphy@ti.com>
 M:	Sriram Dash <sriram.dash@samsung.com>
 L:	linux-can@vger.kernel.org
 S:	Maintained
@@ -13710,6 +13711,15 @@ L: linux-arm-msm@vger.kernel.org
 S:	Maintained
 F:	drivers/iommu/qcom_iommu.c
 
+QUALCOMM RMNET DRIVER
+M:	Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+M:	Sean Tranchetti <stranche@codeaurora.org>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/qualcomm/rmnet/
+F:	Documentation/networking/device_drivers/qualcomm/rmnet.txt
+F:	include/linux/if_rmnet.h
+
 QUALCOMM TSENS THERMAL DRIVER
 M:	Amit Kucheria <amit.kucheria@linaro.org>
 L:	linux-pm@vger.kernel.org
@@ -16533,6 +16543,13 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Odd Fixes
 F:	sound/soc/codecs/tas571x*
 
+TI TCAN4X5X DEVICE DRIVER
+M:	Dan Murphy <dmurphy@ti.com>
+L:	linux-can@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+F:	drivers/net/can/m_can/tcan4x5x.c
+
 TI TRF7970A NFC DRIVER
 M:	Mark Greer <mgreer@animalcreek.com>
 L:	linux-wireless@vger.kernel.org

@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
 	int off, b_off;
+	int tcc_reg;
 
 	ctx->flags |= EBPF_SEEN_TC;
 	/*
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 	b_off = b_imm(this_idx + 1, ctx);
 	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
 	/*
-	 * if (--TCC < 0)
+	 * if (TCC-- < 0)
 	 * goto out;
 	 */
 	/* Delay slot */
-	emit_instr(ctx, daddiu, MIPS_R_T5,
-		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
 	b_off = b_imm(this_idx + 1, ctx);
-	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+	emit_instr(ctx, bltz, tcc_reg, b_off);
 	/*
 	 * prog = array->ptrs[index];
 	 * if (prog == NULL)

@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 		return -1;
 	emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
 
-	/* if (--TCC < 0)
+	/* if (TCC-- < 0)
 	 * goto out;
 	 */
 	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
 	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
 	if (is_13b_check(off, insn))
 		return -1;
-	emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
+	emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
 
 	/* prog = array->ptrs[index];
 	 * if (!prog)

@@ -2272,9 +2272,6 @@ static void bond_miimon_commit(struct bonding *bond)
 		} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 			/* make it immediately active */
 			bond_set_active_slave(slave);
-		} else if (slave != primary) {
-			/* prevent it from being the active one */
-			bond_set_backup_slave(slave);
 		}
 
 		slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
@@ -3702,32 +3699,35 @@ static int bond_neigh_init(struct neighbour *n)
 	const struct net_device_ops *slave_ops;
 	struct neigh_parms parms;
 	struct slave *slave;
-	int ret;
+	int ret = 0;
 
-	slave = bond_first_slave(bond);
+	rcu_read_lock();
+	slave = bond_first_slave_rcu(bond);
 	if (!slave)
-		return 0;
+		goto out;
 	slave_ops = slave->dev->netdev_ops;
 	if (!slave_ops->ndo_neigh_setup)
-		return 0;
+		goto out;
 
-	parms.neigh_setup = NULL;
-	parms.neigh_cleanup = NULL;
-	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
-	if (ret)
-		return ret;
-
-	/* Assign slave's neigh_cleanup to neighbour in case cleanup is called
-	 * after the last slave has been detached.  Assumes that all slaves
-	 * utilize the same neigh_cleanup (true at this writing as only user
-	 * is ipoib).
+	/* TODO: find another way [1] to implement this.
+	 * Passing a zeroed structure is fragile,
+	 * but at least we do not pass garbage.
+	 *
+	 * [1] One way would be that ndo_neigh_setup() never touch
+	 *     struct neigh_parms, but propagate the new neigh_setup()
+	 *     back to ___neigh_create() / neigh_parms_alloc()
 	 */
-	n->parms->neigh_cleanup = parms.neigh_cleanup;
+	memset(&parms, 0, sizeof(parms));
+	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
 
-	if (!parms.neigh_setup)
-		return 0;
+	if (ret)
+		goto out;
 
-	return parms.neigh_setup(n);
+	if (parms.neigh_setup)
+		ret = parms.neigh_setup(n);
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 /* The bonding ndo_neigh_setup is called at init time beofre any

@@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv
 		(&priv->regs->mb[bank][priv->mb_size * mb_index]);
 }
 
+static int flexcan_low_power_enter_ack(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_low_power_exit_ack(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
 static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
@@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
 static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
-	unsigned int ackval;
 	u32 reg_mcr;
 
 	reg_mcr = priv->read(&regs->mcr);
@@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
 	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
 			   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
 
-	/* get stop acknowledgment */
-	if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
-				     ackval, ackval & (1 << priv->stm.ack_bit),
-				     0, FLEXCAN_TIMEOUT_US))
-		return -ETIMEDOUT;
-
-	return 0;
+	return flexcan_low_power_enter_ack(priv);
 }
 
 static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
-	unsigned int ackval;
 	u32 reg_mcr;
 
 	/* remove stop request */
 	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
 			   1 << priv->stm.req_bit, 0);
 
-	/* get stop acknowledgment */
-	if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
-				     ackval, !(ackval & (1 << priv->stm.ack_bit)),
-				     0, FLEXCAN_TIMEOUT_US))
-		return -ETIMEDOUT;
-
 	reg_mcr = priv->read(&regs->mcr);
 	reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
 	priv->write(reg_mcr, &regs->mcr);
 
-	return 0;
+	return flexcan_low_power_exit_ack(priv);
 }
 
 static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
 static int flexcan_chip_enable(struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
-	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
 	reg = priv->read(&regs->mcr);
 	reg &= ~FLEXCAN_MCR_MDIS;
 	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-		udelay(10);
-
-	if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
-		return -ETIMEDOUT;
-
-	return 0;
+	return flexcan_low_power_exit_ack(priv);
 }
 
 static int flexcan_chip_disable(struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
-	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
 	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_MDIS;
 	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-		udelay(10);
-
-	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-		return -ETIMEDOUT;
-
-	return 0;
+	return flexcan_low_power_enter_ack(priv);
 }
 
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
@@ -1722,6 +1723,9 @@ static int __maybe_unused flexcan_resume(struct device *device)
 		netif_start_queue(dev);
 		if (device_may_wakeup(device)) {
 			disable_irq_wake(dev->irq);
+			err = flexcan_exit_stop_mode(priv);
+			if (err)
+				return err;
 		} else {
 			err = pm_runtime_force_resume(device);
 			if (err)
@@ -1767,14 +1771,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
 {
 	struct net_device *dev = dev_get_drvdata(device);
 	struct flexcan_priv *priv = netdev_priv(dev);
-	int err;
 
-	if (netif_running(dev) && device_may_wakeup(device)) {
+	if (netif_running(dev) && device_may_wakeup(device))
 		flexcan_enable_wakeup_irq(priv, false);
-		err = flexcan_exit_stop_mode(priv);
-		if (err)
-			return err;
-	}
 
 	return 0;
 }

@@ -101,6 +101,8 @@
 #define TCAN4X5X_MODE_STANDBY BIT(6)
 #define TCAN4X5X_MODE_NORMAL BIT(7)
 
+#define TCAN4X5X_DISABLE_WAKE_MSK	(BIT(31) | BIT(30))
+
 #define TCAN4X5X_SW_RESET BIT(2)
 
 #define TCAN4X5X_MCAN_CONFIGURED BIT(5)
@@ -338,6 +340,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
 	return ret;
 }
 
+static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
+{
+	struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+	return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+				  TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
+}
+
 static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 {
 	struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
@@ -345,8 +355,10 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 	tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
 						    GPIOD_OUT_HIGH);
 	if (IS_ERR(tcan4x5x->device_wake_gpio)) {
-		dev_err(cdev->dev, "device-wake gpio not defined\n");
-		return -EINVAL;
+		if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		tcan4x5x_disable_wake(cdev);
 	}
 
 	tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -354,6 +366,8 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 	if (IS_ERR(tcan4x5x->reset_gpio))
 		tcan4x5x->reset_gpio = NULL;
 
+	usleep_range(700, 1000);
+
 	tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
 							      "device-state",
 							      GPIOD_IN);
@@ -428,10 +442,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 
 	spi_set_drvdata(spi, priv);
 
-	ret = tcan4x5x_parse_config(mcan_class);
-	if (ret)
-		goto out_clk;
-
 	/* Configure the SPI bus */
 	spi->bits_per_word = 32;
 	ret = spi_setup(spi);
@@ -441,6 +451,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
 					&spi->dev, &tcan4x5x_regmap);
 
+	ret = tcan4x5x_parse_config(mcan_class);
+	if (ret)
+		goto out_clk;
+
 	tcan4x5x_power_enable(priv->power, 1);
 
 	ret = m_can_class_register(mcan_class);

@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
 	struct kvaser_cmd *cmd;
 	int err;
 
-	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
 	struct kvaser_cmd *cmd;
 	int rc;
 
-	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;
 
@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
 	struct kvaser_cmd *cmd;
 	int rc;
 
-	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;

@@ -60,6 +60,8 @@ enum xcan_reg {
 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
+	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
+	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
 };
 
 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
@@ -1809,6 +1811,11 @@ static int xcan_probe(struct platform_device *pdev)
 
 	pm_runtime_put(&pdev->dev);
 
+	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
+		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
+		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
+	}
+
 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
 		   hw_tx_max, priv->tx_max);

@@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 	 * frames should be flooded or not.
 	 */
 	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
-	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN;
+	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
 	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
 }
 
@@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
 
+	b53_br_egress_floods(ds, port, true, true);
+
 	if (dev->ops->irq_enable)
 		ret = dev->ops->irq_enable(dev, port);
 	if (ret)
@@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
 
 	b53_brcm_hdr_setup(dev->ds, port);
+
+	b53_br_egress_floods(dev->ds, port, true, true);
 }
 
 static void b53_enable_mib(struct b53_device *dev)
@@ -1821,19 +1825,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port,
 	struct b53_device *dev = ds->priv;
 	u16 uc, mc;
 
-	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc);
+	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
 	if (unicast)
 		uc |= BIT(port);
 	else
 		uc &= ~BIT(port);
-	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc);
+	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
 
-	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc);
+	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
 	if (multicast)
 		mc |= BIT(port);
 	else
 		mc &= ~BIT(port);
-	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc);
+	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+	b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+	if (multicast)
+		mc |= BIT(port);
+	else
+		mc &= ~BIT(port);
+	b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
 
 	return 0;

@@ -2,6 +2,7 @@
 config NET_DSA_MSCC_FELIX
 	tristate "Ocelot / Felix Ethernet switch support"
 	depends on NET_DSA && PCI
+	depends on NET_VENDOR_MICROSEMI
 	select MSCC_OCELOT_SWITCH
 	select NET_DSA_TAG_OCELOT
 	help

@@ -72,7 +72,7 @@
 /*****************************************************************************/
 /* ENA adaptive interrupt moderation settings */
 
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS		196
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS		64
 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS		0
 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION		1

@@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev,
 		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
 			ena_dev->intr_delay_resolution;
 
-	if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
-		coalesce->rx_coalesce_usecs =
-			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
-			* ena_dev->intr_delay_resolution;
+	coalesce->rx_coalesce_usecs =
+		ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+		* ena_dev->intr_delay_resolution;
 
 	coalesce->use_adaptive_rx_coalesce =
 		ena_com_get_adaptive_moderation_enabled(ena_dev);
@@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev,
 
 	ena_update_tx_rings_intr_moderation(adapter);
 
-	if (coalesce->use_adaptive_rx_coalesce) {
-		if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
-			ena_com_enable_adaptive_moderation(ena_dev);
-
-		return 0;
-	}
-
 	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
 							       coalesce->rx_coalesce_usecs);
 	if (rc)
@@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev,
 
 	ena_update_rx_rings_intr_moderation(adapter);
 
-	if (!coalesce->use_adaptive_rx_coalesce) {
-		if (ena_com_get_adaptive_moderation_enabled(ena_dev))
-			ena_com_disable_adaptive_moderation(ena_dev);
-	}
+	if (coalesce->use_adaptive_rx_coalesce &&
+	    !ena_com_get_adaptive_moderation_enabled(ena_dev))
+		ena_com_enable_adaptive_moderation(ena_dev);
+
+	if (!coalesce->use_adaptive_rx_coalesce &&
+	    ena_com_get_adaptive_moderation_enabled(ena_dev))
+		ena_com_disable_adaptive_moderation(ena_dev);
 
 	return 0;
 }

@@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
 	struct ena_ring *tx_ring, *rx_ring;
 
-	u32 tx_work_done;
-	u32 rx_work_done;
+	int tx_work_done;
+	int rx_work_done = 0;
 	int tx_budget;
 	int napi_comp_call = 0;
 	int ret;
@@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	}
 
 	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
-	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+	/* On netpoll the budget is zero and the handler should only clean the
+	 * tx completions.
+	 */
+	if (likely(budget))
+		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
 
 	/* If the device is about to reset or down, avoid unmask
 	 * the interrupt and return 0 so NAPI won't reschedule

@@ -313,7 +313,7 @@ struct ag71xx {
 	struct ag71xx_desc *stop_desc;
 	dma_addr_t stop_desc_dma;
 
-	int phy_if_mode;
+	phy_interface_t phy_if_mode;
 
 	struct delayed_work restart_work;
 	struct timer_list oom_timer;
@@ -1744,7 +1744,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 		eth_random_addr(ndev->dev_addr);
 	}
 
-	err = of_get_phy_mode(np, ag->phy_if_mode);
+	err = of_get_phy_mode(np, &ag->phy_if_mode);
 	if (err) {
 		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
 		goto err_free;

@@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
 		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
 			u32 func_config =
 				MF_CFG_RD(bp,
-					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  func_mf_config[BP_PATH(bp) + 2 * i].
 					  config);
 			func_num +=
 				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);

@@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
  */
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
-	bool global = false;
 	u32 error_recovered, error_unrecovered;
-	bool is_parity;
+	bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+	int vf_idx;
+
+	for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		if (vf)
+			vf->state = VF_LOST;
+	}
+#endif
 	DP(NETIF_MSG_HW, "Handling parity\n");
 	while (1) {
 		switch (bp->recovery_state) {

@@ -139,6 +139,7 @@ struct bnx2x_virtf {
 #define VF_ACQUIRED	1	/* VF acquired, but not initialized */
 #define VF_ENABLED	2	/* VF Enabled */
 #define VF_RESET	3	/* VF FLR'd, pending cleanup */
+#define VF_LOST		4	/* Recovery while VFs are loaded */
 
 	bool flr_clnup_stage;	/* true during flr cleanup */
 	bool malicious;		/* true if FW indicated so, until FLR */

@@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 {
 	int i;
 
+	if (vf->state == VF_LOST) {
+		/* Just ack the FW and return if VFs are lost
+		 * in case of parity error. VFs are supposed to be timedout
+		 * on waiting for PF response.
+		 */
+		DP(BNX2X_MSG_IOV,
+		   "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+		return;
+	}
+
 	/* check if tlv type is known */
 	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
 		/* Lock the per vf op mutex and note the locker's identity.

@@ -2001,6 +2001,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
 		u32 data1 = le32_to_cpu(cmpl->event_data1);
 
+		if (!bp->fw_health)
+			goto async_event_process_exit;
+
 		bp->fw_reset_timestamp = jiffies;
 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
 		if (!bp->fw_reset_min_dsecs)
@@ -4421,8 +4424,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
 			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
 
-	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
-		FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
@@ -6186,7 +6190,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
 		val = clamp_t(u16, tmr, 1,
 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
-		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
 		req->enables |=
 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
 	}
@@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		goto err_recovery_out;
-	if (!fw_health) {
-		fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
-		bp->fw_health = fw_health;
-		if (!fw_health) {
-			rc = -ENOMEM;
-			goto err_recovery_out;
-		}
-	}
 	fw_health->flags = le32_to_cpu(resp->flags);
 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
@@ -8796,6 +8792,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	if (fw_reset) {
 		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
 			bnxt_ulp_stop(bp);
+		bnxt_free_ctx_mem(bp);
+		kfree(bp->ctx);
+		bp->ctx = NULL;
 		rc = bnxt_fw_init_one(bp);
 		if (rc) {
 			set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9990,8 +9989,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
 	struct bnxt_fw_health *fw_health = bp->fw_health;
 	u32 val;
 
-	if (!fw_health || !fw_health->enabled ||
-	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
 		return;
 
 	if (fw_health->tmr_counter) {
@@ -10482,6 +10480,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
 }
 
+static void bnxt_alloc_fw_health(struct bnxt *bp)
+{
+	if (bp->fw_health)
+		return;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+		return;
+
+	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+	if (!bp->fw_health) {
+		netdev_warn(bp->dev, "Failed to allocate fw_health\n");
+		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+	}
+}
+
 static int bnxt_fw_init_one_p1(struct bnxt *bp)
 {
 	int rc;
@@ -10528,6 +10543,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
 			    rc);
 
+	bnxt_alloc_fw_health(bp);
 	rc = bnxt_hwrm_error_recovery_qcfg(bp);
 	if (rc)
 		netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
@@ -10609,6 +10625,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
 	if (rc)
 		return rc;
+
+	/* In case fw capabilities have changed, destroy the unneeded
+	 * reporters and create newly capable ones.
+	 */
+	bnxt_dl_fw_reporters_destroy(bp, false);
+	bnxt_dl_fw_reporters_create(bp);
 	bnxt_fw_init_one_p3(bp);
 	return 0;
 }
@@ -10751,8 +10773,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
 		return;
 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
-		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
-		    bp->fw_health) {
+		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
 			u32 val;
 
 			val = bnxt_fw_health_readl(bp,
@@ -11396,11 +11417,11 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnxt *bp = netdev_priv(dev);
 
-	if (BNXT_PF(bp)) {
+	if (BNXT_PF(bp))
 		bnxt_sriov_disable(bp);
-		bnxt_dl_unregister(bp);
-	}
 
+	bnxt_dl_fw_reporters_destroy(bp, true);
+	bnxt_dl_unregister(bp);
 	pci_disable_pcie_error_reporting(pdev);
 	unregister_netdev(dev);
 	bnxt_shutdown_tc(bp);
@@ -11415,6 +11436,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_dcb_free(bp);
 	kfree(bp->edev);
 	bp->edev = NULL;
+	kfree(bp->fw_health);
+	bp->fw_health = NULL;
 	bnxt_cleanup_pci(bp);
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
@@ -11875,8 +11898,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_cleanup_tc;
 
-	if (BNXT_PF(bp))
-		bnxt_dl_register(bp);
+	bnxt_dl_register(bp);
+	bnxt_dl_fw_reporters_create(bp);
 
 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
 		    board_info[ent->driver_data].name,

@@ -39,11 +39,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
 				     struct netlink_ext_ack *extack)
 {
 	struct bnxt *bp = devlink_health_reporter_priv(reporter);
-	struct bnxt_fw_health *health = bp->fw_health;
 	u32 val, health_status;
 	int rc;
 
-	if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
 		return 0;
 
 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
@@ -126,21 +125,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
 	.recover = bnxt_fw_fatal_recover,
 };
 
-static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 {
 	struct bnxt_fw_health *health = bp->fw_health;
 
-	if (!health)
+	if (!bp->dl || !health)
 		return;
 
-	health->fw_reporter =
-		devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
-					       0, false, bp);
-	if (IS_ERR(health->fw_reporter)) {
-		netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
-			    PTR_ERR(health->fw_reporter));
-		health->fw_reporter = NULL;
-	}
+	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
+		goto err_recovery;
 
 	health->fw_reset_reporter =
 		devlink_health_reporter_create(bp->dl,
@@ -150,8 +143,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 		netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
 			    PTR_ERR(health->fw_reset_reporter));
 		health->fw_reset_reporter = NULL;
+		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
 	}
 
+err_recovery:
+	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+		return;
+
+	if (!health->fw_reporter) {
+		health->fw_reporter =
+			devlink_health_reporter_create(bp->dl,
+						       &bnxt_dl_fw_reporter_ops,
+						       0, false, bp);
+		if (IS_ERR(health->fw_reporter)) {
+			netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+				    PTR_ERR(health->fw_reporter));
+			health->fw_reporter = NULL;
+			bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+			return;
+		}
+	}
+
+	if (health->fw_fatal_reporter)
+		return;
+
 	health->fw_fatal_reporter =
 		devlink_health_reporter_create(bp->dl,
 					       &bnxt_dl_fw_fatal_reporter_ops,
@@ -160,24 +175,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 		netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
 			    PTR_ERR(health->fw_fatal_reporter));
 		health->fw_fatal_reporter = NULL;
+		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
 	}
 }
 
-static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
 {
 	struct bnxt_fw_health *health = bp->fw_health;
 
-	if (!health)
+	if (!bp->dl || !health)
 		return;
 
-	if (health->fw_reporter)
-		devlink_health_reporter_destroy(health->fw_reporter);
-
-	if (health->fw_reset_reporter)
+	if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
+	    health->fw_reset_reporter) {
 		devlink_health_reporter_destroy(health->fw_reset_reporter);
+		health->fw_reset_reporter = NULL;
+	}
 
-	if (health->fw_fatal_reporter)
+	if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
+		return;
+
+	if (health->fw_reporter) {
+		devlink_health_reporter_destroy(health->fw_reporter);
+		health->fw_reporter = NULL;
+	}
+
+	if (health->fw_fatal_reporter) {
 		devlink_health_reporter_destroy(health->fw_fatal_reporter);
+		health->fw_fatal_reporter = NULL;
+	}
 }
 
@@ -185,9 +211,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
 	struct bnxt_fw_health *fw_health = bp->fw_health;
 	struct bnxt_fw_reporter_ctx fw_reporter_ctx;
 
-	if (!fw_health)
-		return;
-
 	fw_reporter_ctx.sp_event = event;
 	switch (event) {
 	case BNXT_FW_RESET_NOTIFY_SP_EVENT:
@@ -247,6 +270,8 @@ static const struct devlink_ops bnxt_dl_ops = {
 	.flash_update	  = bnxt_dl_flash_update,
 };
 
+static const struct devlink_ops bnxt_vf_dl_ops;
+
 enum bnxt_dl_param_id {
 	BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
 	BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
@@ -460,7 +485,10 @@ int bnxt_dl_register(struct bnxt *bp)
 		return -ENOTSUPP;
 	}
 
-	dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+	if (BNXT_PF(bp))
+		dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+	else
+		dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
 	if (!dl) {
 		netdev_warn(bp->dev, "devlink_alloc failed");
 		return -ENOMEM;
@@ -479,6 +507,9 @@ int bnxt_dl_register(struct bnxt *bp)
 		goto err_dl_free;
 	}
 
+	if (!BNXT_PF(bp))
+		return 0;
+
 	rc = devlink_params_register(dl, bnxt_dl_params,
 				     ARRAY_SIZE(bnxt_dl_params));
 	if (rc) {
@@ -506,8 +537,6 @@ int bnxt_dl_register(struct bnxt *bp)
 	devlink_params_publish(dl);
 
-	bnxt_dl_fw_reporters_create(bp);
-
 	return 0;
 
 err_dl_port_unreg:
@@ -530,12 +559,14 @@ void bnxt_dl_unregister(struct bnxt *bp)
 	if (!dl)
 		return;
 
-	bnxt_dl_fw_reporters_destroy(bp);
-	devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
-				       ARRAY_SIZE(bnxt_dl_port_params));
-	devlink_port_unregister(&bp->dl_port);
-	devlink_params_unregister(dl, bnxt_dl_params,
-				  ARRAY_SIZE(bnxt_dl_params));
+	if (BNXT_PF(bp)) {
+		devlink_port_params_unregister(&bp->dl_port,
+					       bnxt_dl_port_params,
+					       ARRAY_SIZE(bnxt_dl_port_params));
+		devlink_port_unregister(&bp->dl_port);
+		devlink_params_unregister(dl, bnxt_dl_params,
+					  ARRAY_SIZE(bnxt_dl_params));
+	}
 	devlink_unregister(dl);
 	devlink_free(dl);
 }

@@ -58,6 +58,8 @@ struct bnxt_dl_nvm_param {
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
 void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_fw_reporters_create(struct bnxt *bp);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
 int bnxt_dl_register(struct bnxt *bp);
 void bnxt_dl_unregister(struct bnxt *bp);

@@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
 			}
 		}
 
-		if (info->dest_buf)
-			memcpy(info->dest_buf + off, dma_buf, len);
+		if (info->dest_buf) {
+			if ((info->seg_start + off + len) <=
+			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+				memcpy(info->dest_buf + off, dma_buf, len);
+			} else {
+				rc = -ENOBUFS;
+				break;
+			}
+		}
 
 		if (cmn_req->req_type ==
 		    cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
@@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
 					   u16 segment_id, u32 *seg_len,
-					   void *buf, u32 offset)
+					   void *buf, u32 buf_len, u32 offset)
 {
 	struct hwrm_dbg_coredump_retrieve_input req = {0};
 	struct bnxt_hwrm_dbg_dma_info info = {NULL};
@@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
 					seq_no);
 	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
 				     data_len);
-	if (buf)
+	if (buf) {
 		info.dest_buf = buf + offset;
+		info.buf_len = buf_len;
+		info.seg_start = offset;
+	}
 
 	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
 	if (!rc)
@@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 {
 	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+	u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
 	struct coredump_segment_record *seg_record = NULL;
-	u32 offset = 0, seg_hdr_len, seg_record_len;
 	struct bnxt_coredump_segment_hdr seg_hdr;
 	struct bnxt_coredump coredump = {NULL};
 	time64_t start_time;
 	u16 start_utc;
 	int rc = 0, i;
 
+	if (buf)
+		buf_len = *dump_len;
+
 	start_time = ktime_get_real_seconds();
 	start_utc = sys_tz.tz_minuteswest * 60;
 	seg_hdr_len = sizeof(seg_hdr);
@@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 		u32 duration = 0, seg_len = 0;
 		unsigned long start, end;
 
+		if (buf && ((offset + seg_hdr_len) >
+			    BNXT_COREDUMP_BUF_LEN(buf_len))) {
+			rc = -ENOBUFS;
+			goto err;
+		}
+
 		start = jiffies;
 
 		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
@@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 		/* Write segment data into the buffer */
 		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
-						     &seg_len, buf,
+						     &seg_len, buf, buf_len,
 						     offset + seg_hdr_len);
-		if (rc)
+		if (rc && rc == -ENOBUFS)
+			goto err;
+		else if (rc)
 			netdev_err(bp->dev,
 				   "Failed to retrieve coredump for seg = %d\n",
 				   seg_record->segment_id);
@@ -3316,7 +3337,8 @@ err:
 			   rc);
 	kfree(coredump.data);
 	*dump_len += sizeof(struct bnxt_coredump_record);
-
+	if (rc == -ENOBUFS)
+		netdev_err(bp->dev, "Firmware returned large coredump buffer");
 	return rc;
 }

@@ -31,6 +31,8 @@ struct bnxt_coredump {
 	u16		total_segs;
 };
 
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
 struct bnxt_hwrm_dbg_dma_info {
 	void *dest_buf;
 	int dest_buf_size;
@@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
 	u16 seq_off;
 	u16 data_len_off;
 	u16 segs;
+	u32 seg_start;
+	u32 buf_len;
 };
 
 struct hwrm_dbg_cmn_input {

@@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 {
 	struct net_device *dev = edev->net;
 	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_hw_resc *hw_resc;
 	int max_idx, max_cp_rings;
 	int avail_msix, idx;
+	int total_vecs;
 	int rc = 0;
 
 	ASSERT_RTNL();
@@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	}
 	edev->ulp_tbl[ulp_id].msix_base = idx;
 	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
-	if (bp->total_irqs < (idx + avail_msix)) {
+	hw_resc = &bp->hw_resc;
+	total_vecs = idx + avail_msix;
+	if (bp->total_irqs < total_vecs ||
+	    (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
 		if (netif_running(dev)) {
 			bnxt_close_nic(bp, true, false);
 			rc = bnxt_open_nic(bp, true, false);
@@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	}
 
 	if (BNXT_NEW_RM(bp)) {
-		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 		int resv_msix;
 
 		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;

@@ -664,9 +664,30 @@ static int macb_mii_probe(struct net_device *dev)
 	return 0;
 }
 
+static int macb_mdiobus_register(struct macb *bp)
+{
+	struct device_node *child, *np = bp->pdev->dev.of_node;
+
+	/* Only create the PHY from the device tree if at least one PHY is
+	 * described. Otherwise scan the entire MDIO bus. We do this to support
+	 * old device tree that did not follow the best practices and did not
+	 * describe their network PHYs.
+	 */
+	for_each_available_child_of_node(np, child)
+		if (of_mdiobus_child_is_phy(child)) {
+			/* The loop increments the child refcount,
+			 * decrement it before returning.
+			 */
+			of_node_put(child);
+
+			return of_mdiobus_register(bp->mii_bus, np);
+		}
+
+	return mdiobus_register(bp->mii_bus);
+}
+
 static int macb_mii_init(struct macb *bp)
 {
-	struct device_node *np;
 	int err = -ENXIO;
 
 	/* Enable management port */
@@ -688,9 +709,7 @@ static int macb_mii_init(struct macb *bp)
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
-	np = bp->pdev->dev.of_node;
-	err = of_mdiobus_register(bp->mii_bus, np);
+	err = macb_mdiobus_register(bp);
 	if (err)
 		goto err_out_free_mdiobus;

@@ -3048,6 +3048,9 @@ static int sge_queue_entries(const struct adapter *adap)
 	int tot_uld_entries = 0;
 	int i;
 
+	if (!is_uld(adap))
+		goto lld_only;
+
 	mutex_lock(&uld_mutex);
 	for (i = 0; i < CXGB4_TX_MAX; i++)
 		tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
@@ -3058,6 +3061,7 @@ static int sge_queue_entries(const struct adapter *adap)
 	}
 	mutex_unlock(&uld_mutex);
 
+lld_only:
 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
 	       (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
 	       tot_uld_entries +

@@ -145,6 +145,10 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
 			kfree(adap->sge.eohw_rxq);
 			return -ENOMEM;
 		}
+
+		refcount_set(&adap->tc_mqprio->refcnt, 1);
+	} else {
+		refcount_inc(&adap->tc_mqprio->refcnt);
 	}
 
 	if (!(adap->flags & CXGB4_USING_MSIX))
@@ -205,7 +209,6 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
 		cxgb4_enable_rx(adap, &eorxq->rspq);
 	}
 
-	refcount_inc(&adap->tc_mqprio->refcnt);
 	return 0;
 
 out_free_msix:
@@ -234,9 +237,10 @@ out_free_queues:
 		t4_sge_free_ethofld_txq(adap, eotxq);
 	}
 
-	kfree(adap->sge.eohw_txq);
-	kfree(adap->sge.eohw_rxq);
-
+	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+		kfree(adap->sge.eohw_txq);
+		kfree(adap->sge.eohw_rxq);
+	}
 	return ret;
 }

@@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev)
 
 	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
 		dev_warn(geth->dev, "TX queue base is not aligned\n");
+		dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+				  desc_ring, port->txq_dma_base);
 		kfree(skb_tab);
 		return -ENOMEM;
 	}

@@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
 	irq = mc_dev->irqs[0];
 	ptp_qoriq->irq = irq->msi_desc->irq;
 
-	err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
-					dpaa2_ptp_irq_handler_thread,
-					IRQF_NO_SUSPEND | IRQF_ONESHOT,
-					dev_name(dev), ptp_qoriq);
+	err = request_threaded_irq(ptp_qoriq->irq, NULL,
+				   dpaa2_ptp_irq_handler_thread,
+				   IRQF_NO_SUSPEND | IRQF_ONESHOT,
+				   dev_name(dev), ptp_qoriq);
 	if (err < 0) {
 		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
 		goto err_free_mc_irq;
@@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
 			       DPRTC_IRQ_INDEX, 1);
 	if (err < 0) {
 		dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
-		goto err_free_mc_irq;
+		goto err_free_threaded_irq;
 	}
 
 	err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
 	if (err)
-		goto err_free_mc_irq;
+		goto err_free_threaded_irq;
 
 	dpaa2_phc_index = ptp_qoriq->phc_index;
 	dev_set_drvdata(dev, ptp_qoriq);
 
 	return 0;
 
+err_free_threaded_irq:
+	free_irq(ptp_qoriq->irq, ptp_qoriq);
 err_free_mc_irq:
 	fsl_mc_free_irqs(mc_dev);
 err_unmap:


@@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_tx_timestamp(skb);
 
 	hip04_set_xmit_desc(priv, phys);
-	priv->tx_head = TX_NEXT(tx_head);
 	count++;
 	netdev_sent_queue(ndev, skb->len);
+	priv->tx_head = TX_NEXT(tx_head);
 
 	stats->tx_bytes += skb->len;
 	stats->tx_packets++;


@@ -184,7 +184,7 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
 			netdev_err(netdev, "Device down!\n");
 			return -ENODEV;
 		}
-		if (retry--)
+		if (!retry--)
 			break;
 		if (wait_for_completion_timeout(comp_done, div_timeout))
 			return 0;
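
The one-character fix above inverts the loop-exit test: `if (retry--)` bails out while retries remain, `if (!retry--)` bails out only when they are exhausted. A standalone sketch of the difference (illustrative, not driver code):

#include <stdio.h>

/* Returns how many attempts actually run before the guard breaks out. */
static int attempts(int retry, int negate)
{
    int n = 0;

    for (;;) {
        if (negate ? !retry-- : retry--)
            break;
        n++;
        if (n > 10)     /* safety stop for the demo */
            break;
    }
    return n;
}

int main(void)
{
    /* With 3 retries, the buggy test breaks out immediately, ... */
    printf("if (retry--):  %d attempts\n", attempts(3, 0));
    /* ...while the fixed test runs until the counter is used up. */
    printf("if (!retry--): %d attempts\n", attempts(3, 1));
    return 0;
}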


@@ -1152,7 +1152,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
 
 static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
 {
-	return !!vsi->xdp_prog;
+	return !!READ_ONCE(vsi->xdp_prog);
 }
 
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);


@@ -6823,8 +6823,8 @@ void i40e_down(struct i40e_vsi *vsi)
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		i40e_clean_tx_ring(vsi->tx_rings[i]);
 		if (i40e_enabled_xdp_vsi(vsi)) {
-			/* Make sure that in-progress ndo_xdp_xmit
-			 * calls are completed.
+			/* Make sure that in-progress ndo_xdp_xmit and
+			 * ndo_xsk_wakeup calls are completed.
 			 */
 			synchronize_rcu();
 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
@@ -12546,8 +12546,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 
 	old_prog = xchg(&vsi->xdp_prog, prog);
 
-	if (need_reset)
+	if (need_reset) {
+		if (!prog)
+			/* Wait until ndo_xsk_wakeup completes. */
+			synchronize_rcu();
 		i40e_reset_and_rebuild(pf, true, true);
+	}
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);


@@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *ring;
 
+	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+		return -ENETDOWN;
+
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;


@@ -10261,7 +10261,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 
 	/* If transitioning XDP modes reconfigure rings */
 	if (need_reset) {
-		int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
+		int err;
+
+		if (!prog)
+			/* Wait until ndo_xsk_wakeup completes. */
+			synchronize_rcu();
+		err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
 		if (err) {
 			rcu_assign_pointer(adapter->xdp_prog, old_prog);


@@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 	if (qid >= adapter->num_xdp_queues)
 		return -ENXIO;
 
-	if (!adapter->xdp_ring[qid]->xsk_umem)
+	ring = adapter->xdp_ring[qid];
+
+	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
+		return -ENETDOWN;
+
+	if (!ring->xsk_umem)
 		return -ENXIO;
 
-	ring = adapter->xdp_ring[qid];
-
 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
 		u64 eics = BIT_ULL(ring->q_vector->v_idx);


@@ -3680,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev)
 		valid = true;
 	}
 
-	if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+	if (priv->hw_version == MVPP22 && port->link_irq) {
 		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
 				  dev->name, port);
 		if (err) {


@@ -760,7 +760,7 @@ enum {
 	MLX5E_STATE_OPENED,
 	MLX5E_STATE_DESTROYING,
 	MLX5E_STATE_XDP_TX_ENABLED,
-	MLX5E_STATE_XDP_OPEN,
+	MLX5E_STATE_XDP_ACTIVE,
 };
 
 struct mlx5e_rqt {


@@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
 {
 	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+
+	if (priv->channels.params.xdp_prog)
+		set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
 static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
 {
+	if (priv->channels.params.xdp_prog)
+		clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
+
 	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
-	/* let other device's napi(s) see our new state */
+	/* Let other device's napi(s) and XSK wakeups see our new state. */
 	synchronize_rcu();
 }
 
@@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
 	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
 }
 
-static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
-{
-	set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
-{
-	clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
+static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
 {
-	return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+	return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)


@@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 {
 	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 	napi_synchronize(&c->napi);
+	synchronize_rcu(); /* Sync with the XSK wakeup. */
 
 	mlx5e_close_rq(&c->xskrq);
 	mlx5e_close_cq(&c->xskrq.cq);


@@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 	struct mlx5e_channel *c;
 	u16 ix;
 
-	if (unlikely(!mlx5e_xdp_is_open(priv)))
+	if (unlikely(!mlx5e_xdp_is_active(priv)))
 		return -ENETDOWN;
 
 	if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))


@@ -3000,12 +3000,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 int mlx5e_open_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool is_xdp = priv->channels.params.xdp_prog;
 	int err;
 
 	set_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (is_xdp)
-		mlx5e_xdp_set_open(priv);
 
 	err = mlx5e_open_channels(priv, &priv->channels);
 	if (err)
@@ -3020,8 +3017,6 @@ int mlx5e_open_locked(struct net_device *netdev)
 	return 0;
 
 err_clear_state_opened_flag:
-	if (is_xdp)
-		mlx5e_xdp_set_closed(priv);
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
 }
@@ -3053,8 +3048,6 @@ int mlx5e_close_locked(struct net_device *netdev)
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		return 0;
 
-	if (priv->channels.params.xdp_prog)
-		mlx5e_xdp_set_closed(priv);
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
 	netif_carrier_off(priv->netdev);
@@ -4371,16 +4364,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 	return 0;
 }
 
-static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
-{
-	if (priv->channels.params.xdp_prog)
-		mlx5e_xdp_set_open(priv);
-	else
-		mlx5e_xdp_set_closed(priv);
-
-	return 0;
-}
-
 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4415,7 +4398,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
 		old_prog = priv->channels.params.xdp_prog;
 
-		err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
+		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
 		if (err)
 			goto unlock;
 	} else {


@@ -5742,8 +5742,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
 		return;
 
+	/* Multipath routes are first added to the FIB trie and only then
+	 * notified. If we vetoed the addition, we will get a delete
+	 * notification for a route we do not have. Therefore, do not warn if
+	 * route was not found.
+	 */
 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
-	if (WARN_ON(!fib6_entry))
+	if (!fib6_entry)
 		return;
 
 	/* If not all the nexthops are deleted, then only reduce the nexthop


@@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 	freed_stats_id = priv->stats_ring_size;
 	/* Check for unallocated entries first. */
 	if (priv->stats_ids.init_unalloc > 0) {
-		if (priv->active_mem_unit == priv->total_mem_units) {
-			priv->stats_ids.init_unalloc--;
-			priv->active_mem_unit = 0;
-		}
-
 		*stats_context_id =
 			FIELD_PREP(NFP_FL_STAT_ID_STAT,
 				   priv->stats_ids.init_unalloc - 1) |
 			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
 				   priv->active_mem_unit);
-		priv->active_mem_unit++;
+
+		if (++priv->active_mem_unit == priv->total_mem_units) {
+			priv->stats_ids.init_unalloc--;
+			priv->active_mem_unit = 0;
+		}
+
 		return 0;
 	}
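
The nfp change converts a before-the-fact wrap check into `if (++priv->active_mem_unit == priv->total_mem_units)`: the unit counter advances right after being consumed, so the (row, unit) pair handed out is always one that exists. A toy allocator with the same shape (sizes and names invented):

#include <stdio.h>

#define TOTAL_UNITS 3

static unsigned int active_unit;
static unsigned int unalloc = 2;        /* rows of fresh ids left */

/* Hand out (row, unit) pairs; advance the unit and wrap after use. */
static int get_id(unsigned int *row, unsigned int *unit)
{
    if (!unalloc)
        return -1;

    *row = unalloc - 1;
    *unit = active_unit;

    if (++active_unit == TOTAL_UNITS) { /* wrap after consuming */
        unalloc--;
        active_unit = 0;
    }
    return 0;
}

int main(void)
{
    unsigned int row, unit;

    while (get_id(&row, &unit) == 0)
        printf("id: row %u, unit %u\n", row, unit);
    return 0;
}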


@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
 	netif_addr_lock_bh(ndev);
 
 	mc_count = netdev_mc_count(ndev);
-	if (mc_count < 64) {
+	if (mc_count <= 64) {
 		netdev_for_each_mc_addr(ha, ndev) {
 			ether_addr_copy(temp, ha->addr);
 			temp += ETH_ALEN;
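
The qede fix is a pure off-by-one: `mc_count` is a quantity, and a device that can hold 64 exact-match entries must still accept a list of exactly 64 addresses instead of falling back to multicast-promiscuous mode. Sketched in miniature (limit constant invented):

#include <stdio.h>

#define MC_MAX 64       /* device capacity in filter entries */

int main(void)
{
    int mc_count = 64;  /* exactly at capacity */

    /* A count is compared inclusively against a capacity. */
    printf("old: %s\n", mc_count <  MC_MAX ? "exact" : "forced promisc");
    printf("new: %s\n", mc_count <= MC_MAX ? "exact" : "forced promisc");
    return 0;
}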


@@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
 	} else {
 		rxq->rx_buf_seg_size = PAGE_SIZE;
+		edev->ndev->features &= ~NETIF_F_GRO_HW;
 	}
 
 	/* Allocate the parallel driver ring for Rx buffers */
@@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 		}
 	}
 
+	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 	if (!edev->gro_disable)
 		qede_set_tpa_param(rxq);
 err:
@@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev)
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
 			 edev->ndev->name, queue_id);
 	}
-
-	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)


@@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 	int err;
 
 	for (i = 0; i < qdev->num_large_buffers; i++) {
+		lrg_buf_cb = &qdev->lrg_buf[i];
+		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
 		skb = netdev_alloc_skb(qdev->ndev,
 				       qdev->lrg_buffer_len);
 		if (unlikely(!skb)) {
@@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 			ql_free_large_buffers(qdev);
 			return -ENOMEM;
 		} else {
-			lrg_buf_cb = &qdev->lrg_buf[i];
-			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
 			lrg_buf_cb->index = i;
-			lrg_buf_cb->skb = skb;
 			/*
 			 * We save some space to copy the ethhdr from first
 			 * buffer
 			 */
@@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 				return -ENOMEM;
 			}
 
+			lrg_buf_cb->skb = skb;
 			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
 			dma_unmap_len_set(lrg_buf_cb, maplen,
 					  qdev->lrg_buffer_len -


@@ -1472,6 +1472,12 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		n_xdp_tx = num_possible_cpus();
 		n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
 
+		vec_count = pci_msix_vec_count(efx->pci_dev);
+		if (vec_count < 0)
+			return vec_count;
+
+		max_channels = min_t(unsigned int, vec_count, max_channels);
+
 		/* Check resources.
 		 * We need a channel per event queue, plus a VI per tx queue.
 		 * This may be more pessimistic than it needs to be.
@@ -1493,11 +1499,6 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 			  n_xdp_tx, n_xdp_ev);
 	}
 
-	n_channels = min(n_channels, max_channels);
-
-	vec_count = pci_msix_vec_count(efx->pci_dev);
-	if (vec_count < 0)
-		return vec_count;
-
 	if (vec_count < n_channels) {
 		netif_err(efx, drv, efx->net_dev,
 			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
@@ -1507,11 +1508,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		n_channels = vec_count;
 	}
 
-	efx->n_channels = n_channels;
+	n_channels = min(n_channels, max_channels);
 
-	/* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
-	if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
-		n_channels--;
+	efx->n_channels = n_channels;
 
 	/* Ignore XDP tx channels when creating rx channels. */
 	n_channels -= efx->n_xdp_channels;
@@ -1531,11 +1530,10 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		efx->n_rx_channels = n_channels;
 	}
 
-	if (efx->n_xdp_channels)
-		efx->xdp_channel_offset = efx->tx_channel_offset +
-					  efx->n_tx_channels;
-	else
-		efx->xdp_channel_offset = efx->n_channels;
+	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+	efx->xdp_channel_offset = n_channels;
 
 	netif_dbg(efx, drv, efx->net_dev,
 		  "Allocating %u RX channels\n",
@@ -1550,6 +1548,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 static int efx_probe_interrupts(struct efx_nic *efx)
 {
 	unsigned int extra_channels = 0;
+	unsigned int rss_spread;
 	unsigned int i, j;
 	int rc;
 
@@ -1631,8 +1630,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
 		if (!efx->extra_channel_type[i])
 			continue;
-		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
-		    efx->n_channels <= extra_channels) {
+		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
 			efx->extra_channel_type[i]->handle_no_channel(efx);
 		} else {
 			--j;
@@ -1643,16 +1641,17 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		}
 	}
 
+	rss_spread = efx->n_rx_channels;
 	/* RSS might be usable on VFs even if it is disabled on the PF */
 #ifdef CONFIG_SFC_SRIOV
 	if (efx->type->sriov_wanted) {
-		efx->rss_spread = ((efx->n_rx_channels > 1 ||
+		efx->rss_spread = ((rss_spread > 1 ||
 				    !efx->type->sriov_wanted(efx)) ?
-				   efx->n_rx_channels : efx_vf_size(efx));
+				   rss_spread : efx_vf_size(efx));
 		return 0;
 	}
 #endif
-	efx->rss_spread = efx->n_rx_channels;
+	efx->rss_spread = rss_spread;
 
 	return 0;
 }


@@ -1533,9 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-	return efx_channel_is_xdp_tx(channel) ||
-	       (channel->type && channel->type->want_txqs &&
-		channel->type->want_txqs(channel));
+	return true;
 }
 
 static inline struct efx_tx_queue *


@@ -96,11 +96,12 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+				      XDP_PACKET_HEADROOM,
 				      EFX_RX_BUF_ALIGNMENT);
 	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
 		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
-		 (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
+		 efx->rx_page_buf_step);
 	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
 				  efx->rx_bufs_per_page;
 	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -190,14 +191,13 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 	page_offset = sizeof(struct efx_rx_page_state);
 
 	do {
-		page_offset += XDP_PACKET_HEADROOM;
-		dma_addr += XDP_PACKET_HEADROOM;
-
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
-		rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+		rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+				   XDP_PACKET_HEADROOM;
 		rx_buf->page = page;
-		rx_buf->page_offset = page_offset + efx->rx_ip_align;
+		rx_buf->page_offset = page_offset + efx->rx_ip_align +
+				      XDP_PACKET_HEADROOM;
 		rx_buf->len = efx->rx_dma_len;
 		rx_buf->flags = 0;
 		++rx_queue->added_count;


@@ -365,9 +365,8 @@ struct dma_features {
 	unsigned int arpoffsel;
 };
 
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+/* RX Buffer size must be multiple of 4/8/16 bytes */
+#define BUF_SIZE_16KiB 16368
 #define BUF_SIZE_8KiB 8188
 #define BUF_SIZE_4KiB 4096
 #define BUF_SIZE_2KiB 2048


@@ -343,6 +343,8 @@
 #define XGMAC_DMA_CH_RX_CONTROL(x)	(0x00003108 + (0x80 * (x)))
 #define XGMAC_RxPBL			GENMASK(21, 16)
 #define XGMAC_RxPBL_SHIFT		16
+#define XGMAC_RBSZ			GENMASK(14, 1)
+#define XGMAC_RBSZ_SHIFT		1
 #define XGMAC_RXST			BIT(0)
 #define XGMAC_DMA_CH_TxDESC_HADDR(x)	(0x00003110 + (0x80 * (x)))
 #define XGMAC_DMA_CH_TxDESC_LADDR(x)	(0x00003114 + (0x80 * (x)))


@@ -482,7 +482,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
 	u32 value;
 
 	value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
-	value |= bfsize << 1;
+	value &= ~XGMAC_RBSZ;
+	value |= bfsize << XGMAC_RBSZ_SHIFT;
 	writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
 }
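
This is the canonical read-modify-write fix for a register bitfield: without the `value &= ~XGMAC_RBSZ` step, bits left over from a previously programmed (larger) buffer size are OR-ed together with the new value. A self-contained sketch of the failure and the fix (the mask mirrors GENMASK(14, 1); the register is faked as a plain variable):

#include <stdio.h>
#include <stdint.h>

#define RBSZ_MASK  0x7ffeu      /* bits 14:1 */
#define RBSZ_SHIFT 1

static uint32_t reg;            /* stands in for the MMIO register */

static void set_bfsize_buggy(uint32_t bfsize)
{
    reg |= bfsize << RBSZ_SHIFT;                 /* stale bits survive */
}

static void set_bfsize_fixed(uint32_t bfsize)
{
    reg &= ~RBSZ_MASK;                           /* clear the field first */
    reg |= (bfsize << RBSZ_SHIFT) & RBSZ_MASK;
}

int main(void)
{
    reg = 0;
    set_bfsize_buggy(0x1000);   /* program a large size */
    set_bfsize_buggy(0x100);    /* shrink: old bits remain set */
    printf("buggy field: 0x%x\n", (unsigned)((reg & RBSZ_MASK) >> RBSZ_SHIFT));

    reg = 0;
    set_bfsize_fixed(0x1000);
    set_bfsize_fixed(0x100);    /* shrink works */
    printf("fixed field: 0x%x\n", (unsigned)((reg & RBSZ_MASK) >> RBSZ_SHIFT));
    return 0;
}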


@@ -46,7 +46,7 @@
 #include "dwxgmac2.h"
 #include "hwif.h"
 
-#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
 
 /* Module parameters */
@@ -1109,7 +1109,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 {
 	int ret = bufsize;
 
-	if (mtu >= BUF_SIZE_4KiB)
+	if (mtu >= BUF_SIZE_8KiB)
+		ret = BUF_SIZE_16KiB;
+	else if (mtu >= BUF_SIZE_4KiB)
 		ret = BUF_SIZE_8KiB;
 	else if (mtu >= BUF_SIZE_2KiB)
 		ret = BUF_SIZE_4KiB;
@@ -1293,19 +1295,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	int ret = -ENOMEM;
-	int bfsize = 0;
 	int queue;
 	int i;
 
-	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
-	if (bfsize < 0)
-		bfsize = 0;
-
-	if (bfsize < BUF_SIZE_16KiB)
-		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
-	priv->dma_buf_sz = bfsize;
-
 	/* RX INITIALIZATION */
 	netif_dbg(priv, probe, priv->dev,
 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
@@ -1347,8 +1339,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 		}
 	}
 
-	buf_sz = bfsize;
-
 	return 0;
 
 err_init_rx_buffers:
@@ -2658,6 +2648,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	int bfsize = 0;
 	u32 chan;
 	int ret;
 
@@ -2677,7 +2668,16 @@ static int stmmac_open(struct net_device *dev)
 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
 	priv->xstats.threshold = tc;
 
-	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+	if (bfsize < 0)
+		bfsize = 0;
+
+	if (bfsize < BUF_SIZE_16KiB)
+		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+	priv->dma_buf_sz = bfsize;
+	buf_sz = bfsize;
+
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
 
 	ret = alloc_dma_desc_resources(priv);
@@ -3053,8 +3053,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_q->tx_count_frames = 0;
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
-	} else {
-		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	/* We've used all descriptors we need for this skb, however,
@@ -3125,6 +3123,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
@@ -3276,8 +3275,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_q->tx_count_frames = 0;
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
-	} else {
-		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	/* We've used all descriptors we need for this skb, however,
@@ -3366,6 +3363,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+	stmmac_tx_timer_arm(priv, queue);
 
 	return NETDEV_TX_OK;
@@ -3646,8 +3644,9 @@ read_again:
 			 * feature is always disabled and packets need to be
 			 * stripped manually.
 			 */
-			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
-			    unlikely(status != llc_snap)) {
+			if (likely(!(status & rx_not_ls)) &&
+			    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+			     unlikely(status != llc_snap))) {
 				if (buf2_len)
 					buf2_len -= ETH_FCS_LEN;
 				else
@@ -3829,12 +3828,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	int txfifosz = priv->plat->tx_fifo_size;
+
+	if (txfifosz == 0)
+		txfifosz = priv->dma_cap.tx_fifo_size;
+
+	txfifosz /= priv->plat->tx_queues_to_use;
 
 	if (netif_running(dev)) {
 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
 		return -EBUSY;
 	}
 
+	new_mtu = STMMAC_ALIGN(new_mtu);
+
+	/* If condition true, FIFO is too small or MTU too large */
+	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+		return -EINVAL;
+
 	dev->mtu = new_mtu;
 
 	netdev_update_features(dev);
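
Taken together, the stmmac hunks move buffer-size selection to open time and make stmmac_change_mtu() reject an MTU that the per-queue share of the TX FIFO cannot hold. The threshold ladder and the check can be sketched in isolation (constants copied from the hunks above; the FIFO size below is an invented example):

#include <stdio.h>

#define BUF_SIZE_16KiB 16368
#define BUF_SIZE_8KiB   8188
#define BUF_SIZE_4KiB   4096
#define BUF_SIZE_2KiB   2048

/* Mirrors the fixed stmmac_set_bfsize() ladder shown above. */
static int set_bfsize(int mtu, int bufsize)
{
    int ret = bufsize;

    if (mtu >= BUF_SIZE_8KiB)
        ret = BUF_SIZE_16KiB;
    else if (mtu >= BUF_SIZE_4KiB)
        ret = BUF_SIZE_8KiB;
    else if (mtu >= BUF_SIZE_2KiB)
        ret = BUF_SIZE_4KiB;
    /* smaller MTUs keep the passed-in default */
    return ret;
}

int main(void)
{
    int txfifosz = 4096;        /* invented per-queue FIFO share */
    int mtus[] = { 1500, 3000, 5000, 9000 };
    unsigned int i;

    for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
        int mtu = mtus[i];

        if (txfifosz < mtu || mtu > BUF_SIZE_16KiB) {
            printf("mtu %5d: rejected\n", mtu);
            continue;
        }
        printf("mtu %5d: rx buffer %d\n", mtu, set_bfsize(mtu, 1536));
    }
    return 0;
}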


@@ -320,7 +320,7 @@ out:
 static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
 			 struct device_node *np, struct device *dev)
 {
-	bool mdio = true;
+	bool mdio = false;
 	static const struct of_device_id need_mdio_ids[] = {
 		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
 		{},


@@ -624,6 +624,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 		return -EOPNOTSUPP;
 	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
 		return -EOPNOTSUPP;
+	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+		return -EOPNOTSUPP;
 
 	while (--tries) {
 		/* We only need to check the mc_addr for collisions */
@@ -666,6 +668,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 
 	if (stmmac_filter_check(priv))
 		return -EOPNOTSUPP;
+	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+		return -EOPNOTSUPP;
 	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
 		return -EOPNOTSUPP;


@@ -63,6 +63,7 @@ config TI_CPSW_SWITCHDEV
 	tristate "TI CPSW Switch Support with switchdev"
 	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
 	depends on NET_SWITCHDEV
+	select PAGE_POOL
 	select TI_DAVINCI_MDIO
 	select MFD_SYSCON
 	select REGMAP


@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_TI_CPSW) += cpsw-common.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
 
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_CPMAC) += cpmac.o


@@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	struct cpdma_chan *chan = si->chan;
 	struct cpdma_ctlr *ctlr = chan->ctlr;
 	int len = si->len;
-	int swlen = len;
 	struct cpdma_desc __iomem *desc;
 	dma_addr_t buffer;
 	u32 mode;
@@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	if (si->data_dma) {
 		buffer = si->data_dma;
 		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
-		swlen |= CPDMA_DMA_EXT_MAP;
 	} else {
 		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
 		ret = dma_mapping_error(ctlr->dev, buffer);
@@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	writel_relaxed(mode | len, &desc->hw_mode);
 	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
 	writel_relaxed(buffer, &desc->sw_buffer);
-	writel_relaxed(swlen, &desc->sw_len);
+	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
+		       &desc->sw_len);
 	desc_read(desc, sw_len);
 
 	__cpdma_chan_submit(chan, desc);


@@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device)
 	/* create platform_device */
 	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
 						   ARRAY_SIZE(fjes_resource));
+	if (IS_ERR(plat_dev))
+		return PTR_ERR(plat_dev);
+
 	device->driver_data = plat_dev;
 
 	return 0;
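
The fjes fix works because platform_device_register_simple() reports failure through an encoded error pointer, never NULL, so only IS_ERR() catches it. A minimal user-space imitation of the ERR_PTR convention (the real macros live in include/linux/err.h; everything below is a simplified stand-in):

#include <stdio.h>

/* Kernel-style error pointers: the top page of the address space
 * encodes -MAX_ERRNO..-1, so a pointer value can carry an errno. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a registration call that may fail. */
static void *register_simple(int fail)
{
    static int dev;

    return fail ? ERR_PTR(-12 /* ENOMEM */) : &dev;
}

int main(void)
{
    void *pdev = register_simple(1);

    if (IS_ERR(pdev)) {         /* a NULL check would miss this */
        printf("register failed: %ld\n", PTR_ERR(pdev));
        return 1;
    }
    printf("registered at %p\n", pdev);
    return 0;
}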


@@ -38,7 +38,6 @@ struct pdp_ctx {
 	struct hlist_node	hlist_addr;
 
 	union {
-		u64		tid;
 		struct {
 			u64	tid;
 			u16	flow;
@@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
 }
 
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
 
+static void gtp_destructor(struct net_device *dev)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+
+	kfree(gtp->addr_hash);
+	kfree(gtp->tid_hash);
+}
+
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[],
 		       struct netlink_ext_ack *extack)
@@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 	if (err < 0)
 		return err;
 
-	if (!data[IFLA_GTP_PDP_HASHSIZE])
+	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
 		hashsize = 1024;
-	else
+	} else {
 		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+		if (!hashsize)
+			hashsize = 1024;
+	}
 
 	err = gtp_hashtable_new(gtp, hashsize);
 	if (err < 0)
@@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 	gn = net_generic(dev_net(dev), gtp_net_id);
 	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+	dev->priv_destructor = gtp_destructor;
 
 	netdev_dbg(dev, "registered new GTP interface\n");
 
 	return 0;
 
 out_hashtable:
-	gtp_hashtable_free(gtp);
+	kfree(gtp->addr_hash);
+	kfree(gtp->tid_hash);
 out_encap:
 	gtp_encap_disable(gtp);
 	return err;
@@ -693,8 +704,13 @@ out_encap:
 static void gtp_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct gtp_dev *gtp = netdev_priv(dev);
+	struct pdp_ctx *pctx;
+	int i;
+
+	for (i = 0; i < gtp->hash_size; i++)
+		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+			pdp_context_delete(pctx);
 
-	gtp_hashtable_free(gtp);
 	list_del_rcu(&gtp->list);
 	unregister_netdevice_queue(dev, head);
 }
@@ -772,20 +788,6 @@ err1:
 	return -ENOMEM;
 }
 
-static void gtp_hashtable_free(struct gtp_dev *gtp)
-{
-	struct pdp_ctx *pctx;
-	int i;
-
-	for (i = 0; i < gtp->hash_size; i++)
-		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
-			pdp_context_delete(pctx);
-
-	synchronize_rcu();
-	kfree(gtp->addr_hash);
-	kfree(gtp->tid_hash);
-}
-
 static struct sock *gtp_encap_enable_socket(int fd, int type,
 					    struct gtp_dev *gtp)
 {
@@ -926,24 +928,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
 	}
 }
 
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
-			struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+		       struct genl_info *info)
 {
+	struct pdp_ctx *pctx, *pctx_tid = NULL;
 	struct net_device *dev = gtp->dev;
 	u32 hash_ms, hash_tid = 0;
-	struct pdp_ctx *pctx;
+	unsigned int version;
 	bool found = false;
 	__be32 ms_addr;
 
 	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
 	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+	version = nla_get_u32(info->attrs[GTPA_VERSION]);
 
-	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
-		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
-			found = true;
-			break;
-		}
-	}
+	pctx = ipv4_pdp_find(gtp, ms_addr);
+	if (pctx)
+		found = true;
+	if (version == GTP_V0)
+		pctx_tid = gtp0_pdp_find(gtp,
+					 nla_get_u64(info->attrs[GTPA_TID]));
+	else if (version == GTP_V1)
+		pctx_tid = gtp1_pdp_find(gtp,
+					 nla_get_u32(info->attrs[GTPA_I_TEI]));
+	if (pctx_tid)
+		found = true;
 
 	if (found) {
 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -951,6 +960,11 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
 		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
 
+		if (pctx && pctx_tid)
+			return -EEXIST;
+		if (!pctx)
+			pctx = pctx_tid;
+
 		ipv4_pdp_fill(pctx, info);
 
 		if (pctx->gtp_version == GTP_V0)
@@ -1074,7 +1088,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
 		goto out_unlock;
 	}
 
-	err = ipv4_pdp_add(gtp, sk, info);
+	err = gtp_pdp_add(gtp, sk, info);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1232,43 +1246,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
 			     struct netlink_callback *cb)
 {
 	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+	int i, j, bucket = cb->args[0], skip = cb->args[1];
 	struct net *net = sock_net(skb->sk);
-	struct gtp_net *gn = net_generic(net, gtp_net_id);
-	unsigned long tid = cb->args[1];
-	int i, k = cb->args[0], ret;
 	struct pdp_ctx *pctx;
+	struct gtp_net *gn;
+
+	gn = net_generic(net, gtp_net_id);
 
 	if (cb->args[4])
 		return 0;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
 		if (last_gtp && last_gtp != gtp)
			continue;
 		else
 			last_gtp = NULL;
 
-		for (i = k; i < gtp->hash_size; i++) {
-			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
-				if (tid && tid != pctx->u.tid)
-					continue;
-				else
-					tid = 0;
-
-				ret = gtp_genl_fill_info(skb,
-							 NETLINK_CB(cb->skb).portid,
-							 cb->nlh->nlmsg_seq,
-							 cb->nlh->nlmsg_type, pctx);
-				if (ret < 0) {
+		for (i = bucket; i < gtp->hash_size; i++) {
+			j = 0;
+			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+						 hlist_tid) {
+				if (j >= skip &&
+				    gtp_genl_fill_info(skb,
+					    NETLINK_CB(cb->skb).portid,
+					    cb->nlh->nlmsg_seq,
+					    cb->nlh->nlmsg_type, pctx)) {
 					cb->args[0] = i;
-					cb->args[1] = pctx->u.tid;
+					cb->args[1] = j;
 					cb->args[2] = (unsigned long)gtp;
 					goto out;
 				}
+				j++;
 			}
+			skip = 0;
 		}
+		bucket = 0;
 	}
 	cb->args[4] = 1;
 out:
+	rcu_read_unlock();
 	return skb->len;
 }
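
Among the gtp changes, the hashsize guard is easy to overlook: a netlink-supplied size of zero is coerced back to 1024 because every lookup computes `hash % hash_size`. A compact sketch of why the guard matters (names invented):

#include <stdio.h>
#include <stdlib.h>

static unsigned int hash_size;
static int **buckets;

static int table_new(unsigned int hsize)
{
    if (!hsize)         /* the guard: a size of 0 would make every     */
        hsize = 1024;   /* "hash % hash_size" a division by zero (UB) */

    buckets = calloc(hsize, sizeof(*buckets));
    if (!buckets)
        return -1;
    hash_size = hsize;
    return 0;
}

static unsigned int bucket_of(unsigned int hash)
{
    return hash % hash_size;    /* undefined behavior if hash_size == 0 */
}

int main(void)
{
    if (table_new(0))   /* the netlink attribute said 0 */
        return 1;
    printf("hash 12345 -> bucket %u of %u\n", bucket_of(12345), hash_size);
    free(buckets);
    return 0;
}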


@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
 {
 	struct sixpack *sp;
 
-	write_lock_bh(&disc_data_lock);
+	write_lock_irq(&disc_data_lock);
 	sp = tty->disc_data;
 	tty->disc_data = NULL;
-	write_unlock_bh(&disc_data_lock);
+	write_unlock_irq(&disc_data_lock);
 	if (!sp)
 		return;


@@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
 {
 	struct mkiss *ax;
 
-	write_lock_bh(&disc_data_lock);
+	write_lock_irq(&disc_data_lock);
 	ax = tty->disc_data;
 	tty->disc_data = NULL;
-	write_unlock_bh(&disc_data_lock);
+	write_unlock_irq(&disc_data_lock);
 	if (!ax)
 		return;


@@ -169,7 +169,6 @@ struct rndis_device {
 
 	u8 hw_mac_adr[ETH_ALEN];
 	u8 rss_key[NETVSC_HASH_KEYLEN];
-	u16 rx_table[ITAB_NUM];
 };
 
@@ -940,6 +939,8 @@ struct net_device_context {
 
 	u32 tx_table[VRSS_SEND_TAB_SIZE];
 
+	u16 rx_table[ITAB_NUM];
+
 	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;


@@ -1662,7 +1662,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 	rndis_dev = ndev->extension;
 	if (indir) {
 		for (i = 0; i < ITAB_NUM; i++)
-			indir[i] = rndis_dev->rx_table[i];
+			indir[i] = ndc->rx_table[i];
 	}
 
 	if (key)
@@ -1692,7 +1692,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
 			return -EINVAL;
 
 		for (i = 0; i < ITAB_NUM; i++)
-			rndis_dev->rx_table[i] = indir[i];
+			ndc->rx_table[i] = indir[i];
 	}
 
 	if (!key) {


@@ -773,6 +773,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
 				   const u8 *rss_key, u16 flag)
 {
 	struct net_device *ndev = rdev->ndev;
+	struct net_device_context *ndc = netdev_priv(ndev);
 	struct rndis_request *request;
 	struct rndis_set_request *set;
 	struct rndis_set_complete *set_complete;
@@ -812,7 +813,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
 	/* Set indirection table entries */
 	itab = (u32 *)(rssp + 1);
 	for (i = 0; i < ITAB_NUM; i++)
-		itab[i] = rdev->rx_table[i];
+		itab[i] = ndc->rx_table[i];
 
 	/* Set hask key values */
 	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
@@ -1171,6 +1172,9 @@ int rndis_set_subchannel(struct net_device *ndev,
 	wait_event(nvdev->subchan_open,
 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
+	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
 	/* ignore failures from setting rss parameters, still have channels */
 	if (dev_info)
 		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
@@ -1180,9 +1184,6 @@ int rndis_set_subchannel(struct net_device *ndev,
 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
 
-	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
-		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
-
 	return 0;
 }
 
@@ -1312,6 +1313,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 					      struct netvsc_device_info *device_info)
 {
 	struct net_device *net = hv_get_drvdata(dev);
+	struct net_device_context *ndc = netdev_priv(net);
 	struct netvsc_device *net_device;
 	struct rndis_device *rndis_device;
 	struct ndis_recv_scale_cap rsscap;
@@ -1398,9 +1400,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 	/* We will use the given number of channels if available. */
 	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
-	for (i = 0; i < ITAB_NUM; i++)
-		rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+	if (!netif_is_rxfh_configured(net)) {
+		for (i = 0; i < ITAB_NUM; i++)
+			ndc->rx_table[i] = ethtool_rxfh_indir_default(
 						i, net_device->num_chn);
+	}
 
 	atomic_set(&net_device->open_chn, 1);
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);


@@ -553,7 +553,7 @@ static const struct device_type mdio_bus_phy_type = {
 	.pm = MDIO_BUS_PHY_PM_OPS,
 };
 
-static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
 {
 	int ret;
 
@@ -565,15 +565,15 @@ static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
 	 * then modprobe isn't available.
 	 */
 	if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
-		phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
-			   ret, phy_id);
+		phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n",
+			   ret, (unsigned long)phy_id);
 		return ret;
 	}
 
 	return 0;
 }
 
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
 				     bool is_c45,
 				     struct phy_c45_device_ids *c45_ids)
 {
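
Changing `int phy_id` to `u32 phy_id` is not cosmetic: PHY IDs are raw 32-bit register values, and an ID with bit 31 set (for example 0xffffffff read back from an empty bus) turns negative in an `int`, which breaks comparisons; the format string is widened to match the type. A small demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t raw = 0xffffffffu;     /* what an MDIO read can return */
    int as_int = (int)raw;          /* old parameter type */
    uint32_t as_u32 = raw;          /* new parameter type */

    /* Sign extension makes range checks on the id misbehave. */
    printf("as_int  < 0x40000000: %d\n", as_int < 0x40000000);
    printf("as_u32  < 0x40000000: %d\n", as_u32 < 0x40000000u);

    /* The companion fix widens the format to match the argument. */
    printf("id 0x%08lx\n", (unsigned long)as_u32);
    return 0;
}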


@@ -442,8 +442,7 @@ static void phylink_mac_link_up(struct phylink *pl,
 
 	pl->cur_interface = link_state.interface;
 	pl->ops->mac_link_up(pl->config, pl->link_an_mode,
-			     pl->phy_state.interface,
-			     pl->phydev);
+			     pl->cur_interface, pl->phydev);
 
 	if (ndev)
 		netif_carrier_on(ndev);


@@ -511,7 +511,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
 		}
 	} else {
 		netdev_warn(dev->net,
-			    "Failed to read stat ret = 0x%x", ret);
+			    "Failed to read stat ret = %d", ret);
 	}
 
 	kfree(stats);
@@ -1808,6 +1808,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
 	dev->mdiobus->read = lan78xx_mdiobus_read;
 	dev->mdiobus->write = lan78xx_mdiobus_write;
 	dev->mdiobus->name = "lan78xx-mdiobus";
+	dev->mdiobus->parent = &dev->udev->dev;
 
 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
 		 dev->udev->bus->busnum, dev->udev->devnum);


@@ -8958,6 +8958,7 @@ int ath10k_mac_register(struct ath10k *ar)
 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
 	wiphy_ext_feature_set(ar->hw->wiphy,
 			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
 
 	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
 	    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))


@@ -83,7 +83,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
 			val = swahb32(val);
 		}
 
-		__raw_writel(val, mem + reg);
+		iowrite32(val, mem + reg);
 		usleep_range(100, 120);
 	}


@@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* same thing for QuZ... */
 	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
-		if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
-			iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
-		else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
-			iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
-		else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
-			iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
-			iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
-			iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
-			iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+		if (cfg == &iwl_ax101_cfg_qu_hr)
+			cfg = &iwl_ax101_cfg_quz_hr;
+		else if (cfg == &iwl_ax201_cfg_qu_hr)
+			cfg = &iwl_ax201_cfg_quz_hr;
+		else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+			cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
 	}
 #endif


@@ -57,24 +57,6 @@
 #include "internal.h"
 #include "fw/dbg.h"
 
-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
-{
-	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
-			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-	udelay(20);
-	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
-			  HPM_HIPM_GEN_CFG_CR_PG_EN |
-			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
-	udelay(20);
-	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
-			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-
-	iwl_trans_sw_reset(trans);
-	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	return 0;
-}
-
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
 	iwl_pcie_apm_config(trans);
 
-	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
-	    trans->cfg->integrated) {
-		ret = iwl_pcie_gen2_force_power_gating(trans);
-		if (ret)
-			return ret;
-	}
-
 	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
 	if (ret)
 		return ret;


@@ -1783,6 +1783,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
 	return 0;
 }
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+	int ret;
+
+	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+	if (ret < 0)
+		return ret;
+
+	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+	udelay(20);
+	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			  HPM_HIPM_GEN_CFG_CR_PG_EN |
+			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
+	udelay(20);
+	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	return 0;
+}
+
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1802,6 +1825,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 	iwl_trans_pcie_sw_reset(trans);
 
+	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+	    trans->cfg->integrated) {
+		err = iwl_pcie_gen2_force_power_gating(trans);
+		if (err)
+			return err;
+	}
+
 	err = iwl_pcie_apm_init(trans);
 	if (err)
 		return err;


@@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 			    "11D: skip setting domain info in FW\n");
 		return 0;
 	}
+
+	if (country_ie_len >
+	    (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "11D: country_ie_len overflow!, deauth AP\n");
+		return -EINVAL;
+	}
+
 	memcpy(priv->adapter->country_code, &country_ie[2], 2);
 
 	domain_info->country_code[0] = country_ie[2];
@@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 	priv->scan_block = false;
 
 	if (bss) {
-		if (adapter->region_code == 0x00)
-			mwifiex_process_country_ie(priv, bss);
+		if (adapter->region_code == 0x00 &&
+		    mwifiex_process_country_ie(priv, bss))
+			return -EINVAL;
 
 		/* Allocate and fill new bss descriptor */
 		bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),


@@ -953,59 +953,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 
 		switch (*pos) {
 		case WLAN_EID_SUPP_RATES:
+			if (pos[1] > 32)
+				return;
 			sta_ptr->tdls_cap.rates_len = pos[1];
 			for (i = 0; i < pos[1]; i++)
 				sta_ptr->tdls_cap.rates[i] = pos[i + 2];
 			break;
 
 		case WLAN_EID_EXT_SUPP_RATES:
+			if (pos[1] > 32)
+				return;
 			basic = sta_ptr->tdls_cap.rates_len;
+			if (pos[1] > 32 - basic)
+				return;
 			for (i = 0; i < pos[1]; i++)
 				sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
 			sta_ptr->tdls_cap.rates_len += pos[1];
 			break;
 		case WLAN_EID_HT_CAPABILITY:
-			memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+			if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+				return;
+			if (pos[1] != sizeof(struct ieee80211_ht_cap))
+				return;
+			/* copy the ie's value into ht_capb*/
+			memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
 			       sizeof(struct ieee80211_ht_cap));
 			sta_ptr->is_11n_enabled = 1;
 			break;
 		case WLAN_EID_HT_OPERATION:
-			memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+			if (pos > end -
+			    sizeof(struct ieee80211_ht_operation) - 2)
+				return;
+			if (pos[1] != sizeof(struct ieee80211_ht_operation))
+				return;
+			/* copy the ie's value into ht_oper*/
+			memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
 			       sizeof(struct ieee80211_ht_operation));
 			break;
 		case WLAN_EID_BSS_COEX_2040:
+			if (pos > end - 3)
+				return;
+			if (pos[1] != 1)
+				return;
 			sta_ptr->tdls_cap.coex_2040 = pos[2];
 			break;
 		case WLAN_EID_EXT_CAPABILITY:
+			if (pos > end - sizeof(struct ieee_types_header))
+				return;
+			if (pos[1] < sizeof(struct ieee_types_header))
+				return;
+			if (pos[1] > 8)
+				return;
 			memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
 			       sizeof(struct ieee_types_header) +
 			       min_t(u8, pos[1], 8));
 			break;
 		case WLAN_EID_RSN:
+			if (pos > end - sizeof(struct ieee_types_header))
+				return;
+			if (pos[1] < sizeof(struct ieee_types_header))
+				return;
+			if (pos[1] > IEEE_MAX_IE_SIZE -
+			    sizeof(struct ieee_types_header))
+				return;
 			memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
 			       sizeof(struct ieee_types_header) +
 			       min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
 				     sizeof(struct ieee_types_header)));
 			break;
 		case WLAN_EID_QOS_CAPA:
+			if (pos > end - 3)
+				return;
+			if (pos[1] != 1)
+				return;
 			sta_ptr->tdls_cap.qos_info = pos[2];
 			break;
 		case WLAN_EID_VHT_OPERATION:
-			if (priv->adapter->is_hw_11ac_capable)
-				memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+			if (priv->adapter->is_hw_11ac_capable) {
+				if (pos > end -
+				    sizeof(struct ieee80211_vht_operation) - 2)
+					return;
+				if (pos[1] !=
+				    sizeof(struct ieee80211_vht_operation))
+					return;
+				/* copy the ie's value into vhtoper*/
+				memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
 				       sizeof(struct ieee80211_vht_operation));
+			}
 			break;
 		case WLAN_EID_VHT_CAPABILITY:
 			if (priv->adapter->is_hw_11ac_capable) {
-				memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+				if (pos > end -
+				    sizeof(struct ieee80211_vht_cap) - 2)
+					return;
+				if (pos[1] != sizeof(struct ieee80211_vht_cap))
+					return;
+				/* copy the ie's value into vhtcap*/
+				memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
 				       sizeof(struct ieee80211_vht_cap));
 				sta_ptr->is_11ac_enabled = 1;
 			}
 			break;
 		case WLAN_EID_AID:
-			if (priv->adapter->is_hw_11ac_capable)
+			if (priv->adapter->is_hw_11ac_capable) {
+				if (pos > end - 4)
+					return;
+				if (pos[1] != 2)
+					return;
 				sta_ptr->tdls_cap.aid =
 					get_unaligned_le16((pos + 2));
+			}
+			break;
 		default:
 			break;
 		}

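All of the checks added above follow one discipline: before trusting an information element, verify that its claimed length fits both the remaining buffer and the destination structure. A standalone sketch of that validate-before-copy loop; parse_ies() and the element id are illustrative stand-ins, not mwifiex code:

/*
 * Minimal userspace sketch of the validate-before-copy pattern the
 * fixes above enforce.  Each IE is laid out as [id, len, value...].
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_EXAMPLE_CAP 45	/* hypothetical element id */

static void parse_ies(const uint8_t *pos, const uint8_t *end)
{
	uint8_t cap[26];

	/* Need at least the two-byte id/len header. */
	while (end - pos >= 2) {
		uint8_t id = pos[0], len = pos[1];

		/* Reject elements whose value runs past the buffer. */
		if (len > end - pos - 2) {
			fprintf(stderr, "truncated IE %u, stop\n", id);
			return;
		}
		if (id == EID_EXAMPLE_CAP) {
			/* Also require the length the structure expects. */
			if (len != sizeof(cap))
				return;
			memcpy(cap, pos + 2, sizeof(cap));
			printf("copied IE %u (%u bytes)\n", id, len);
		}
		pos += 2 + len;
	}
}

int main(void)
{
	uint8_t buf[30] = { 45, 26 };	/* 26-byte value follows, zeroed */

	buf[28] = 7;			/* second IE: id 7 ... */
	buf[29] = 200;			/* ... claims 200 bytes it lacks */
	parse_ies(buf, buf + sizeof(buf));
	return 0;
}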
View File

@@ -342,8 +342,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
 	dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
 		 version, fae);
 
-	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+	memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+	       ETH_ALEN);
+
 	mt76_eeprom_override(&dev->mt76);
+	mt76x02_mac_setaddr(dev, dev->mt76.macaddr);
 
 	mt76x0_set_chip_cap(dev);
 	mt76x0_set_freq_offset(dev);
 	mt76x0_set_temp_offset(dev);

View File

@@ -628,18 +628,6 @@ err:
 
 static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 {
-	if (queue->tx_irq) {
-		unbind_from_irqhandler(queue->tx_irq, queue);
-		if (queue->tx_irq == queue->rx_irq)
-			queue->rx_irq = 0;
-		queue->tx_irq = 0;
-	}
-
-	if (queue->rx_irq) {
-		unbind_from_irqhandler(queue->rx_irq, queue);
-		queue->rx_irq = 0;
-	}
-
 	if (queue->task) {
 		kthread_stop(queue->task);
 		queue->task = NULL;
@@ -655,6 +643,18 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 		queue->napi.poll = NULL;
 	}
 
+	if (queue->tx_irq) {
+		unbind_from_irqhandler(queue->tx_irq, queue);
+		if (queue->tx_irq == queue->rx_irq)
+			queue->rx_irq = 0;
+		queue->tx_irq = 0;
+	}
+
+	if (queue->rx_irq) {
+		unbind_from_irqhandler(queue->rx_irq, queue);
+		queue->rx_irq = 0;
+	}
+
 	xenvif_unmap_frontend_data_rings(queue);
 }

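The xen-netback change is purely an ordering fix: the queue's kthread and NAPI instance are now stopped before the tx/rx IRQs are unbound, instead of after. A generic userspace sketch of the same shutdown discipline, stop the consumer first and only then release what it consumes; the pthread worker and the names below are illustrative, not driver code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	atomic_bool stop;
	pthread_t task;
	int *irq_resource;	/* stands in for the bound IRQ */
};

static void *worker(void *arg)
{
	struct queue *q = arg;

	/* The worker keeps touching irq_resource until told to stop. */
	while (!atomic_load(&q->stop))
		(void)*q->irq_resource;
	return NULL;
}

static void disconnect_queue(struct queue *q)
{
	/* Stop the consumer first... */
	atomic_store(&q->stop, true);
	pthread_join(q->task, NULL);

	/* ...and only then tear down what it was using. */
	free(q->irq_resource);
	q->irq_resource = NULL;
}

int main(void)
{
	struct queue q = { .irq_resource = calloc(1, sizeof(int)) };

	atomic_init(&q.stop, false);
	pthread_create(&q.task, NULL, worker, &q);
	disconnect_queue(&q);
	return 0;
}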
View File

@@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
 
 	r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios);
 	if (r)
-		return r;
+		dev_dbg(dev, "Unable to add GPIO mapping table\n");
 
 	phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
 	if (IS_ERR(phy->gpiod_en)) {

View File

@@ -507,7 +507,10 @@ int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
 	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
 	struct s3fwrn5_fw_info *fw_info = &info->fw_info;
 
-	BUG_ON(fw_info->rsp);
+	if (WARN_ON(fw_info->rsp)) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
 
 	fw_info->rsp = skb;

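Unlike BUG_ON(), WARN_ON() evaluates to its condition, so the driver above can log the inconsistency, free the duplicate response, and keep running rather than halting the machine. A rough userspace analogue of that guard shape; warn_on() below is a stand-in, not the kernel macro:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's WARN_ON(): log once, hand back the condition. */
#define warn_on(cond) \
	({ int __c = !!(cond); \
	   if (__c) \
		fprintf(stderr, "warning: %s:%d: %s\n", \
			__FILE__, __LINE__, #cond); \
	   __c; })

struct fw_info { void *rsp; };

static int recv_frame(struct fw_info *fw, void *skb)
{
	/* A response is already pending: refuse the new one instead of
	 * crashing, and free the buffer we now own. */
	if (warn_on(fw->rsp)) {
		free(skb);
		return -1;	/* -EINVAL in the driver */
	}
	fw->rsp = skb;
	return 0;
}

int main(void)
{
	struct fw_info fw = { 0 };

	recv_frame(&fw, malloc(16));	/* accepted */
	recv_frame(&fw, malloc(16));	/* rejected with a warning */
	free(fw.rsp);
	return 0;
}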
View File

@@ -162,7 +162,7 @@ static const struct of_device_id whitelist_phys[] = {
  * A device which is not a phy is expected to have a compatible string
  * indicating what sort of device it is.
  */
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+bool of_mdiobus_child_is_phy(struct device_node *child)
 {
 	u32 phy_id;
 
@@ -187,6 +187,7 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
 
 	return false;
 }
+EXPORT_SYMBOL(of_mdiobus_child_is_phy);
 
 /**
  * of_mdiobus_register - Register mii_bus and create PHYs from the device tree

View File

@@ -121,7 +121,7 @@ config PTP_1588_CLOCK_KVM
 
 config PTP_1588_CLOCK_IDTCM
 	tristate "IDT CLOCKMATRIX as PTP clock"
-	depends on PTP_1588_CLOCK
+	depends on PTP_1588_CLOCK && I2C
 	default n
 	help
 	  This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP

View File

@@ -655,17 +655,17 @@ static int qeth_check_idx_response(struct qeth_card *card,
 		unsigned char *buffer)
 {
 	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
-	if ((buffer[2] & 0xc0) == 0xc0) {
+	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
 		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
 				 buffer[4]);
 		QETH_CARD_TEXT(card, 2, "ckidxres");
 		QETH_CARD_TEXT(card, 2, " idxterm");
-		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
-		if (buffer[4] == 0xf6) {
+		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
+		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
+		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
 			dev_err(&card->gdev->dev,
-				"The qeth device is not configured "
-				"for the OSI layer required by z/VM\n");
-			return -EPERM;
+				"The device does not support the configured transport mode\n");
+			return -EPROTONOSUPPORT;
 		}
 		return -EIO;
 	}
@@ -742,10 +742,10 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 	case 0:
 		break;
 	case -EIO:
-		qeth_clear_ipacmd_list(card);
 		qeth_schedule_recovery(card);
 		/* fall through */
 	default:
+		qeth_clear_ipacmd_list(card);
 		goto out;
 	}

View File

@@ -899,6 +899,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
 #define QETH_IDX_ACT_ERR_AUTH		0x1E
 #define QETH_IDX_ACT_ERR_AUTH_USER	0x20
 
+#define QETH_IDX_TERMINATE		0xc0
+#define QETH_IDX_TERMINATE_MASK		0xc0
+#define QETH_IDX_TERM_BAD_TRANSPORT	0x41
+#define QETH_IDX_TERM_BAD_TRANSPORT_VM	0xf6
+
 #define PDU_ENCAPSULATION(buffer) \
 	(buffer + *(buffer + (*(buffer + 0x0b)) + \
 		    *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))

View File

@@ -207,7 +207,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
 	} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
 		if (IS_LAYER3(card)) {
-			rc = -ENOTSUPP;
+			rc = -EOPNOTSUPP;
 			goto out;
 		}
 		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;

View File

@@ -295,6 +295,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
 	flush_workqueue(card->event_wq);
 
 	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+	card->info.promisc_mode = 0;
 }
 
 static int qeth_l2_process_inbound_buffer(struct qeth_card *card,

View File

@@ -262,7 +262,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 		return;
 
 	mutex_lock(&card->sbp_lock);
-	if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+	if (!card->options.sbp.reflect_promisc &&
+	    card->options.sbp.role != QETH_SBP_ROLE_NONE) {
 		/* Conditional to avoid spurious error messages */
 		qeth_bridgeport_setrole(card, card->options.sbp.role);
 		/* Let the callback function refresh the stored role value. */

View File

@@ -1314,6 +1314,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
 	}
 
 	flush_workqueue(card->event_wq);
+	card->info.promisc_mode = 0;
 }
 
 static void qeth_l3_set_promisc_mode(struct qeth_card *card)

View File

@@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
 			     struct cgroup *cgroup,
 			     enum bpf_attach_type type);
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
@@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
 static inline void bpf_cgroup_storage_set(
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
-static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
 					    struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
 					      struct bpf_map *map) {}
 static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
 	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }

View File

@@ -461,6 +461,7 @@ struct bpf_trampoline {
 	struct {
 		struct btf_func_model model;
 		void *addr;
+		bool ftrace_managed;
 	} func;
 	/* list of BPF programs using this trampoline */
 	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
@@ -817,6 +818,8 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+			  struct bpf_map **used_maps, u32 len);
 
 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

View File

@@ -587,9 +587,9 @@ struct platform_device_id {
 #define MDIO_NAME_SIZE		32
 #define MDIO_MODULE_PREFIX	"mdio:"
 
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
 #define MDIO_ID_ARGS(_id) \
-	(_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1,	\
+	((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
 	((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
 	((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
 	((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \

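The old macro printed the top bit as (_id)>>31 with %d; if the id is ever carried in a signed 32-bit variable (the phy.h prototype change later in this diff is the matching type fix), an arithmetic right shift smears the sign bit across the whole result. A minimal userspace illustration, assuming the usual arithmetic-shift behaviour for signed right shifts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t id = INT32_MIN;		/* bit pattern 0x80000000, MSB set */

	/* Arithmetic shift of a negative value keeps the sign bit
	 * (implementation-defined, but this is what common ABIs do). */
	printf("unmasked: %d\n", id >> 31);			/* -1 */
	printf("masked:   %u\n", (unsigned int)((id >> 31) & 1));	/* 1 */
	return 0;
}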
View File

@@ -1775,7 +1775,7 @@ enum netdev_priv_flags {
  *				for hardware timestamping
  *	@sfp_bus:	attached &struct sfp_bus structure.
  *	@qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
-				spinlock
+ *				spinlock
  *	@qdisc_running_key:	lockdep class annotating Qdisc->running seqcount
  *	@qdisc_xmit_lock_key:	lockdep class annotating
  *				netdev_queue->_xmit_lock spinlock

View File

@@ -12,6 +12,7 @@
 #include <linux/of.h>
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
+extern bool of_mdiobus_child_is_phy(struct device_node *child);
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -54,6 +55,11 @@ static inline int of_mdio_parse_addr(struct device *dev,
 }
 
 #else /* CONFIG_OF_MDIO */
+static bool of_mdiobus_child_is_phy(struct device_node *child)
+{
+	return false;
+}
+
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
 	/*

View File

@@ -1000,7 +1000,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
 int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
 		     u16 mask, u16 set);
 
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
 				     bool is_c45,
 				     struct phy_c45_device_ids *c45_ids);
 #if IS_ENABLED(CONFIG_PHYLIB)

View File

@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 	first->pprev = &n->next;
 }
 
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+					    struct hlist_nulls_head *h)
+{
+	struct hlist_nulls_node *i, *last = NULL;
+
+	/* Note: write side code, so rcu accessors are not needed. */
+	for (i = h->first; !is_a_nulls(i); i = i->next)
+		last = i;
+
+	if (last) {
+		n->next = last->next;
+		n->pprev = &last->next;
+		rcu_assign_pointer(hlist_next_rcu(last), n);
+	} else {
+		hlist_nulls_add_head_rcu(n, h);
+	}
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:	the type * to use as a loop cursor.

View File

@@ -82,7 +82,7 @@ struct dst_entry {
 struct dst_metrics {
 	u32		metrics[RTAX_MAX];
 	refcount_t	refcnt;
-};
+} __aligned(4);	/* Low pointer bits contain DST_METRICS_FLAGS */
 extern const struct dst_metrics dst_default_metrics;
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

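Per the new comment, dst stores its DST_METRICS_* flag bits in the low bits of the pointer to this struct, which is only safe when every instance is at least 4-byte aligned; m68k guarantees only 2-byte alignment by default, hence the forced __aligned(4). A self-contained sketch of the low-bit tagging idiom; the TAG_* names are illustrative, not the kernel's flags:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Two tag bits fit below any 4-byte-aligned address. */
#define TAG_READ_ONLY	0x1UL
#define TAG_REFCOUNTED	0x2UL
#define TAG_MASK	0x3UL

struct metrics {
	uint32_t vals[4];
} __attribute__((aligned(4)));	/* guarantee the low bits are free */

static uintptr_t pack(struct metrics *m, uintptr_t flags)
{
	assert(((uintptr_t)m & TAG_MASK) == 0);	/* alignment is the contract */
	return (uintptr_t)m | flags;
}

static struct metrics *unpack(uintptr_t word, uintptr_t *flags)
{
	*flags = word & TAG_MASK;
	return (struct metrics *)(word & ~TAG_MASK);
}

int main(void)
{
	static struct metrics m = { { 1, 2, 3, 4 } };
	uintptr_t flags, word = pack(&m, TAG_READ_ONLY);
	struct metrics *p = unpack(word, &flags);

	printf("vals[0]=%u read_only=%lu\n", p->vals[0],
	       (unsigned long)(flags & TAG_READ_ONLY));
	return 0;
}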
View File

@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
 	struct hlist_head	chain;
 };
 
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use different 'nulls' end-of-chain value for all hash buckets :
+ * A socket might transition from ESTABLISH to LISTEN state without
+ * RCU grace period. A lookup in ehash table needs to handle this case.
  */
+#define LISTENING_NULLS_BASE (1U << 29)
 struct inet_listen_hashbucket {
 	spinlock_t		lock;
 	unsigned int		count;
-	struct hlist_head	head;
+	union {
+		struct hlist_head	head;
+		struct hlist_nulls_head	nulls_head;
+	};
 };
 
 /* This is for listening sockets, thus all sockets which possess wildcards. */

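The union and LISTENING_NULLS_BASE work together with the 'nulls' list scheme: each chain ends in an odd sentinel word that encodes which bucket it belongs to, so an RCU reader that followed a socket moved mid-lookup ends on a foreign nulls value and knows to restart. A small sketch of that encoding, mirroring the list_nulls helpers in simplified form:

#include <stdio.h>

/* A nulls marker is an odd value: bit 0 set, payload in the upper bits
 * (same scheme as include/linux/list_nulls.h, simplified). */
#define NULLS_MARKER(value)	((void *)(1UL | (((unsigned long)(value)) << 1)))
#define is_a_nulls(ptr)		((unsigned long)(ptr) & 1UL)
#define get_nulls_value(ptr)	((unsigned long)(ptr) >> 1)

#define LISTENING_NULLS_BASE	(1U << 29)

int main(void)
{
	/* ehash bucket 42 vs listening slot 7: distinct end markers. */
	void *ehash_end  = NULLS_MARKER(42);
	void *listen_end = NULLS_MARKER(LISTENING_NULLS_BASE + 7);

	/* A lookup walking bucket 42 that ends on listen_end sees a
	 * foreign nulls value and restarts instead of missing the sock. */
	printf("end of chain? %lu, value %lu\n",
	       is_a_nulls(ehash_end), get_nulls_value(ehash_end));
	printf("end of chain? %lu, value %lu\n",
	       is_a_nulls(listen_end), get_nulls_value(listen_end));
	return 0;
}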
Some files were not shown because too many files have changed in this diff.