commit 6876a18d33
Author: David S. Miller
Date:   2021-04-26 12:00:00 -07:00

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

28 changed files with 184 additions and 98 deletions


@@ -8532,7 +8532,6 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:  Dany Madden <drt@linux.ibm.com>
 M:  Sukadev Bhattiprolu <sukadev@linux.ibm.com>
-R:  Thomas Falcon <tlfalcon@linux.ibm.com>
 R:  Lijun Pan <lijunp213@gmail.com>
 L:  netdev@vger.kernel.org
 S:  Supported
 F:  drivers/net/ethernet/ibm/ibmvnic.*


@@ -583,7 +583,7 @@
             clocks = <&sys_clk 6>;
             reset-names = "ether";
             resets = <&sys_rst 6>;
-            phy-mode = "rgmii";
+            phy-mode = "rgmii-id";
             local-mac-address = [00 00 00 00 00 00];
             socionext,syscon-phy-mode = <&soc_glue 0>;


@@ -734,7 +734,7 @@
             clocks = <&sys_clk 6>;
             reset-names = "ether";
             resets = <&sys_rst 6>;
-            phy-mode = "rgmii";
+            phy-mode = "rgmii-id";
             local-mac-address = [00 00 00 00 00 00];
             socionext,syscon-phy-mode = <&soc_glue 0>;


@@ -564,7 +564,7 @@
             clocks = <&sys_clk 6>;
             reset-names = "ether";
             resets = <&sys_rst 6>;
-            phy-mode = "rgmii";
+            phy-mode = "rgmii-id";
             local-mac-address = [00 00 00 00 00 00];
             socionext,syscon-phy-mode = <&soc_glue 0>;
@@ -585,7 +585,7 @@
             clocks = <&sys_clk 7>;
             reset-names = "ether";
             resets = <&sys_rst 7>;
-            phy-mode = "rgmii";
+            phy-mode = "rgmii-id";
             local-mac-address = [00 00 00 00 00 00];
             socionext,syscon-phy-mode = <&soc_glue 1>;


@@ -4391,9 +4391,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
     int agg_id = 0;
     int ret = 0;
 
-#ifdef CONFIG_LOCKDEP
-    WARN_ON(lockdep_is_held(&bond->mode_lock));
-#endif
+    might_sleep();
 
     usable_slaves = kzalloc(struct_size(usable_slaves, arr,
                             bond->slave_cnt), GFP_KERNEL);
@@ -4406,7 +4404,9 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
     if (BOND_MODE(bond) == BOND_MODE_8023AD) {
         struct ad_info ad_info;
 
+        spin_lock_bh(&bond->mode_lock);
         if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+            spin_unlock_bh(&bond->mode_lock);
             pr_debug("bond_3ad_get_active_agg_info failed\n");
             /* No active aggragator means it's not safe to use
              * the previous array.
@@ -4414,6 +4414,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
             bond_reset_slave_arr(bond);
             goto out;
         }
+        spin_unlock_bh(&bond->mode_lock);
         agg_id = ad_info.aggregator_id;
     }
     bond_for_each_slave(bond, slave, iter) {
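
The shape of this fix: bond_update_slave_arr() can sleep (the GFP_KERNEL allocation), so it must not hold mode_lock throughout; instead the 802.3ad aggregator info is snapshotted under the lock, which is dropped again before any sleeping work, and might_sleep() documents the constraint. A minimal userspace analogue of that snapshot-under-lock pattern, with illustrative names (not the kernel code):

    #include <pthread.h>
    #include <stdio.h>

    struct ad_info { int aggregator_id; };

    static pthread_mutex_t mode_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ad_info shared_info = { .aggregator_id = 7 };

    static void update_slave_arr(void)
    {
        struct ad_info snap;

        /* hold the lock only long enough to take a consistent snapshot */
        pthread_mutex_lock(&mode_lock);
        snap = shared_info;
        pthread_mutex_unlock(&mode_lock);

        /* the slow, possibly sleeping work (the kernel's GFP_KERNEL
         * allocation) runs outside the lock, using only the snapshot */
        printf("using aggregator %d\n", snap.aggregator_id);
    }

    int main(void) { update_slave_arr(); return 0; }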


@@ -1755,14 +1755,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
     cons = rxcmp->rx_cmp_opaque;
     if (unlikely(cons != rxr->rx_next_cons)) {
-        int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+        int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
 
         /* 0xffff is forced error, don't print it */
         if (rxr->rx_next_cons != 0xffff)
             netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
                         cons, rxr->rx_next_cons);
         bnxt_sched_reset(bp, rxr);
-        return rc1;
+        if (rc1)
+            return rc1;
+        goto next_rx_no_prod_no_len;
     }
     rx_buf = &rxr->rx_buf_ring[cons];
     data = rx_buf->data;
@@ -9804,7 +9806,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
     if (!rc)
         len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
     mutex_unlock(&bp->hwrm_cmd_lock);
-    return rc ?: len;
+    if (rc)
+        return rc;
+    return len;
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);


@@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
         netif_carrier_on(port->dev);
         if (!delayed_work_pending(caching_dw))
             queue_delayed_work(prestera_wq, caching_dw, 0);
-    } else {
+    } else if (netif_running(port->dev) &&
+               netif_carrier_ok(port->dev)) {
         netif_carrier_off(port->dev);
         if (delayed_work_pending(caching_dw))
             cancel_delayed_work(caching_dw);


@@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
     attrs.split = eth_port.is_split;
     attrs.splittable = !attrs.split;
     attrs.lanes = eth_port.port_lanes;
+    attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
     attrs.phys.port_number = eth_port.label_port;
     attrs.phys.split_subport_number = eth_port.label_subport;


@@ -913,31 +913,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
     int q = napi - priv->napi;
     int mask = BIT(q);
     int quota = budget;
-    u32 ris0, tis;
 
-    for (;;) {
-        tis = ravb_read(ndev, TIS);
-        ris0 = ravb_read(ndev, RIS0);
-        if (!((ris0 & mask) || (tis & mask)))
-            break;
+    /* Processing RX Descriptor Ring */
+    /* Clear RX interrupt */
+    ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+    if (ravb_rx(ndev, &quota, q))
+        goto out;
 
-        /* Processing RX Descriptor Ring */
-        if (ris0 & mask) {
-            /* Clear RX interrupt */
-            ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
-            if (ravb_rx(ndev, &quota, q))
-                goto out;
-        }
-
-        /* Processing TX Descriptor Ring */
-        if (tis & mask) {
-            spin_lock_irqsave(&priv->lock, flags);
-            /* Clear TX interrupt */
-            ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
-            ravb_tx_free(ndev, q, true);
-            netif_wake_subqueue(ndev, q);
-            spin_unlock_irqrestore(&priv->lock, flags);
-        }
-    }
+    /* Processing TX Descriptor Ring */
+    spin_lock_irqsave(&priv->lock, flags);
+    /* Clear TX interrupt */
+    ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+    ravb_tx_free(ndev, q, true);
+    netif_wake_subqueue(ndev, q);
+    spin_unlock_irqrestore(&priv->lock, flags);
 
     napi_complete(napi);


@@ -2944,8 +2944,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
     /* Get the transmit queue */
     tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
-    tx_queue = efx_channel_get_tx_queue(channel,
-                                        tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+    tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 
     if (!tx_queue->timestamping) {
         /* Transmit completion */


@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
         /* Transmit completion */
         tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
         tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-        tx_queue = efx_channel_get_tx_queue(
-            channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+        tx_queue = channel->tx_queue +
+                   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
         efx_xmit_done(tx_queue, tx_ev_desc_ptr);
     } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
         /* Rewrite the FIFO write pointer */
         tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-        tx_queue = efx_channel_get_tx_queue(
-            channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+        tx_queue = channel->tx_queue +
+                   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 
         netif_tx_lock(efx->net_dev);
         efx_farch_notify_tx_desc(tx_queue);
@@ -1081,16 +1081,16 @@ static void
 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
     struct efx_tx_queue *tx_queue;
+    struct efx_channel *channel;
     int qid;
 
     qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
     if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
-        tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
-                                    qid % EFX_MAX_TXQ_PER_CHANNEL);
-        if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+        channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
+        tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
+        if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
             efx_farch_magic_event(tx_queue->channel,
                                   EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
-        }
     }
 }


@@ -3303,8 +3303,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
     /* Enable TSO */
     if (priv->tso) {
-        for (chan = 0; chan < tx_cnt; chan++)
+        for (chan = 0; chan < tx_cnt; chan++) {
+            struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+            /* TSO and TBS cannot co-exist */
+            if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                continue;
+
             stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+        }
     }
 
     /* Enable Split Header */
@@ -3674,9 +3681,8 @@ int stmmac_open(struct net_device *dev)
         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
         int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
 
+        /* Setup per-TXQ tbs flag before TX descriptor alloc */
         tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
-        if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
-            tx_q->tbs &= ~STMMAC_TBS_AVAIL;
     }
 
     ret = alloc_dma_desc_resources(priv);


@@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 /* EMAC mac_status register */
 #define EMAC_MACSTATUS_TXERRCODE_MASK   (0xF00000)
 #define EMAC_MACSTATUS_TXERRCODE_SHIFT  (20)
-#define EMAC_MACSTATUS_TXERRCH_MASK     (0x7)
+#define EMAC_MACSTATUS_TXERRCH_MASK     (0x70000)
 #define EMAC_MACSTATUS_TXERRCH_SHIFT    (16)
 #define EMAC_MACSTATUS_RXERRCODE_MASK   (0xF000)
 #define EMAC_MACSTATUS_RXERRCODE_SHIFT  (12)
-#define EMAC_MACSTATUS_RXERRCH_MASK     (0x7)
+#define EMAC_MACSTATUS_RXERRCH_MASK     (0x700)
 #define EMAC_MACSTATUS_RXERRCH_SHIFT    (8)
 
 /* EMAC RX register masks */
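
The point of the mask change: the error-channel fields sit at bits 18:16 (TX) and 10:8 (RX) of mac_status and are extracted in the usual (status & MASK) >> SHIFT style, so an unshifted 0x7 mask zeroes the field before the shift ever runs. A standalone sketch with a made-up status value:

    #include <stdio.h>
    #include <stdint.h>

    #define TXERRCH_MASK_OLD 0x7      /* broken: not aligned with the shift */
    #define TXERRCH_MASK_NEW 0x70000  /* fixed: covers bits 18:16 */
    #define TXERRCH_SHIFT    16

    int main(void)
    {
        uint32_t mac_status = 0x30000; /* hypothetical: TX error, channel 3 */

        /* masking before the shift wipes the field with the old value */
        printf("old: %u\n", (mac_status & TXERRCH_MASK_OLD) >> TXERRCH_SHIFT);
        /* the pre-shifted mask isolates the channel correctly */
        printf("new: %u\n", (mac_status & TXERRCH_MASK_NEW) >> TXERRCH_SHIFT);
        return 0;
    }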


@@ -1087,7 +1087,7 @@ static int init_queues(struct port *port)
     int i;
 
     if (!ports_open) {
-        dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
+        dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
                                    POOL_ALLOC_SIZE, 32, 0);
         if (!dma_pool)
             return -ENOMEM;
@@ -1506,6 +1506,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
     ndev->netdev_ops = &ixp4xx_netdev_ops;
     ndev->ethtool_ops = &ixp4xx_ethtool_ops;
     ndev->tx_queue_len = 100;
+    /* Inherit the DMA masks from the platform device */
+    ndev->dev.dma_mask = dev->dma_mask;
+    ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
 
     netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);


@@ -892,7 +892,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
     __be16 sport;
     int err;
 
-    if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+    if (!pskb_inet_may_pull(skb))
         return -EINVAL;
 
     sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -989,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
     __be16 sport;
     int err;
 
-    if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+    if (!pskb_inet_may_pull(skb))
         return -EINVAL;
 
     sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
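
pskb_inet_may_pull() sizes the header check from the packet's actual network protocol rather than hard-coding sizeof(struct iphdr) or sizeof(struct ipv6hdr) for whichever transmit path was entered. A rough standalone sketch of that idea (simplified; the real helper also deals with VLAN tags and non-linear data):

    #include <stdio.h>
    #include <stddef.h>

    enum proto { P_IPV4, P_IPV6 };

    /* required linear bytes depend on the declared protocol,
     * like sizeof(struct iphdr) vs sizeof(struct ipv6hdr) */
    static size_t inet_header_len(enum proto p)
    {
        return p == P_IPV4 ? 20 : 40;
    }

    static int inet_may_pull(enum proto p, size_t linear_len)
    {
        return linear_len >= inet_header_len(p);
    }

    int main(void)
    {
        /* 20 linear bytes satisfy IPv4 but not IPv6 */
        printf("v4: %d, v6: %d\n", inet_may_pull(P_IPV4, 20),
               inet_may_pull(P_IPV6, 20));
        return 0;
    }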


@@ -11,6 +11,18 @@
 #define XWAY_MDIO_IMASK         0x19    /* interrupt mask */
 #define XWAY_MDIO_ISTAT         0x1A    /* interrupt status */
 #define XWAY_MDIO_LED           0x1B    /* led control */
+
+/* bit 15:12 are reserved */
+#define XWAY_MDIO_LED_LED3_EN   BIT(11) /* Enable the integrated function of LED3 */
+#define XWAY_MDIO_LED_LED2_EN   BIT(10) /* Enable the integrated function of LED2 */
+#define XWAY_MDIO_LED_LED1_EN   BIT(9)  /* Enable the integrated function of LED1 */
+#define XWAY_MDIO_LED_LED0_EN   BIT(8)  /* Enable the integrated function of LED0 */
+/* bit 7:4 are reserved */
+#define XWAY_MDIO_LED_LED3_DA   BIT(3)  /* Direct Access to LED3 */
+#define XWAY_MDIO_LED_LED2_DA   BIT(2)  /* Direct Access to LED2 */
+#define XWAY_MDIO_LED_LED1_DA   BIT(1)  /* Direct Access to LED1 */
+#define XWAY_MDIO_LED_LED0_DA   BIT(0)  /* Direct Access to LED0 */
+
 #define XWAY_MDIO_INIT_WOL      BIT(15) /* Wake-On-LAN */
 #define XWAY_MDIO_INIT_MSRE     BIT(14)
@@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
     /* Clear all pending interrupts */
     phy_read(phydev, XWAY_MDIO_ISTAT);
 
+    /* Ensure that integrated led function is enabled for all leds */
+    err = phy_write(phydev, XWAY_MDIO_LED,
+                    XWAY_MDIO_LED_LED0_EN |
+                    XWAY_MDIO_LED_LED1_EN |
+                    XWAY_MDIO_LED_LED2_EN |
+                    XWAY_MDIO_LED_LED3_EN);
+    if (err)
+        return err;
+
     phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
                   XWAY_MMD_LEDCH_NACS_NONE |
                   XWAY_MMD_LEDCH_SBF_F02HZ |


@@ -978,22 +978,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
 static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
 {
-    int val;
+    int val, err;
 
     if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
         return -E2BIG;
 
-    if (!cnt)
-        return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
-                              MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
-
-    val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
-    val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+    if (!cnt) {
+        err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
                             MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+    } else {
+        val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+        val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
 
-    return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
-                      MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
-                      MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
-                      val);
+        err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+                         val);
+    }
+
+    if (err < 0)
+        return err;
+
+    return genphy_soft_reset(phydev);
 }
 
 static int m88e1111_get_tunable(struct phy_device *phydev,
@@ -1036,22 +1042,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
 static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
 {
-    int val;
+    int val, err;
 
     if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
         return -E2BIG;
 
-    if (!cnt)
-        return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
-                              MII_M1011_PHY_SCR_DOWNSHIFT_EN);
-
-    val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
-    val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+    if (!cnt) {
+        err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
                             MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+    } else {
+        val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+        val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
 
-    return phy_modify(phydev, MII_M1011_PHY_SCR,
-                      MII_M1011_PHY_SCR_DOWNSHIFT_EN |
-                      MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
-                      val);
+        err = phy_modify(phydev, MII_M1011_PHY_SCR,
+                         MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+                         val);
+    }
+
+    if (err < 0)
+        return err;
+
+    return genphy_soft_reset(phydev);
 }
 
 static int m88e1011_get_tunable(struct phy_device *phydev,


@@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
     const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
     u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
     struct iwl_tfh_tfd *tfd;
+    unsigned long flags;
 
     copy_size = sizeof(struct iwl_cmd_header_wide);
     cmd_size = sizeof(struct iwl_cmd_header_wide);
@@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
         goto free_dup_buf;
     }
 
-    spin_lock_bh(&txq->lock);
+    spin_lock_irqsave(&txq->lock, flags);
 
     idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
     tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
     memset(tfd, 0, sizeof(*tfd));
 
     if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-        spin_unlock_bh(&txq->lock);
+        spin_unlock_irqrestore(&txq->lock, flags);
 
         IWL_ERR(trans, "No space in command queue\n");
         iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
     spin_unlock(&trans_pcie->reg_lock);
 
 out:
-    spin_unlock_bh(&txq->lock);
+    spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
     if (idx < 0)
         kfree(dup_buf);


@@ -71,6 +71,9 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg,
     if (array == NULL)
         return -ENOBUFS;
 
+    /* paired with smp_rmb() in __vlan_group_get_device() */
+    smp_wmb();
+
     vg->vlan_devices_arrays[pidx][vidx] = array;
     return 0;
 }


@@ -57,6 +57,10 @@ static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
     array = vg->vlan_devices_arrays[pidx]
         [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+
+    /* paired with smp_wmb() in vlan_group_prealloc_vid() */
+    smp_rmb();
+
     return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
 }
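
The barrier pair orders "initialize the array" against "publish the pointer": the writer's smp_wmb() makes the freshly allocated array visible before the assignment that publishes it, and the reader's smp_rmb() orders the pointer load before the dereference. A C11 analogue using release/acquire fencing (illustrative, not the kernel code; the kernel barriers are a slightly weaker primitive):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int *_Atomic published;

    static void writer(void)
    {
        int *array = calloc(8, sizeof(*array));

        if (!array)
            return;
        array[0] = 42;                        /* initialize first ... */
        atomic_store_explicit(&published, array,
                              memory_order_release);  /* ... then publish */
    }

    static int reader(void)
    {
        int *array = atomic_load_explicit(&published,
                                          memory_order_acquire);

        /* a non-NULL pointer guarantees initialized contents are visible */
        return array ? array[0] : -1;
    }

    int main(void)
    {
        writer();
        printf("%d\n", reader());
        return 0;
    }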


@@ -5966,7 +5966,7 @@ static void gro_list_prepare(const struct list_head *head,
     }
 }
 
-static void skb_gro_reset_offset(struct sk_buff *skb)
+static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
 {
     const struct skb_shared_info *pinfo = skb_shinfo(skb);
     const skb_frag_t *frag0 = &pinfo->frags[0];
@@ -5977,7 +5977,7 @@
 
     if (!skb_headlen(skb) && pinfo->nr_frags &&
         !PageHighMem(skb_frag_page(frag0)) &&
-        (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
+        (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
         NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
         NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                             skb_frag_size(frag0),
@@ -6195,7 +6195,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
     skb_mark_napi_id(skb, napi);
     trace_napi_gro_receive_entry(skb);
 
-    skb_gro_reset_offset(skb);
+    skb_gro_reset_offset(skb, 0);
 
     ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
     trace_napi_gro_receive_exit(ret);
@@ -6284,7 +6284,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
     napi->skb = NULL;
 
     skb_reset_mac_header(skb);
-    skb_gro_reset_offset(skb);
+    skb_gro_reset_offset(skb, hlen);
 
     if (unlikely(skb_gro_header_hard(skb, hlen))) {
         eth = skb_gro_header_slow(skb, hlen, 0);
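
The new nhoff argument matters because in the napi_gro_frags() path the Ethernet header lives at the start of frag0, so the IP header actually begins hlen bytes into the fragment; the word-alignment test has to check that position rather than the fragment start. With the usual NET_IP_ALIGN of 2 the old test wrongly disabled the fast path, as this standalone arithmetic sketch shows:

    #include <stdio.h>

    int main(void)
    {
        /* typical values: drivers reserve NET_IP_ALIGN (2) bytes, and the
         * Ethernet header consumed in napi_frags_skb() is ETH_HLEN (14) */
        unsigned int frag_off = 2, nhoff = 14;

        /* old check tested the fragment start: 2 & 3 != 0 -> fast GRO off */
        printf("old: %s\n", (frag_off & 3) ? "slow path" : "fast path");
        /* new check tests the network header: (2 + 14) & 3 == 0 -> fast GRO */
        printf("new: %s\n", ((frag_off + nhoff) & 3) ? "slow path" : "fast path");
        return 0;
    }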


@@ -131,6 +131,9 @@ static void neigh_update_gc_list(struct neighbour *n)
     write_lock_bh(&n->tbl->lock);
     write_lock(&n->lock);
 
+    if (n->dead)
+        goto out;
+
     /* remove from the gc list if new state is permanent or if neighbor
      * is externally learned; otherwise entry should be on the gc list
      */
@@ -147,6 +150,7 @@ static void neigh_update_gc_list(struct neighbour *n)
         atomic_inc(&n->tbl->gc_entries);
     }
 
+out:
     write_unlock(&n->lock);
     write_unlock_bh(&n->tbl->lock);
 }


@@ -392,6 +392,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
     return false;
 }
 
+static void mptcp_set_datafin_timeout(const struct sock *sk)
+{
+    struct inet_connection_sock *icsk = inet_csk(sk);
+
+    mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
+                                   TCP_RTO_MIN << icsk->icsk_retransmits);
+}
+
 static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
 {
     long tout = ssk && inet_csk(ssk)->icsk_pending ?
@@ -1062,7 +1070,7 @@ out:
     }
 
     if (snd_una == READ_ONCE(msk->snd_nxt)) {
-        if (msk->timer_ival)
+        if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
             mptcp_stop_timer(sk);
     } else {
         mptcp_reset_timer(sk);
@@ -2287,8 +2295,19 @@ static void __mptcp_retrans(struct sock *sk)
     __mptcp_clean_una_wakeup(sk);
 
     dfrag = mptcp_rtx_head(sk);
-    if (!dfrag)
+    if (!dfrag) {
+        if (mptcp_data_fin_enabled(msk)) {
+            struct inet_connection_sock *icsk = inet_csk(sk);
+
+            icsk->icsk_retransmits++;
+            mptcp_set_datafin_timeout(sk);
+            mptcp_send_ack(msk);
+
+            goto reset_timer;
+        }
         return;
+    }
 
     ssk = mptcp_subflow_get_retrans(msk);
     if (!ssk)
@@ -2474,6 +2493,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
         pr_debug("Sending DATA_FIN on subflow %p", ssk);
         mptcp_set_timeout(sk, ssk);
         tcp_send_ack(ssk);
+        if (!mptcp_timer_pending(sk))
+            mptcp_reset_timer(sk);
     }
     break;
 }
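
The new DATA_FIN timeout is classic exponential backoff: TCP_RTO_MIN shifted left once per retransmission attempt, capped at TCP_RTO_MAX. A standalone sketch of the resulting schedule, assuming HZ=1000 so the constants from include/net/tcp.h work out to 200 ms and 120 s:

    #include <stdio.h>

    #define HZ          1000            /* assumption for this sketch */
    #define TCP_RTO_MIN (HZ / 5)        /* 200 jiffies = 200 ms */
    #define TCP_RTO_MAX (120 * HZ)      /* 120 s */

    int main(void)
    {
        for (unsigned int retransmits = 0; retransmits <= 10; retransmits++) {
            unsigned long ival = (unsigned long)TCP_RTO_MIN << retransmits;

            if (ival > TCP_RTO_MAX)     /* the min() cap in the patch */
                ival = TCP_RTO_MAX;
            printf("attempt %2u: timer_ival = %lu ms\n", retransmits, ival);
        }
        return 0;
    }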


@@ -392,7 +392,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
          *
          * Start with a full bucket.
          */
-        band->bucket = (band->burst_size + band->rate) * 1000ULL;
+        band->bucket = band->burst_size * 1000ULL;
         band_max_delta_t = div_u64(band->bucket, band->rate);
         if (band_max_delta_t > meter->max_delta_t)
             meter->max_delta_t = band_max_delta_t;
@@ -641,7 +641,7 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
         long long int max_bucket_size;
 
         band = &meter->bands[i];
-        max_bucket_size = (band->burst_size + band->rate) * 1000LL;
+        max_bucket_size = band->burst_size * 1000LL;
 
         band->bucket += delta_ms * band->rate;
         if (band->bucket > max_bucket_size)
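
The bucket is accounted in thousandths of the configured unit (tokens accrue as delta_ms * rate), so burst_size * 1000 is exactly one burst worth of tokens; the old cap of (burst_size + rate) * 1000 silently allowed an extra full second of rate on top of the configured burst. Worked numbers, made up for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical meter: rate 1000 units/s, burst 100 units,
         * bucket kept in thousandths of a unit */
        long long rate = 1000, burst_size = 100;

        long long old_cap = (burst_size + rate) * 1000LL; /* 1100000 */
        long long new_cap = burst_size * 1000LL;          /* 100000 */

        printf("old cap: %lld (burst + one full second of rate)\n", old_cap);
        printf("new cap: %lld (the configured burst only)\n", new_cap);
        return 0;
    }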


@@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
     struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
     int rc;
 
+    if (skb->sk)
+        sock_hold(skb->sk);
+
     rc = skb_linearize(skb);
     if (rc)
         goto free_skb;
@@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
     if (rc)
         goto free_skb;
 
-    if (skb->sk)
-        sock_hold(skb->sk);
-
     return rc;
 
 free_skb:
+    if (skb->sk)
+        sock_put(skb->sk);
     kfree_skb(skb);
 
     return rc;
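
The reorder takes the socket reference before the skb is handed to any path that can complete (and free it) asynchronously; previously the sock_hold() ran only after queueing, by which time the skb might already be gone, and the error path now releases the early reference unconditionally. A minimal refcount sketch of the hold-early pattern (illustrative names, not the kernel code):

    #include <stdio.h>

    struct obj { int refs; };

    static void hold(struct obj *o) { o->refs++; }
    static void put(struct obj *o)  { o->refs--; }

    static int send_one(struct obj *o, int fail)
    {
        hold(o);        /* taken up front, before any failure path */
        if (fail) {
            put(o);     /* every error path can release unconditionally */
            return -1;
        }
        return 0;       /* success: the reference is handed to completion */
    }

    int main(void)
    {
        struct obj o = { .refs = 1 };

        send_one(&o, 1);
        printf("refs after failed send: %d\n", o.refs); /* still 1, balanced */
        return 0;
    }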


@@ -945,6 +945,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
         list_for_each_entry(entry, &new->entries, list)
             cycle = ktime_add_ns(cycle, entry->interval);
 
+        if (!cycle) {
+            NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
+            return -EINVAL;
+        }
+
         new->cycle_time = cycle;
     }
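
The guard exists because the summed cycle time is later used as a divisor/modulus when mapping wall-clock time onto the schedule, so a schedule whose intervals sum to zero has to be rejected at parse time. A standalone sketch, with made-up interval values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t intervals[] = { 300000, 300000, 400000 }; /* ns, made up */
        uint64_t cycle = 0, now = 1234567890;

        for (unsigned int i = 0; i < 3; i++)
            cycle += intervals[i];

        if (!cycle) {
            fprintf(stderr, "'cycle_time' can never be 0\n");
            return 1;
        }
        /* safe only because cycle != 0 */
        printf("offset into cycle: %llu ns\n",
               (unsigned long long)(now % cycle));
        return 0;
    }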


@@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
     return t->send_pkt(reply);
 }
 
+/* This function should be called with sk_lock held and SOCK_DONE set */
+static void virtio_transport_remove_sock(struct vsock_sock *vsk)
+{
+    struct virtio_vsock_sock *vvs = vsk->trans;
+    struct virtio_vsock_pkt *pkt, *tmp;
+
+    /* We don't need to take rx_lock, as the socket is closing and we are
+     * removing it.
+     */
+    list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+        list_del(&pkt->list);
+        virtio_transport_free_pkt(pkt);
+    }
+
+    vsock_remove_sock(vsk);
+}
+
 static void virtio_transport_wait_close(struct sock *sk, long timeout)
 {
     if (timeout) {
@@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
         (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
         vsk->close_work_scheduled = false;
 
-        vsock_remove_sock(vsk);
+        virtio_transport_remove_sock(vsk);
 
         /* Release refcnt obtained when we scheduled the timeout */
         sock_put(sk);
@@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
 
 void virtio_transport_release(struct vsock_sock *vsk)
 {
-    struct virtio_vsock_sock *vvs = vsk->trans;
-    struct virtio_vsock_pkt *pkt, *tmp;
     struct sock *sk = &vsk->sk;
     bool remove_sock = true;
 
     if (sk->sk_type == SOCK_STREAM)
         remove_sock = virtio_transport_close(vsk);
 
-    list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
-        list_del(&pkt->list);
-        virtio_transport_free_pkt(pkt);
-    }
-
     if (remove_sock) {
         sock_set_flag(sk, SOCK_DONE);
-        vsock_remove_sock(vsk);
+        virtio_transport_remove_sock(vsk);
     }
 }
 EXPORT_SYMBOL_GPL(virtio_transport_release);


@@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
                    peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
 out:
     if (err < 0) {
-        pr_err("Could not attach to queue pair with %d\n",
-               err);
+        pr_err_once("Could not attach to queue pair with %d\n", err);
         err = vmci_transport_error_to_vsock_error(err);