Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) ICE, E1000, IGB, IXGBE, and I40E bug fixes from the Intel folks.

 2) Better fix for AB-BA deadlock in packet scheduler code, from Cong
    Wang.

 3) bpf sockmap fixes (zero-sized key handling, etc.) from Daniel
    Borkmann.

 4) Send zero IPID in TCP resets and SYN-RECV state ACKs, to prevent
    attackers from using it as a side-channel. From Eric Dumazet.

 5) Memory leak in mediatek bluetooth driver, from Gustavo A. R. Silva.

 6) Hook up rt->dst.input of ipv6 anycast routes properly, from Hangbin
    Liu.

 7) hns and hns3 bug fixes from Huazhong Tan.

 8) Fix RIF leak in mlxsw driver, from Ido Schimmel.

 9) iova range check fix in vhost, from Jason Wang.

10) Fix hang in do_tcp_sendpages() with tls, from John Fastabend.

11) More r8152 chips need to disable RX aggregation, from Kai-Heng Feng.

12) Memory exposure in TCA_U32_SEL handling, from Kees Cook.

13) TCP BBR congestion control fixes from Kevin Yang.

14) hv_netvsc: ignore non-PCI devices, from Stephen Hemminger.

15) qed driver fixes from Tomer Tayar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  net: sched: Fix memory exposure from short TCA_U32_SEL
  qed: fix spelling mistake "comparsion" -> "comparison"
  vhost: correctly check the iova range when waking virtqueue
  qlge: Fix netdev features configuration.
  net: macb: do not disable MDIO bus at open/close time
  Revert "net: stmmac: fix build failure due to missing COMMON_CLK dependency"
  net: macb: Fix regression breaking non-MDIO fixed-link PHYs
  mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge
  i40e: fix condition of WARN_ONCE for stat strings
  i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled
  ixgbe: fix driver behaviour after issuing VFLR
  ixgbe: Prevent unsupported configurations with XDP
  ixgbe: Replace GFP_ATOMIC with GFP_KERNEL
  igb: Replace mdelay() with msleep() in igb_integrated_phy_loopback()
  igb: Replace GFP_ATOMIC with GFP_KERNEL in igb_sw_init()
  igb: Use an advanced ctx descriptor for launchtime
  e1000: ensure to free old tx/rx rings in set_ringparam()
  e1000: check on netif_running() before calling e1000_up()
  ixgb: use dma_zalloc_coherent instead of allocator/memset
  ice: Trivial formatting fixes
  ...
commit 050cdc6c95
Author: Linus Torvalds
Date:   2018-08-27 11:59:39 -07:00

92 changed files with 754 additions and 711 deletions

diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
 	depends on GPIOLIB
+	depends on ACPI
 	select BT_HCIUART_3WIRE
 	select BT_RTL
 	help

diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
 	fw_size = fw->size;
 
 	/* The size of patch header is 30 bytes, should be skip */
-	if (fw_size < 30)
-		return -EINVAL;
+	if (fw_size < 30) {
+		err = -EINVAL;
+		goto free_fw;
+	}
 
 	fw_size -= 30;
 	fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
 		fw_ptr += dlen;
 	}
 
-	release_firmware(fw);
-
+free_fw:
+	release_firmware(fw);
 	return err;
 }

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 				 struct tcf_exts *tc_exts)
 {
 	const struct tc_action *tc_act;
-	LIST_HEAD(tc_actions);
-	int rc;
+	int i, rc;
 
 	if (!tcf_exts_has_actions(tc_exts)) {
 		netdev_info(bp->dev, "no actions");
 		return -EINVAL;
 	}
 
-	tcf_exts_to_list(tc_exts, &tc_actions);
-	list_for_each_entry(tc_act, &tc_actions, list) {
+	tcf_exts_for_each_action(i, tc_act, tc_exts) {
 		/* Drop action */
 		if (is_tcf_gact_shot(tc_act)) {
 			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;

diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
 	if (np) {
 		if (of_phy_is_fixed_link(np)) {
-			if (of_phy_register_fixed_link(np) < 0) {
-				dev_err(&bp->pdev->dev,
-					"broken fixed-link specification\n");
-				return -ENODEV;
-			}
 			bp->phy_node = of_node_get(np);
 		} else {
 			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
 {
 	struct macb_platform_data *pdata;
 	struct device_node *np;
-	int err;
+	int err = -ENXIO;
 
 	/* Enable management port */
 	macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
 	np = bp->pdev->dev.of_node;
-	if (pdata)
-		bp->mii_bus->phy_mask = pdata->phy_mask;
+	if (np && of_phy_is_fixed_link(np)) {
+		if (of_phy_register_fixed_link(np) < 0) {
+			dev_err(&bp->pdev->dev,
+				"broken fixed-link specification %pOF\n", np);
+			goto err_out_free_mdiobus;
+		}
+
+		err = mdiobus_register(bp->mii_bus);
+	} else {
+		if (pdata)
+			bp->mii_bus->phy_mask = pdata->phy_mask;
+
+		err = of_mdiobus_register(bp->mii_bus, np);
+	}
 
-	err = of_mdiobus_register(bp->mii_bus, np);
 	if (err)
-		goto err_out_free_mdiobus;
+		goto err_out_free_fixed_link;
 
 	err = macb_mii_probe(bp->dev);
 	if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
 
 err_out_unregister_bus:
 	mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
 	if (np && of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
 {
 	struct macb_queue *queue;
 	unsigned int q;
+	u32 ctrl = macb_readl(bp, NCR);
 
 	/* Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
 	 */
-	macb_writel(bp, NCR, 0);
+	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
 
 	/* Clear the stats registers (XXX: Update stats first?) */
-	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+	ctrl |= MACB_BIT(CLRSTAT);
+
+	macb_writel(bp, NCR, ctrl);
 
 	/* Clear all status flags */
 	macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
 	}
 
 	/* Enable TX and RX */
-	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
 /* The hash address register is 64 bits long and takes up two

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
 				       struct ch_filter_specification *fs)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
+	int i;
 
-	tcf_exts_to_list(cls->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, cls->exts) {
 		if (is_tcf_gact_ok(a)) {
 			fs->action = FILTER_PASS;
 		} else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
 	bool act_redir = false;
 	bool act_pedit = false;
 	bool act_vlan = false;
-	LIST_HEAD(actions);
+	int i;
 
-	tcf_exts_to_list(cls->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, cls->exts) {
 		if (is_tcf_gact_ok(a)) {
 			/* Do nothing */
 		} else if (is_tcf_gact_shot(a)) {

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
 	unsigned int num_actions = 0;
 	const struct tc_action *a;
 	struct tcf_exts *exts;
-	LIST_HEAD(actions);
+	int i;
 
 	exts = cls->knode.exts;
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		/* Don't allow more than one action per rule. */
 		if (num_actions)
 			return -EINVAL;

diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
 	/* priv data for the desc, e.g. skb when use with ip stack*/
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
+	u32 page_offset;
+	u32 length;	/* length of the buffer */
 
-	u16 length;	/* length of the buffer */
+	u16 reuse_flag;
 
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;

diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -406,113 +406,13 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
 	return NETDEV_TX_BUSY;
 }
 
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
-					unsigned int max_size)
-{
-	unsigned char *network;
-	u8 hlen;
-
-	/* this should never happen, but better safe than sorry */
-	if (max_size < ETH_HLEN)
-		return max_size;
-
-	/* initialize network frame pointer */
-	network = data;
-
-	/* set first protocol and move network header forward */
-	network += ETH_HLEN;
-
-	/* handle any vlan tag if present */
-	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
-		== HNS_RX_FLAG_VLAN_PRESENT) {
-		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-			return max_size;
-
-		network += VLAN_HLEN;
-	}
-
-	/* handle L3 protocols */
-	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-		== HNS_RX_FLAG_L3ID_IPV4) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct iphdr)))
-			return max_size;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[0] & 0x0F) << 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return network - data;
-
-		/* record next protocol if header is present */
-	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-		== HNS_RX_FLAG_L3ID_IPV6) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct ipv6hdr)))
-			return max_size;
-
-		/* record next protocol */
-		hlen = sizeof(struct ipv6hdr);
-	} else {
-		return network - data;
-	}
-
-	/* relocate pointer to start of L4 header */
-	network += hlen;
-
-	/* finally sort out TCP/UDP */
-	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-		== HNS_RX_FLAG_L4ID_TCP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct tcphdr)))
-			return max_size;
-
-		/* access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[12] & 0xF0) >> 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return network - data;
-
-		network += hlen;
-	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-		== HNS_RX_FLAG_L4ID_UDP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct udphdr)))
-			return max_size;
-
-		network += sizeof(struct udphdr);
-	}
-
-	/* If everything has gone correctly network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((typeof(max_size))(network - data) < max_size)
-		return network - data;
-	else
-		return max_size;
-}
-
 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
 			       struct hnae_ring *ring, int pull_len,
 			       struct hnae_desc_cb *desc_cb)
 {
 	struct hnae_desc *desc;
-	int truesize, size;
+	u32 truesize;
+	int size;
 	int last_offset;
 	bool twobufs;
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
 	}
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-			size - pull_len, truesize - pull_len);
+			size - pull_len, truesize);
 
 	/* avoid re-using remote pages,flag default unreuse */
 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 	} else {
 		ring->stats.seg_pkt_cnt++;
 
-		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+		pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
 		memcpy(__skb_put(skb, pull_len), va,
 		       ALIGN(pull_len, sizeof(long)));

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_desc_cb *desc_cb)
 {
 	struct hns3_desc *desc;
-	int truesize, size;
+	u32 truesize;
+	int size;
 	int last_offset;
 	bool twobufs;

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
 	/* priv data for the desc, e.g. skb when use with ip stack*/
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
+	u32 page_offset;
 
 	u32 length;	/* length of the buffer */
 
+	u16 reuse_flag;
+
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
 };

diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		adapter->tx_ring = tx_old;
 		e1000_free_all_rx_resources(adapter);
 		e1000_free_all_tx_resources(adapter);
-		kfree(tx_old);
-		kfree(rx_old);
 		adapter->rx_ring = rxdr;
 		adapter->tx_ring = txdr;
 		err = e1000_up(adapter);
 		if (err)
 			goto err_setup;
 	}
+	kfree(tx_old);
+	kfree(rx_old);
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
@@ -644,7 +644,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
 err_alloc_rx:
 	kfree(txdr);
 err_alloc_tx:
-	e1000_up(adapter);
+	if (netif_running(adapter->netdev))
+		e1000_up(adapter);
 err_setup:
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return err;

diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
 		i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
 
-	WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+	WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
 		  "stat strings count mismatch!");
 }

diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	struct i40e_pf *pf = vsi->back;
 	i40e_status ret;
 	int i;
 
-	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+	/* There is no need to reset BW when mqprio mode is on. */
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
 		return 0;
-	if (!vsi->mqprio_qopt.qopt.hw) {
+	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
 		if (ret)
-			dev_info(&vsi->back->pdev->dev,
+			dev_info(&pf->pdev->dev,
 				 "Failed to reset tx rate for vsi->seid %u\n",
 				 vsi->seid);
 		return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-				       NULL);
+	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 pf->hw.aq.asq_last_status);
 		return -EINVAL;
 	}

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
 	u16 qoffset;
 	u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
 	struct list_head tmp_sync_list;		/* MAC filters to be synced */
 	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
 
-	bool irqs_ready;
-	bool current_isup;		/* Sync 'link up' logging */
-	bool stat_offsets_loaded;
+	u8 irqs_ready;
+	u8 current_isup;		/* Sync 'link up' logging */
+	u8 stat_offsets_loaded;
 
 	/* queue information */
 	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
 	struct ice_hw_port_stats stats;
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
-	bool stat_prev_loaded;	/* has previous stats been loaded */
+	u8 stat_prev_loaded;	/* has previous stats been loaded */
 	char int_name[ICE_INT_NAME_STR_LEN];
 };

diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
 	/* VLAN section */
 	__le16 pvid; /* VLANS include priority bits */
 	u8 pvlan_reserved[2];
-	u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S		0
-#define ICE_AQ_VSI_PVLAN_MODE_M		(0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED	0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED	0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL	0x3
+	u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S		0
+#define ICE_AQ_VSI_VLAN_MODE_M		(0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED	0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED	0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL	0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID	BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S		3
-#define ICE_AQ_VSI_PVLAN_EMOD_M		(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S		3
+#define ICE_AQ_VSI_VLAN_EMOD_M		(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
 	u8 pvlan_reserved2[3];
 	/* ingress egress up sections */
 	__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M	(0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S	22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M	(0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX	7
 
 /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT		0x7

diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
 	struct ice_phy_info *phy_info;
 	enum ice_status status = 0;
 
-	if (!pi)
+	if (!pi || !link_up)
 		return ICE_ERR_PARAM;
 
 	phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
 	}
 
 	/* LUT size is only valid for Global and PF table types */
-	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+	switch (lut_size) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else {
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+			break;
+		}
+		/* fall-through */
+	default:
 		status = ICE_ERR_PARAM;
 		goto ice_aq_get_set_rss_lut_exit;
 	}

diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 	return 0;
 
 init_ctrlq_free_rq:
-	ice_shutdown_rq(hw, cq);
-	ice_shutdown_sq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
 	return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		return;
 	}
 
-	ice_shutdown_sq(hw, cq);
-	ice_shutdown_rq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
 	/* Set pending if needed, unlock and return */
-	if (pending)
+	if (pending) {
+		/* re-read HW head to calculate actual pending messages */
+		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+	}
 clean_rq_elem_err:
 	mutex_unlock(&cq->rq_lock);

diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 
-	return ((np->vsi->num_txq + np->vsi->num_rxq) *
+	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
 		(sizeof(struct ice_q_stats) / sizeof(u64)));
 }
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_txq(vsi, i) {
+		ice_for_each_alloc_txq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "tx-queue-%u.tx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_rxq(vsi, i) {
+		ice_for_each_alloc_rxq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "rx-queue-%u.rx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
+		/* The number (and order) of strings reported *must* remain
+		 * constant for a given netdevice. This function must not
+		 * report a different number based on run time parameters
+		 * (such as the number of queues in use, or the setting of
+		 * a private ethtool flag). This is due to the nature of the
+		 * ethtool stats API.
+		 *
+		 * User space programs such as ethtool must make 3 separate
+		 * ioctl requests, one for size, one for the strings, and
+		 * finally one for the stats. Since these cross into
+		 * user space, changes to the number or size could result in
+		 * undefined memory access or incorrect string<->value
+		 * correlations for statistics.
+		 *
+		 * Even if it appears to be safe, changes to the size or
+		 * order of strings will suffer from race conditions and are
+		 * not safe.
+		 */
 		return ICE_ALL_STATS_LEN(netdev);
 	default:
 		return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	/* populate per queue stats */
 	rcu_read_lock();
 
-	ice_for_each_txq(vsi, j) {
+	ice_for_each_alloc_txq(vsi, j) {
 		ring = READ_ONCE(vsi->tx_rings[j]);
-		if (!ring)
-			continue;
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
-	ice_for_each_rxq(vsi, j) {
+	ice_for_each_alloc_rxq(vsi, j) {
 		ring = READ_ONCE(vsi->rx_rings[j]);
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
 	rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_txq; i++) {
+	for (i = 0; i < vsi->alloc_txq; i++) {
 		/* clone ring and setup updated count */
 		tx_rings[i] = *vsi->tx_rings[i];
 		tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_rxq; i++) {
+	for (i = 0; i < vsi->alloc_rxq; i++) {
 		/* clone ring and setup updated count */
 		rx_rings[i] = *vsi->rx_rings[i];
 		rx_rings[i].count = new_rx_cnt;

diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
 #define PFINT_FW_CTL_CAUSE_ENA_S	30
 #define PFINT_FW_CTL_CAUSE_ENA_M	BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR			0x0016CA00
-#define PFINT_OICR_HLP_RDY_S		14
-#define PFINT_OICR_HLP_RDY_M		BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S		15
-#define PFINT_OICR_CPM_RDY_M		BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S		16
 #define PFINT_OICR_ECC_ERR_M		BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S		19
@@ -133,10 +129,6 @@
 #define PFINT_OICR_GRST_M		BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S	21
 #define PFINT_OICR_PCI_EXCEPTION_M	BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S		22
-#define PFINT_OICR_GPIO_M		BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S	24
-#define PFINT_OICR_STORM_DETECT_M	BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S		26
 #define PFINT_OICR_HMC_ERR_M		BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S		28

diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
 	u16 head;
 	u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
 	u64 base;
 	u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7

diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 		case ice_aqc_opc_get_link_status:
 			if (ice_handle_link_event(pf))
 				dev_err(&pf->pdev->dev,
-					"Could not handle link event");
+					"Could not handle link event\n");
 			break;
 		default:
 			dev_dbg(&pf->pdev->dev,
@@ -916,6 +916,21 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 	return pending && (i == ICE_DFLT_IRQ_WORK);
 }
 
+/**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+	u16 ntu;
+
+	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+	return cq->rq.next_to_clean != ntu;
+}
+
 /**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
@@ -923,7 +938,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	u32 val;
 
 	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
 		return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
 	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-	/* re-enable Admin queue interrupt causes */
-	val = rd32(hw, PFINT_FW_CTL);
-	wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+	/* There might be a situation where new messages arrive to a control
+	 * queue between processing the last message and clearing the
+	 * EVENT_PENDING bit. So before exiting, check queue head again (using
+	 * ice_ctrlq_pending) and process new messages if any.
+	 */
+	if (ice_ctrlq_pending(hw, &hw->adminq))
+		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
 	ice_flush(hw);
 }
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		qcount = numq_tc;
 	}
 
-	/* find higher power-of-2 of qcount */
-	pow = ilog2(qcount);
-
-	if (!is_power_of_2(qcount))
-		pow++;
+	/* find the (rounded up) power-of-2 of qcount */
+	pow = order_base_2(qcount);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
 	/* Traffic from VSI can be sent to LAN */
 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-	/* Allow all packets untagged/tagged */
-	ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-				       ICE_AQ_VSI_PVLAN_MODE_M) >>
-				      ICE_AQ_VSI_PVLAN_MODE_S);
-	/* Show VLAN/UP from packets in Rx descriptors */
-	ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-					ICE_AQ_VSI_PVLAN_EMOD_M) >>
-				       ICE_AQ_VSI_PVLAN_EMOD_S);
+
+	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+	 * packets untagged/tagged.
+	 */
+	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+				  ICE_AQ_VSI_VLAN_MODE_M) >>
+				 ICE_AQ_VSI_VLAN_MODE_S);
+
 	/* Have 1:1 UP mapping for both ingress/egress tables */
 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
 	rd32(hw, PFINT_OICR);		/* read to clear */
 
-	val = (PFINT_OICR_HLP_RDY_M |
-	       PFINT_OICR_CPM_RDY_M |
-	       PFINT_OICR_ECC_ERR_M |
+	val = (PFINT_OICR_ECC_ERR_M |
 	       PFINT_OICR_MAL_DETECT_M |
 	       PFINT_OICR_GRST_M |
 	       PFINT_OICR_PCI_EXCEPTION_M |
-	       PFINT_OICR_GPIO_M |
-	       PFINT_OICR_STORM_DETECT_M |
-	       PFINT_OICR_HMC_ERR_M);
+	       PFINT_OICR_HMC_ERR_M |
+	       PFINT_OICR_PE_CRITERR_M);
 
 	wr32(hw, PFINT_OICR_ENA, val);
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
 	ice_ena_misc_vector(pf);
 
-	val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-	      PFINT_OICR_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+	       PFINT_OICR_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_OICR_CTL, val);
 
 	/* This enables Admin queue Interrupt causes */
-	val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-	      PFINT_FW_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+	       PFINT_FW_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_FW_CTL, val);
 
 	itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
 	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
 		ice_dis_msix(pf);
 
-	devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-	pf->irq_tracker = NULL;
+	if (pf->irq_tracker) {
+		devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+		pf->irq_tracker = NULL;
+	}
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
 	if (err) {
-		dev_err(&pdev->dev, "I/O map error %d\n", err);
+		dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
 		return err;
 	}
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 	enum ice_status status;
 
 	/* Here we are configuring the VSI to let the driver add VLAN tags by
-	 * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-	 * tag insertion happens in the Tx hot path, in ice_tx_map.
+	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+	 * insertion happens in the Tx hot path, in ice_tx_map.
 	 */
-	ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	 */
 	if (ena) {
 		/* Strip VLAN tag from Rx packet and put it in the desc */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
 	} else {
 		/* Disable stripping. Leave tag in packet */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 	}
 
+	/* Allow all packets untagged/tagged */
+	ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* clear the context structure first */
 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-	rlan_ctx.base = ring->dma >> 7;
+	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 	rlan_ctx.qlen = ring->count;
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
 	int err;
 
-	ice_set_rx_mode(vsi->netdev);
-
-	err = ice_restore_vlan(vsi);
-	if (err)
-		return err;
+	if (vsi->netdev) {
+		ice_set_rx_mode(vsi->netdev);
+
+		err = ice_restore_vlan(vsi);
+		if (err)
+			return err;
+	}
 
 	err = ice_vsi_cfg_txqs(vsi);
 	if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_txq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_rxq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 	u8 count = 0;
 
 	if (new_mtu == netdev->mtu) {
-		netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
 		return 0;
 	}

diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-			   enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
 	if (hw->nvm.blank_nvm_mode)
 		return 0;

diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
 		return status;
 	}
 
-	if (owner == ICE_SCHED_NODE_OWNER_LAN)
-		vsi->max_lanq[tc] = new_numqs;
+	vsi->max_lanq[tc] = new_numqs;
 
 	return status;
 }

diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-	act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
 	/* Third action Marker value */
 	act |= ICE_LG_ACT_GENERIC;
 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
 		ICE_LG_ACT_GENERIC_VALUE_M;
-	act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
 	/* call the fill switch rule to fill the lookup tx rx structure */

diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
 	u16 vsis_unallocated;
 	u16 flags;
 	struct ice_aqc_vsi_props info;
-	bool alloc_from_pool;
+	u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
 	u8 qgrp_size;
 
 	/* Rule creations populate these indicators basing on the switch type */
-	bool lb_en;	/* Indicate if packet can be looped back */
-	bool lan_en;	/* Indicate if packet can be forwarded to the uplink */
+	u8 lb_en;	/* Indicate if packet can be looped back */
+	u8 lan_en;	/* Indicate if packet can be forwarded to the uplink */
 };
 
 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */

diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
 	u16 next_to_use;
 	u16 next_to_clean;
 
-	bool ring_active;	/* is ring online or not */
+	u8 ring_active;		/* is ring online or not */
 
 	/* stats structs */
 	struct ice_q_stats	stats;

diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
-	bool lse_ena;	/* Link Status Event notification */
+	u8 lse_ena;	/* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
 	struct ice_link_status link_info_old;
 	u64 phy_type_low;
 	enum ice_media_type media_type;
-	bool get_link_info;
+	u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
 	u32 oem_ver;		/* OEM version info */
 	u16 sr_words;		/* Shadow RAM size in words */
 	u16 ver;		/* NVM package version */
-	bool blank_nvm_mode;	/* is NVM empty (no FW present) */
+	u8 blank_nvm_mode;	/* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
 	struct ice_aqc_txsched_elem_data info;
 	u32 agg_id;		/* aggregator group id */
 	u16 vsi_id;
-	bool in_use;		/* suspended or in use */
+	u8 in_use;		/* suspended or in use */
 	u8 tx_sched_layer;	/* Logical Layer (1-9) */
 	u8 num_children;
 	u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
 	u16 max_num_vsis;
 	u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-	bool rdma_ena;
+	u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
 	struct list_head agg_list;	/* lists all aggregator */
 	u8 lport;
 #define ICE_LPORT_MASK		0xff
-	bool is_vf;
+	u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
 	u8 max_cgds;
 	u8 sw_entry_point_layer;
 
-	bool evb_veb;		/* true for VEB, false for VEPA */
+	u8 evb_veb;		/* true for VEB, false for VEPA */
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
 	u8 itr_gran_100;
 	u8 itr_gran_50;
 	u8 itr_gran_25;
-	bool ucast_shared;	/* true if VSIs can share unicast addr */
+	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 };

diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 	if (hw->phy.type == e1000_phy_m88)
 		igb_phy_disable_receiver(adapter);
 
-	mdelay(500);
+	msleep(500);
 	return 0;
 }

diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
 				     sizeof(struct igb_mac_addr),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!adapter->mac_table)
 		return -ENOMEM;
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	/* Setup and initialize a copy of the hw vlan table array */
 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
-				       GFP_ATOMIC);
+				       GFP_KERNEL);
 	if (!adapter->shadow_vfta)
 		return -ENOMEM;
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 csum_failed:
-		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+		    !tx_ring->launchtime_enable)
 			return;
 		goto no_csum;
 	}

diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					GFP_KERNEL);
+	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					 GFP_KERNEL);
 	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
 	}
-	memset(rxdr->desc, 0, rxdr->size);
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}
 
 	/* alloc the udl from per cpu ddp pool */
-	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 		return 0;
 
 	/* Extra buffer to be shared by all DDPs for HW work around */
-	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
 				     sizeof(struct ixgbe_mac_addr),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!adapter->mac_table)
 		return -ENOMEM;
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	if (adapter->xdp_prog) {
-		e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
-		return -EPERM;
+		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+				     VLAN_HLEN;
+		int i;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+				e_warn(probe, "Requested MTU size is not supported with XDP\n");
+				return -EINVAL;
+			}
+		}
 	}
 
 	/*
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 #ifdef CONFIG_IXGBE_DCB
 	if (tc) {
+		if (adapter->xdp_prog) {
+			e_warn(probe, "DCB is not supported with XDP\n");
+
+			ixgbe_init_interrupt_scheme(adapter);
+			if (netif_running(dev))
+				ixgbe_open(dev);
+			return -EINVAL;
+		}
+
 		netdev_set_num_tc(dev, tc);
 		ixgbe_set_prio_tc_map(adapter);
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
+	int i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
 			*action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	int tcs = adapter->hw_tcs ? : 1;
 	int pool, err;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "L2FW offload is not supported with XDP\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	/* The hardware supported by ixgbe only filters on the destination MAC
 	 * address. In order to avoid issues we only support offloading modes
 	 * where the hardware can actually provide the functionality.

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
+	if (adapter->xdp_prog) {
+		e_warn(probe, "SRIOV is not supported with XDP\n");
+		return -EINVAL;
+	}
+
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
 			  IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
 	u8 num_tcs = adapter->hw_tcs;
+	u32 reg_val;
+	u32 queue;
+	u32 word;
 
 	/* remove VLAN filters beloning to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 
 	/* reset VF api back to unknown */
 	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+	/* Restart each queue for given VF */
+	for (queue = 0; queue < q_per_pool; queue++) {
+		unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+		/* Re-enabling only configured queues */
+		if (reg_val) {
+			reg_val |= IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+		}
+	}
+
+	/* Clear VF's mailbox memory */
+	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+	IXGBE_WRITE_FLUSH(hw);
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
 /* Translated register #defines */
 #define IXGBE_PVFTDH(P)		(0x06010 + (0x40 * (P)))
 #define IXGBE_PVFTDT(P)		(0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)	(0x06028 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 		goto out_ok;
 
 	modify_ip_header = false;
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
+		int k;
+
 		if (!is_tcf_pedit(a))
 			continue;
 
 		nkeys = tcf_pedit_nkeys(a);
-		for (i = 0; i < nkeys; i++) {
-			htype = tcf_pedit_htype(a, i);
+		for (k = 0; k < nkeys; k++) {
+			htype = tcf_pedit_htype(a, k);
 			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
 			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
 				modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	const struct tc_action *a;
 	LIST_HEAD(actions);
 	u32 action = 0;
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
 	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_shot(a)) {
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	LIST_HEAD(actions);
 	bool encap = false;
 	u32 action = 0;
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	attr->in_rep = rpriv->rep;
 	attr->in_mdev = priv->mdev;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_shot(a)) {
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;

View file

@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOMEM;
 	mall_tc_entry->cookie = f->cookie;
 
-	tcf_exts_to_list(f->exts, &actions);
-	a = list_first_entry(&actions, struct tc_action, list);
+	a = tcf_exts_first_action(f->exts);
 
 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
 		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

View file

@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
 void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+				 struct net_device *dev);
 
 /* spectrum_kvdl.c */
 enum mlxsw_sp_kvdl_entry_type {

View file

@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			     struct netlink_ext_ack *extack)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
-	int err;
+	int err, i;
 
 	if (!tcf_exts_has_actions(exts))
 		return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		if (is_tcf_gact_ok(a)) {
 			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
 			if (err) {

View file

@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
 	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+				 struct net_device *dev)
+{
+	struct mlxsw_sp_rif *rif;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!rif)
+		return;
+	mlxsw_sp_rif_destroy(rif);
+}
+
 static void
 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)

View file

@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 }
 
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+						    void *data)
+{
+	struct mlxsw_sp *mlxsw_sp = data;
+
+	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+	return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+						struct net_device *dev)
+{
+	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+	netdev_walk_all_upper_dev_rcu(dev,
+				      mlxsw_sp_bridge_device_upper_rif_destroy,
+				      mlxsw_sp);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
 			      struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
 			       struct mlxsw_sp_bridge_device *bridge_device)
 {
+	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+					    bridge_device->dev);
 	list_del(&bridge_device->list);
 	if (bridge_device->vlan_enabled)
 		bridge->vlan_enabled_exists = false;

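The two helpers above tear down the RIF of the bridge device itself and of every device stacked on top of it before the bridge is destroyed, which is what plugs the RIF leak; netdev_walk_all_upper_dev_rcu() calls the given function once per upper device. A self-contained sketch of the same walker pattern, with a hypothetical callback that merely counts uppers:

    #include <linux/netdevice.h>

    /* Illustrative only: visit each device stacked above 'dev'. */
    static int count_upper_dev(struct net_device *upper, void *data)
    {
        int *count = data;

        (*count)++;     /* invoked once per upper device */
        return 0;       /* a non-zero return would abort the walk */
    }

    static int count_all_uppers(struct net_device *dev)
    {
        int count = 0;

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(dev, count_upper_dev, &count);
        rcu_read_unlock();
        return count;
    }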
View file

@@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow)
 {
-	int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
 	enum nfp_flower_tun_type tun_type;
 	const struct tc_action *a;
 	u32 csum_updated = 0;
-	LIST_HEAD(actions);
 
 	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
 	nfp_flow->meta.act_len = 0;
@@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
 	tun_out_cnt = 0;
 	out_cnt = 0;
 
-	tcf_exts_to_list(flow->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, flow->exts) {
 		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
 					     netdev, &tun_type, &tun_out_cnt,
 					     &out_cnt, &csum_updated);

View file

@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 	if (i == QED_INIT_MAX_POLL_COUNT) {
 		DP_ERR(p_hwfn,
-		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
 		       addr, le32_to_cpu(cmd->expected_val),
 		       val, le32_to_cpu(cmd->op_data));
 	}

View file

@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US	10
+#define QED_MCP_RESP_ITER_US	10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
 	return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
+#define QED_MCP_SHMEM_RDY_ITER_MS	50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-	if (!p_info->public_base)
-		return 0;
+	if (!p_info->public_base) {
+		DP_NOTICE(p_hwfn,
+			  "The address of the MCP scratch-pad is not configured\n");
+		return -EINVAL;
+	}
 
 	p_info->public_base |= GRCBASE_MCP;
 
+	/* Get the MFW MB address and number of supported messages */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+					    p_info->mfw_mb_addr +
+					    offsetof(struct public_mfw_mb,
+						     sup_msgs));
+
+	/* The driver can notify that there was an MCP reset, and might read the
+	 * SHMEM values before the MFW has completed initializing them.
+	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+	 * data ready indication.
+	 */
+	while (!p_info->mfw_mb_length && --cnt) {
+		msleep(msec);
+		p_info->mfw_mb_length =
+			(u16)qed_rd(p_hwfn, p_ptt,
+				    p_info->mfw_mb_addr +
+				    offsetof(struct public_mfw_mb, sup_msgs));
+	}
+
+	if (!cnt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to get the SHMEM ready notification after %d msec\n",
+			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+		return -EBUSY;
+	}
+
 	/* Calculate the driver and MFW mailbox address */
 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 				SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-	/* Set the MFW MB address */
-	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_info->public_base,
-						     PUBLIC_MFW_MB));
-	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
 	/* Get the current driver mailbox sequence before sending
 	 * the first command
 	 */
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+		return -EBUSY;
+	}
+
 	/* Ensure that only a single thread is accessing the mailbox */
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		    (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+	p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+		block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+	u32 delay = QED_MCP_RESP_ITER_US;
+
+	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+	DP_NOTICE(p_hwfn,
+		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		       struct qed_ptt *p_ptt,
 		       struct qed_mcp_mb_params *p_mb_params,
-		       u32 max_retries, u32 delay)
+		       u32 max_retries, u32 usecs)
 {
+	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
 	struct qed_mcp_cmd_elem *p_cmd_elem;
-	u32 cnt = 0;
 	u16 seq_num;
 	int rc = 0;
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 			goto err;
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-		udelay(delay);
+
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		 * The spinlock stays locked until the list element is removed.
 		 */
 
-		udelay(delay);
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
+
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
 		if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 			  p_mb_params->cmd, p_mb_params->param);
+		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+			qed_mcp_cmd_set_blocking(p_hwfn, true);
+
 		return -EAGAIN;
 	}
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
 		   p_mb_params->mcp_resp,
 		   p_mb_params->mcp_param,
-		   (cnt * delay) / 1000, (cnt * delay) % 1000);
+		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
 	/* Clear the sequence number from the MFW response */
 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
 	size_t union_data_size = sizeof(union drv_union_data);
 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-	u32 delay = CHIP_MCP_RESP_ITER_US;
+	u32 usecs = QED_MCP_RESP_ITER_US;
 
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EBUSY;
+	}
+
 	if (p_mb_params->data_src_size > union_data_size ||
 	    p_mb_params->data_dst_size > union_data_size) {
 		DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+		max_retries = DIV_ROUND_UP(max_retries, 1000);
+		usecs *= 1000;
+	}
+
 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-				      delay);
+				      usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	mb_params.data_src_size = sizeof(load_req);
 	mb_params.p_data_dst = &load_rsp;
 	mb_params.data_dst_size = sizeof(load_rsp);
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 wol_param, mcp_resp, mcp_param;
+	struct qed_mcp_mb_params mb_params;
+	u32 wol_param;
 
 	switch (p_hwfn->cdev->wol_config) {
 	case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
 	}
 
-	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-			   &mcp_resp, &mcp_param);
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+	mb_params.param = wol_param;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS	10
+#define QED_MCP_HALT_MAX_RETRIES	10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 resp = 0, param = 0;
+	u32 resp = 0, param = 0, cpu_state, cnt = 0;
 	int rc;
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 			 &param);
-	if (rc)
+	if (rc) {
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
 
-	return rc;
+	do {
+		msleep(QED_MCP_HALT_SLEEP_MS);
+		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+			break;
+	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+	return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS	10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 value, cpu_mode;
+	u32 cpu_mode, cpu_state;
 
 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+	msleep(QED_MCP_RESUME_SLEEP_MS);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  cpu_mode, cpu_state);
+		return -EBUSY;
+	}
+
+	qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+	return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,

View file

@@ -635,11 +635,14 @@ struct qed_mcp_info {
 	 */
 	spinlock_t				cmd_lock;
 
+	/* Flag to indicate whether sending a MFW mailbox command is blocked */
+	bool					b_block_cmd;
+
 	/* Spinlock used for syncing SW link-changes and link-changes
 	 * originating from attention context.
 	 */
 	spinlock_t				link_lock;
-	bool					block_mb_sending;
+
 	u32					public_base;
 	u32					drv_mb_addr;
 	u32					mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
 	u32 cmd;
 	u32 param;
 	void *p_data_src;
-	u8 data_src_size;
 	void *p_data_dst;
+	u8 data_src_size;
 	u8 data_dst_size;
 	u32 mcp_resp;
 	u32 mcp_param;
+	u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+	({ typeof(params) __params = (params); \
+	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
 
 struct qed_drv_tlv_hdr {

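QED_MB_FLAGS_IS_SET is a GCC statement expression: it evaluates the params pointer once, tolerates NULL, and token-pastes the short flag name onto the QED_MB_FLAG_ prefix. An illustrative caller, not taken from the driver, might look like:

    struct qed_mcp_mb_params mb_params;

    memset(&mb_params, 0, sizeof(mb_params));
    mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

    /* The macro checks the pointer is non-NULL, then tests
     * mb_params.flags & QED_MB_FLAG_CAN_SLEEP.
     */
    if (QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP))
        msleep(1);      /* caller is allowed to block between retries */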
View file

@@ -562,8 +562,10 @@
 	0
 #define MCP_REG_CPU_STATE \
 	0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER	0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \

View file

@@ -2006,18 +2006,16 @@ int qede_get_arfs_filter_count(struct qede_dev *edev)
 static int qede_parse_actions(struct qede_dev *edev,
 			      struct tcf_exts *exts)
 {
-	int rc = -EINVAL, num_act = 0;
+	int rc = -EINVAL, num_act = 0, i;
 	const struct tc_action *a;
 	bool is_drop = false;
-	LIST_HEAD(actions);
 
 	if (!tcf_exts_has_actions(exts)) {
 		DP_NOTICE(edev, "No tc actions received\n");
 		return rc;
 	}
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		num_act++;
 
 		if (is_tcf_gact_shot(a))

View file

@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 	return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-	netdev_features_t features)
-{
-	int err;
-
-	/* Update the behavior of vlan accel in the adapter */
-	err = qlge_update_hw_vlan_features(ndev, features);
-	if (err)
-		return err;
-
-	return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
 	netdev_features_t features)
 {
 	netdev_features_t changed = ndev->features ^ features;
+	int err;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		/* Update the behavior of vlan accel in the adapter */
+		err = qlge_update_hw_vlan_features(ndev, features);
+		if (err)
+			return err;
 
-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		qlge_vlan_mode(ndev, features);
+	}
 
 	return 0;
 }
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_fix_features	= qlge_fix_features,
 	.ndo_set_features	= qlge_set_features,
 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,

View file

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #ifndef __RAVB_H__

View file

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
 */
 
 #include <linux/cache.h>

View file

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* SuperH Ethernet device driver
  *
  * Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
  * Copyright (C) 2008-2014 Renesas Solutions Corp.
  * Copyright (C) 2013-2017 Cogent Embedded, Inc.
  * Copyright (C) 2014 Codethink Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
 */
 
 #include <linux/module.h>

View file

@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* SuperH Ethernet device driver
  *
  * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  * Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
  */
 
 #ifndef __SH_ETH_H__

View file

@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
 	select PHYLIB
 	select CRC32
 	select MII
-	depends on OF && COMMON_CLK && HAS_DMA
+	depends on OF && HAS_DMA
 	help
 	  Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
 
@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
 	tristate "QCA IPQ806x DWMAC support"
 	default ARCH_QCOM
-	depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
 	tristate "Rockchip dwmac support"
 	default ARCH_ROCKCHIP
-	depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+	depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
 	tristate "STi GMAC support"
 	default ARCH_STI
-	depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+	depends on OF && (ARCH_STI || COMPILE_TEST)
 	select MFD_SYSCON
 	---help---
 	  Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
 	tristate "Allwinner GMAC support"
 	default ARCH_SUNXI
-	depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+	depends on OF && (ARCH_SUNXI || COMPILE_TEST)
 	---help---
 	  Support for Allwinner A20/A31 GMAC ethernet controllers.

View file

@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
 	struct stmmac_tc_entry *action_entry = entry;
 	const struct tc_action *act;
 	struct tcf_exts *exts;
-	LIST_HEAD(actions);
+	int i;
 
 	exts = cls->knode.exts;
 	if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
 	if (frag)
 		action_entry = frag;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(act, &actions, list) {
+	tcf_exts_for_each_action(i, act, exts) {
 		/* Accept */
 		if (is_tcf_gact_ok(act)) {
 			action_entry->val.af = 1;

View file

@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 {
 	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
+	struct device *pdev = vf_netdev->dev.parent;
 	struct netvsc_device *netvsc_dev;
 	int ret;
 
 	if (vf_netdev->addr_len != ETH_ALEN)
 		return NOTIFY_DONE;
 
+	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
+		return NOTIFY_DONE;
+
 	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching

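netvsc now refuses to pair with anything whose parent device is not a PCI virtual function, so non-PCI devices that happen to share the synthetic MAC are left alone. A hedged sketch of the same guard as a standalone predicate:

    #include <linux/netdevice.h>
    #include <linux/pci.h>

    /* Illustrative: true only when ndev sits on a PCI VF parent. */
    static bool netdev_parent_is_pci_vf(struct net_device *ndev)
    {
        struct device *parent = ndev->dev.parent;

        /* No parent means a synthetic device; dev_is_pf() is true
         * for a PCI physical function, false for a VF.
         */
        return parent && dev_is_pci(parent) && !dev_is_pf(parent);
    }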
View file

@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
 		netdev->hw_features &= ~NETIF_F_RXCSUM;
 	}
 
-	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
-	    udev->serial && !strcmp(udev->serial, "000001000000")) {
+	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+	    (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
 		dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
 		set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
 	}

View file

@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 	list_for_each_entry_safe(node, n, &d->pending_list, node) {
 		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
 		if (msg->iova <= vq_msg->iova &&
-		    msg->iova + msg->size - 1 > vq_msg->iova &&
+		    msg->iova + msg->size - 1 >= vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
 			vhost_poll_queue(&node->vq->poll);
 			list_del(&node->node);

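The one-character fix turns the range test into a proper closed-interval check: a pending IOTLB miss at address A must wake the virtqueue even when the updated region is a single byte, i.e. when iova == iova + size - 1. As a standalone predicate, the corrected test is:

    /* Does 'miss_iova' fall inside [iova, iova + size - 1]?
     * With the old '>' a one-byte update (size == 1) could never match.
     */
    static bool iova_in_range(__u64 iova, __u64 size, __u64 miss_iova)
    {
        return iova <= miss_iova && iova + size - 1 >= miss_iova;
    }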
View file

@@ -23,13 +23,11 @@ struct tc_action {
 	const struct tc_action_ops	*ops;
 	__u32				type; /* for backward compat(TCA_OLD_COMPAT) */
 	__u32				order;
-	struct list_head		list;
 	struct tcf_idrinfo		*idrinfo;
 
 	u32				tcfa_index;
 	refcount_t			tcfa_refcnt;
 	atomic_t			tcfa_bindcnt;
-	u32				tcfa_capab;
 	int				tcfa_action;
 	struct tcf_t			tcfa_tm;
 	struct gnet_stats_basic_packed	tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
 #define tcf_index	common.tcfa_index
 #define tcf_refcnt	common.tcfa_refcnt
 #define tcf_bindcnt	common.tcfa_bindcnt
-#define tcf_capab	common.tcfa_capab
 #define tcf_action	common.tcfa_action
 #define tcf_tm		common.tcfa_tm
 #define tcf_bstats	common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
 	size_t  (*get_fill_size)(const struct tc_action *act);
 	struct net_device *(*get_dev)(const struct tc_action *a);
 	void	(*put_dev)(struct net_device *dev);
-	int     (*delete)(struct net *net, u32 index);
 };
 
 struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 		       const struct tc_action_ops *ops,
 		       struct netlink_ext_ack *extack);
 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-		   int bind);
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 		   struct tc_action **a, const struct tc_action_ops *ops,
 		   int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 			struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
 int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
 
 static inline int tcf_idr_release(struct tc_action *a, bool bind)

View file

@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
 #endif
 }
 
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
-				    struct list_head *actions)
-{
 #ifdef CONFIG_NET_CLS_ACT
-	int i;
-
-	for (i = 0; i < exts->nr_actions; i++) {
-		struct tc_action *a = exts->actions[i];
-
-		list_add_tail(&a->list, actions);
-	}
+#define tcf_exts_for_each_action(i, a, exts) \
+	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+	for (; 0; (void)(i), (void)(a), (void)(exts))
 #endif
-}
 
 static inline void
 tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
 #endif
 }
 
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	return exts->actions[0];
+#else
+	return NULL;
+#endif
+}
+
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer

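tcf_exts_for_each_action() walks the filter's fixed-size actions array directly, stopping at TCA_ACT_MAX_PRIO or the first NULL slot, so actions no longer need an embedded list_head just to be iterated. A sketch of a converted caller (hypothetical driver code, names illustrative):

    #include <net/pkt_cls.h>
    #include <net/tc_act/tc_gact.h>

    static int example_parse_actions(struct tcf_exts *exts)
    {
        const struct tc_action *a;
        int i;

        if (!tcf_exts_has_actions(exts))
            return -EINVAL;

        tcf_exts_for_each_action(i, a, exts) {
            if (is_tcf_gact_shot(a))    /* "drop" action */
                return 0;
        }
        return -EOPNOTSUPP;
    }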
View file

@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
+	u32 hashrnd;
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;
 
+	htab->hashrnd = get_random_int();
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	return ERR_PTR(err);
 }
 
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-	return jhash(key, key_len, 0);
+	return jhash(key, key_len, hashrnd);
 }
 
 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	if (!key)
 		goto find_first_elem;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;

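Seeding jhash() with a per-map random value means bucket placement differs between map instances and between boots, so observing one map does not let a user precompute keys that all collide in another. The shape of the change, as an illustrative standalone helper (not the kernel struct):

    #include <linux/jhash.h>
    #include <linux/random.h>

    struct seeded_table {
        u32 hashrnd;        /* drawn once, at allocation */
        u32 n_buckets;      /* power of two */
    };

    static void seeded_table_init(struct seeded_table *t, u32 n_buckets)
    {
        t->n_buckets = n_buckets;
        t->hashrnd = get_random_int();
    }

    static u32 seeded_table_bucket(const struct seeded_table *t,
                                   const void *key, u32 key_len)
    {
        return jhash(key, key_len, t->hashrnd) & (t->n_buckets - 1);
    }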
View file

@@ -1427,12 +1427,15 @@ static void smap_tx_work(struct work_struct *w)
 static void smap_write_space(struct sock *sk)
 {
 	struct smap_psock *psock;
+	void (*write_space)(struct sock *sk);
 
 	rcu_read_lock();
 	psock = smap_psock_sk(sk);
 	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
 		schedule_work(&psock->tx_work);
+	write_space = psock->save_write_space;
 	rcu_read_unlock();
+	write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		return ERR_PTR(-EPERM);
 
 	/* check sanity of attributes */
-	if (attr->max_entries == 0 || attr->value_size != 4 ||
+	if (attr->max_entries == 0 ||
+	    attr->key_size == 0 ||
+	    attr->value_size != 4 ||
 	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
@@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
 	}
 	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 			     htab->map.numa_node);
-	if (!l_new)
+	if (!l_new) {
+		atomic_dec(&htab->count);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	memcpy(l_new->key, key, key_size);
 	l_new->sk = sk;

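The smap_write_space() change snapshots the socket's original write_space callback while still inside the RCU read-side section and only invokes it after rcu_read_unlock(), so the stack's own callback keeps firing during psock teardown. The general pattern, sketched with hypothetical types:

    /* Illustrative: copy an RCU-protected callback pointer under the
     * read lock, call it only after unlocking.
     */
    static void forward_write_space(struct sock *sk)
    {
        void (*write_space)(struct sock *sk) = NULL;
        struct my_psock *psock;         /* hypothetical */

        rcu_read_lock();
        psock = my_psock_lookup(sk);    /* hypothetical lookup */
        if (psock)
            write_space = psock->saved_write_space;
        rcu_read_unlock();

        if (write_space)
            write_space(sk);    /* a plain copied pointer; safe to call */
    }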
View file

@@ -28,7 +28,6 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
-#include <linux/rhashtable.h>
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U

View file

@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	const struct tc_action *a;
 	struct dsa_port *to_dp;
 	int err = -EOPNOTSUPP;
-	LIST_HEAD(actions);
 
 	if (!ds->ops->port_mirror_add)
 		return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	if (!tcf_exts_has_one_action(cls->exts))
 		return err;
 
-	tcf_exts_to_list(cls->exts, &actions);
-	a = list_first_entry(&actions, struct tc_action, list);
+	a = tcf_exts_first_action(cls->exts);
 
 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
 		struct dsa_mall_mirror_tc_entry *mirror;

View file

@@ -95,11 +95,10 @@ struct bbr {
 	u32	mode:3,		     /* current bbr_mode in state machine */
 		prev_ca_state:3,     /* CA state on previous ACK */
 		packet_conservation:1,  /* use packet conservation? */
-		restore_cwnd:1,	     /* decided to revert cwnd to old value */
 		round_start:1,	     /* start of packet-timed tx->ack round? */
 		idle_restart:1,	     /* restarting after idle? */
 		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
-		unused:12,
+		unused:13,
 		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
 		lt_rtt_cnt:7,	     /* round trips in long-term interval */
 		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
 /* If we estimate we're policed, use lt_bw for this many round trips: */
 static const u32 bbr_lt_bw_max_rtts = 48;
 
+static void bbr_check_probe_rtt_done(struct sock *sk);
+
 /* Do we estimate that STARTUP filled the pipe? */
 static bool bbr_full_bw_reached(const struct sock *sk)
 {
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 		 */
 		if (bbr->mode == BBR_PROBE_BW)
 			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+		else if (bbr->mode == BBR_PROBE_RTT)
+			bbr_check_probe_rtt_done(sk);
 	}
 }
 
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
 		cwnd = tcp_packets_in_flight(tp) + acked;
 	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
 		/* Exiting loss recovery; restore cwnd saved before recovery. */
-		bbr->restore_cwnd = 1;
+		cwnd = max(cwnd, bbr->prior_cwnd);
 		bbr->packet_conservation = 0;
 	}
 	bbr->prev_ca_state = state;
 
-	if (bbr->restore_cwnd) {
-		/* Restore cwnd after exiting loss recovery or PROBE_RTT. */
-		cwnd = max(cwnd, bbr->prior_cwnd);
-		bbr->restore_cwnd = 0;
-	}
-
 	if (bbr->packet_conservation) {
 		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
 		return true;	/* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u32 cwnd = 0, target_cwnd = 0;
+	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
 
 	if (!acked)
-		return;
+		goto done;  /* no packet fully ACKed; just apply caps */
 
 	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
 		goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
 		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
 }
 
+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+
+	if (!(bbr->probe_rtt_done_stamp &&
+	      after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+		return;
+
+	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
+	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+	bbr_reset_mode(sk);
+}
+
 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
  * periodically drain the bottleneck queue, to converge to measure the true
  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 	} else if (bbr->probe_rtt_done_stamp) {
 		if (bbr->round_start)
 			bbr->probe_rtt_round_done = 1;
-		if (bbr->probe_rtt_round_done &&
-		    after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
-			bbr->min_rtt_stamp = tcp_jiffies32;
-			bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
-			bbr_reset_mode(sk);
-		}
+		if (bbr->probe_rtt_round_done)
+			bbr_check_probe_rtt_done(sk);
 	}
 }
 
 /* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
 	bbr->has_seen_rtt = 0;
 	bbr_init_pacing_rate_from_rtt(sk);
 
-	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
 	bbr->idle_restart = 0;
 	bbr->full_bw_reached = 0;

View file

@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net)
 		if (res)
 			goto fail;
 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+		/* Please enforce IP_DF and IPID==0 for RST and
+		 * ACK sent in SYN-RECV and TIME-WAIT state.
+		 */
+		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
 	}

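Pinning the per-CPU control sockets to IP_PMTUDISC_DO makes every RST, and every ACK sent from SYN-RECV or TIME-WAIT, carry the DF bit; with DF set the kernel can use a zero IPv4 ID, which closes the IPID side channel these packets used to leak. Userspace can request the same per-socket behaviour; an illustrative analogue:

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Illustrative: always set DF on this socket's packets. */
    static int force_df(int fd)
    {
        int val = IP_PMTUDISC_DO;

        return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
                          &val, sizeof(val));
    }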
View file

@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
 
-	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+	ip6_route_add(&cfg, GFP_KERNEL, NULL);
 }
 
 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 	if (addr.s6_addr32[3]) {
 		add_addr(idev, &addr, plen, scope);
 		addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
-				      GFP_ATOMIC);
+				      GFP_KERNEL);
 		return;
 	}
 
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 
 			add_addr(idev, &addr, plen, flag);
 			addrconf_prefix_route(&addr, plen, 0, idev->dev,
-					      0, pflags, GFP_ATOMIC);
+					      0, pflags, GFP_KERNEL);
 		}
 	}
 }

View file

@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
 		}
 	}
 
+	lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
 	if (f6i->fib6_nh.nh_dev)
 		dev_put(f6i->fib6_nh.nh_dev);

View file

@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
 	}
 
 	t = rtnl_dereference(ip6n->tnls_wc[0]);
-	unregister_netdevice_queue(t->dev, list);
+	if (t)
+		unregister_netdevice_queue(t->dev, list);
 }
 
 static int __net_init vti6_init_net(struct net *net)

View file

@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
 	rt->dst.error = 0;
 	rt->dst.output = ip6_output;
 
-	if (ort->fib6_type == RTN_LOCAL) {
+	if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
 		rt->dst.input = ip6_input;
 	} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
 		rt->dst.input = ip6_mc_input;

View file

@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
 	bool found;
 	int rc;
 
-	if (id > ndp->package_num) {
+	if (id > ndp->package_num - 1) {
 		netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
 		return -ENODEV;
 	}
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
 		return 0; /* done */
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
-			  &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO);
+			  &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
 	if (!hdr) {
 		rc = -EMSGSIZE;
 		goto err;

View file

@@ -37,7 +37,6 @@
 #include <net/tcp.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
-#include <net/tcp.h>
 #include <net/addrconf.h>
 
 #include "rds.h"

View file

@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);
 
-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
-                            struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
     struct tcf_idrinfo *idrinfo = tn->idrinfo;
     struct tc_action *p;
 
     spin_lock(&idrinfo->lock);
     p = idr_find(&idrinfo->action_idr, index);
-    if (IS_ERR(p)) {
+    if (IS_ERR(p))
         p = NULL;
-    } else if (p) {
+    else if (p)
         refcount_inc(&p->tcfa_refcnt);
-        if (bind)
-            atomic_inc(&p->tcfa_bindcnt);
-    }
     spin_unlock(&idrinfo->lock);
 
     if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
     }
 
     return false;
 }
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
-    return __tcf_idr_check(tn, index, a, 0);
-}
 EXPORT_SYMBOL(tcf_idr_search);
 
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                   int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 {
-    return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
-    struct tcf_idrinfo *idrinfo = tn->idrinfo;
     struct tc_action *p;
     int ret = 0;
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
     spin_unlock(&idrinfo->lock);
     return ret;
 }
-EXPORT_SYMBOL(tcf_idr_delete_index);
 
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                    struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
     p->idrinfo = idrinfo;
     p->ops = ops;
-    INIT_LIST_HEAD(&p->list);
     *a = p;
     return 0;
 err3:
@@ -686,14 +667,18 @@ static int tcf_action_put(struct tc_action *p)
     return __tcf_action_put(p, false);
 }
 
+/* Put all actions in this array, skip those NULL's. */
 static void tcf_action_put_many(struct tc_action *actions[])
 {
     int i;
 
-    for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+    for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
         struct tc_action *a = actions[i];
-        const struct tc_action_ops *ops = a->ops;
+        const struct tc_action_ops *ops;
 
+        if (!a)
+            continue;
+        ops = a->ops;
         if (tcf_action_put(a))
             module_put(ops->owner);
     }
@@ -1175,41 +1160,38 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
     return err;
 }
 
-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
-                             int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
 {
-    u32 act_index;
-    int ret, i;
+    int i;
 
     for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
         struct tc_action *a = actions[i];
         const struct tc_action_ops *ops = a->ops;
         /* Actions can be deleted concurrently so we must save their
          * type and id to search again after reference is released.
          */
-        act_index = a->tcfa_index;
+        struct tcf_idrinfo *idrinfo = a->idrinfo;
+        u32 act_index = a->tcfa_index;
 
         if (tcf_action_put(a)) {
             /* last reference, action was deleted concurrently */
             module_put(ops->owner);
         } else {
+            int ret;
+
             /* now do the delete */
-            ret = ops->delete(net, act_index);
-            if (ret < 0) {
-                *acts_deleted = i + 1;
+            ret = tcf_idr_delete_index(idrinfo, act_index);
+            if (ret < 0)
                 return ret;
-            }
         }
+        actions[i] = NULL;
     }
-    *acts_deleted = i;
     return 0;
 }
 
 static int
 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
-               int *acts_deleted, u32 portid, size_t attr_size,
-               struct netlink_ext_ack *extack)
+               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
 {
     int ret;
     struct sk_buff *skb;
@@ -1227,7 +1209,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
     }
 
     /* now do the delete */
-    ret = tcf_action_delete(net, actions, acts_deleted, extack);
+    ret = tcf_action_delete(net, actions);
     if (ret < 0) {
         NL_SET_ERR_MSG(extack, "Failed to delete TC action");
         kfree_skb(skb);
@@ -1249,8 +1231,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
     struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
     struct tc_action *act;
     size_t attr_size = 0;
-    struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
-    int acts_deleted = 0;
+    struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
     ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
     if (ret < 0)
@@ -1280,14 +1261,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
     if (event == RTM_GETACTION)
         ret = tcf_get_notify(net, portid, n, actions, event, extack);
     else { /* delete */
-        ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
-                             attr_size, extack);
+        ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
         if (ret)
             goto err;
-        return ret;
+        return 0;
     }
 err:
-    tcf_action_put_many(&actions[acts_deleted]);
+    tcf_action_put_many(actions);
     return ret;
 }
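
The invariant that makes this refactor safe: tcf_action_delete() now NULLs each array slot it has fully handled, and tcf_action_put_many() walks the whole fixed-size array skipping NULLs, so the shared error path can no longer double-put or leak references after a partial delete. A toy userspace model of that ownership hand-off (illustrative names, not the kernel API):

#include <stdlib.h>

#define MAX_ACTS 4

struct act { int id; };

/* Error-path cleanup: release only the slots still owned. */
static void put_many(struct act *acts[])
{
    for (int i = 0; i < MAX_ACTS; i++) {
        if (!acts[i])
            continue;           /* slot already consumed by delete_all() */
        free(acts[i]);
        acts[i] = NULL;
    }
}

/* Delete pass: NULL each slot once it is fully disposed of. */
static int delete_all(struct act *acts[], int fail_at)
{
    for (int i = 0; i < MAX_ACTS && acts[i]; i++) {
        if (i == fail_at)
            return -1;          /* caller falls back to put_many() */
        free(acts[i]);
        acts[i] = NULL;         /* mark slot as handled */
    }
    return 0;
}

int main(void)
{
    struct act *acts[MAX_ACTS] = { 0 };

    for (int i = 0; i < 3; i++) {
        acts[i] = calloc(1, sizeof(*acts[i]));
        if (!acts[i])
            return 1;
        acts[i]->id = i;
    }
    if (delete_all(acts, 2) < 0)    /* simulated mid-array failure */
        put_many(acts);             /* releases only the survivors */
    return 0;
}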

@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_bpf_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_bpf_ops __read_mostly = {
     .kind = "bpf",
     .type = TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
     .init = tcf_bpf_init,
     .walk = tcf_bpf_walker,
     .lookup = tcf_bpf_search,
-    .delete = tcf_bpf_delete,
     .size = sizeof(struct tcf_bpf),
 };
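
This is the first of a long run of identical removals: every action module carried a .delete callback that did nothing but resolve its own per-netns table and call the shared helper, and all of them fall away now that the core records a->idrinfo at create time (see the act_api.c change above). A compilable miniature of the pattern, with invented names:

struct idr_table { int dummy; };

struct action {
    unsigned int index;
    struct idr_table *idrinfo;  /* recorded when the action is created */
};

static int table_delete(struct idr_table *t, unsigned int index)
{
    (void)t;                    /* would remove `index` from table `t` */
    (void)index;
    return 0;
}

/* One generic helper replaces N per-type wrappers like tcf_bpf_delete(). */
static int action_delete(struct action *a)
{
    return table_delete(a->idrinfo, a->index);
}

int main(void)
{
    struct idr_table tbl = { 0 };
    struct action act = { 1, &tbl };

    return action_delete(&act);
}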

@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_connmark_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_connmark_ops = {
     .kind = "connmark",
     .type = TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
     .init = tcf_connmark_init,
     .walk = tcf_connmark_walker,
     .lookup = tcf_connmark_search,
-    .delete = tcf_connmark_delete,
     .size = sizeof(struct tcf_connmark_info),
 };

@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
     return nla_total_size(sizeof(struct tc_csum));
 }
 
-static int tcf_csum_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_csum_ops = {
     .kind = "csum",
     .type = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
     .walk = tcf_csum_walker,
     .lookup = tcf_csum_search,
     .get_fill_size = tcf_csum_get_fill_size,
-    .delete = tcf_csum_delete,
     .size = sizeof(struct tcf_csum),
 };

@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
     return sz;
 }
 
-static int tcf_gact_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_gact_ops = {
     .kind = "gact",
     .type = TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
     .walk = tcf_gact_walker,
     .lookup = tcf_gact_search,
     .get_fill_size = tcf_gact_get_fill_size,
-    .delete = tcf_gact_delete,
     .size = sizeof(struct tcf_gact),
 };

@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
 {
     struct tcf_meta_ops *o;
 
-    read_lock_bh(&ife_mod_lock);
+    read_lock(&ife_mod_lock);
     list_for_each_entry(o, &ifeoplist, list) {
         if (o->metaid == metaid) {
             if (!try_module_get(o->owner))
                 o = NULL;
-            read_unlock_bh(&ife_mod_lock);
+            read_unlock(&ife_mod_lock);
             return o;
         }
     }
-    read_unlock_bh(&ife_mod_lock);
+    read_unlock(&ife_mod_lock);
 
     return NULL;
 }
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
         !mops->get || !mops->alloc)
         return -EINVAL;
 
-    write_lock_bh(&ife_mod_lock);
+    write_lock(&ife_mod_lock);
 
     list_for_each_entry(m, &ifeoplist, list) {
         if (m->metaid == mops->metaid ||
             (strcmp(mops->name, m->name) == 0)) {
-            write_unlock_bh(&ife_mod_lock);
+            write_unlock(&ife_mod_lock);
             return -EEXIST;
         }
     }
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
         mops->release = ife_release_meta_gen;
 
     list_add_tail(&mops->list, &ifeoplist);
-    write_unlock_bh(&ife_mod_lock);
+    write_unlock(&ife_mod_lock);
     return 0;
 }
 EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
     struct tcf_meta_ops *m;
     int err = -ENOENT;
 
-    write_lock_bh(&ife_mod_lock);
+    write_lock(&ife_mod_lock);
     list_for_each_entry(m, &ifeoplist, list) {
         if (m->metaid == mops->metaid) {
             list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
             break;
         }
     }
-    write_unlock_bh(&ife_mod_lock);
+    write_unlock(&ife_mod_lock);
 
     return err;
 }
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
 #endif
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                                void *val, int len, bool exists,
-                                bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
 {
     struct tcf_meta_ops *ops = find_ife_oplist(metaid);
     int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
     if (!ops) {
         ret = -ENOENT;
 #ifdef CONFIG_MODULES
-        if (exists)
-            spin_unlock_bh(&ife->tcf_lock);
         if (rtnl_held)
             rtnl_unlock();
         request_module("ife-meta-%s", ife_meta_id2name(metaid));
         if (rtnl_held)
             rtnl_lock();
-        if (exists)
-            spin_lock_bh(&ife->tcf_lock);
         ops = find_ife_oplist(metaid);
 #endif
     }
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                        int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+                          struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                          int len, bool atomic, bool exists)
 {
     struct tcf_meta_info *mi = NULL;
-    struct tcf_meta_ops *ops = find_ife_oplist(metaid);
     int ret = 0;
 
-    if (!ops)
-        return -ENOENT;
-
     mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
-    if (!mi) {
-        /*put back what find_ife_oplist took */
-        module_put(ops->owner);
+    if (!mi)
         return -ENOMEM;
-    }
 
     mi->metaid = metaid;
     mi->ops = ops;
@@ -327,29 +313,47 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
         ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
         if (ret != 0) {
             kfree(mi);
-            module_put(ops->owner);
             return ret;
         }
     }
 
+    if (exists)
+        spin_lock_bh(&ife->tcf_lock);
     list_add_tail(&mi->metalist, &ife->metalist);
+    if (exists)
+        spin_unlock_bh(&ife->tcf_lock);
 
     return ret;
 }
 
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                        int len, bool exists)
+{
+    const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+    int ret;
+
+    if (!ops)
+        return -ENOENT;
+
+    ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+    if (ret)
+        /*put back what find_ife_oplist took */
+        module_put(ops->owner);
+
+    return ret;
+}
+
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 {
     struct tcf_meta_ops *o;
     int rc = 0;
     int installed = 0;
 
-    read_lock_bh(&ife_mod_lock);
+    read_lock(&ife_mod_lock);
     list_for_each_entry(o, &ifeoplist, list) {
-        rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+        rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
         if (rc == 0)
             installed += 1;
     }
-    read_unlock_bh(&ife_mod_lock);
+    read_unlock(&ife_mod_lock);
 
     if (installed)
         return 0;
@@ -422,7 +426,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
     kfree_rcu(p, rcu);
 }
 
-/* under ife->tcf_lock for existing action */
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                              bool exists, bool rtnl_held)
 {
@@ -436,8 +439,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
             val = nla_data(tb[i]);
             len = nla_len(tb[i]);
 
-            rc = load_metaops_and_vet(ife, i, val, len, exists,
-                                      rtnl_held);
+            rc = load_metaops_and_vet(i, val, len, rtnl_held);
             if (rc != 0)
                 return rc;
@@ -540,8 +542,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
         p->eth_type = ife_type;
     }
 
-    if (exists)
-        spin_lock_bh(&ife->tcf_lock);
     if (ret == ACT_P_CREATED)
         INIT_LIST_HEAD(&ife->metalist);
 
@@ -551,10 +551,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                        NULL, NULL);
         if (err) {
 metadata_parse_err:
-            if (exists)
-                spin_unlock_bh(&ife->tcf_lock);
             tcf_idr_release(*a, bind);
             kfree(p);
             return err;
         }
@@ -569,17 +566,16 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
          * as we can. You better have at least one else we are
          * going to bail out
          */
-        err = use_all_metadata(ife);
+        err = use_all_metadata(ife, exists);
         if (err) {
-            if (exists)
-                spin_unlock_bh(&ife->tcf_lock);
             tcf_idr_release(*a, bind);
             kfree(p);
             return err;
         }
     }
 
+    if (exists)
+        spin_lock_bh(&ife->tcf_lock);
     ife->tcf_action = parm->action;
     /* protected by tcf_lock when modifying existing action */
     rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +849,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ife_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ife_ops = {
     .kind = "ife",
     .type = TCA_ACT_IFE,
@@ -870,7 +859,6 @@ static struct tc_action_ops act_ife_ops = {
     .init = tcf_ife_init,
    .walk = tcf_ife_walker,
     .lookup = tcf_ife_search,
-    .delete = tcf_ife_delete,
     .size = sizeof(struct tcf_ife_info),
 };
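
Two threads run through these hunks: metadata allocation moves out from under ife->tcf_lock (the spinlock is now taken only around the list insert, which is what lets __add_metainfo() keep using GFP_KERNEL in the non-atomic case), and ife_mod_lock drops its _bh variants, which is sound only if the rwlock is never taken from softirq context. A userspace pthread sketch of the allocate-outside, insert-under-the-lock shape, with standard primitives standing in for the kernel ones:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int add_node(int val)
{
    struct node *n = malloc(sizeof(*n));    /* may block: done unlocked */

    if (!n)
        return -1;
    n->val = val;

    pthread_mutex_lock(&lock);              /* short critical section */
    n->next = head;
    head = n;
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    return add_node(42);
}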

@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ipt_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ipt_ops = {
     .kind = "ipt",
     .type = TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
     .init = tcf_ipt_init,
     .walk = tcf_ipt_walker,
     .lookup = tcf_ipt_search,
-    .delete = tcf_ipt_delete,
     .size = sizeof(struct tcf_ipt),
 };
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_xt_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_xt_ops = {
     .kind = "xt",
     .type = TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
     .init = tcf_xt_init,
     .walk = tcf_xt_walker,
     .lookup = tcf_xt_search,
-    .delete = tcf_xt_delete,
     .size = sizeof(struct tcf_ipt),
 };

@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
     dev_put(dev);
 }
 
-static int tcf_mirred_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_mirred_ops = {
     .kind = "mirred",
     .type = TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
     .size = sizeof(struct tcf_mirred),
     .get_dev = tcf_mirred_get_dev,
     .put_dev = tcf_mirred_put_dev,
-    .delete = tcf_mirred_delete,
 };
 
 static __net_init int mirred_init_net(struct net *net)

@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_nat_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_nat_ops = {
     .kind = "nat",
     .type = TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
     .init = tcf_nat_init,
     .walk = tcf_nat_walker,
     .lookup = tcf_nat_search,
-    .delete = tcf_nat_delete,
     .size = sizeof(struct tcf_nat),
 };

@@ -460,13 +460,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_pedit_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_pedit_ops = {
     .kind = "pedit",
     .type = TCA_ACT_PEDIT,
@@ -477,7 +470,6 @@ static struct tc_action_ops act_pedit_ops = {
     .init = tcf_pedit_init,
     .walk = tcf_pedit_walker,
     .lookup = tcf_pedit_search,
-    .delete = tcf_pedit_delete,
     .size = sizeof(struct tcf_pedit),
 };

@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_police_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, police_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
     .init = tcf_police_init,
     .walk = tcf_police_walker,
     .lookup = tcf_police_search,
-    .delete = tcf_police_delete,
     .size = sizeof(struct tcf_police),
 };

@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_sample_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_sample_ops = {
     .kind = "sample",
     .type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
     .cleanup = tcf_sample_cleanup,
     .walk = tcf_sample_walker,
     .lookup = tcf_sample_search,
-    .delete = tcf_sample_delete,
     .size = sizeof(struct tcf_sample),
 };

@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_simp_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_simp_ops = {
     .kind = "simple",
     .type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
     .init = tcf_simp_init,
     .walk = tcf_simp_walker,
     .lookup = tcf_simp_search,
-    .delete = tcf_simp_delete,
     .size = sizeof(struct tcf_defact),
 };

@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbedit_ops = {
     .kind = "skbedit",
     .type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
     .cleanup = tcf_skbedit_cleanup,
     .walk = tcf_skbedit_walker,
     .lookup = tcf_skbedit_search,
-    .delete = tcf_skbedit_delete,
     .size = sizeof(struct tcf_skbedit),
 };

@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbmod_ops = {
     .kind = "skbmod",
     .type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
     .cleanup = tcf_skbmod_cleanup,
     .walk = tcf_skbmod_walker,
     .lookup = tcf_skbmod_search,
-    .delete = tcf_skbmod_delete,
     .size = sizeof(struct tcf_skbmod),
 };

@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tunnel_key_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_tunnel_key_ops = {
     .kind = "tunnel_key",
     .type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
     .cleanup = tunnel_key_release,
     .walk = tunnel_key_walker,
     .lookup = tunnel_key_search,
-    .delete = tunnel_key_delete,
     .size = sizeof(struct tcf_tunnel_key),
 };

@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
     return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
-    struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-    return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_vlan_ops = {
     .kind = "vlan",
     .type = TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
     .cleanup = tcf_vlan_cleanup,
     .walk = tcf_vlan_walker,
     .lookup = tcf_vlan_search,
-    .delete = tcf_vlan_delete,
     .size = sizeof(struct tcf_vlan),
 };

@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
     struct nlattr *opt = tca[TCA_OPTIONS];
     struct nlattr *tb[TCA_U32_MAX + 1];
     u32 htid, flags = 0;
+    size_t sel_size;
     int err;
 #ifdef CONFIG_CLS_U32_PERF
     size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
     }
 
     s = nla_data(tb[TCA_U32_SEL]);
+    sel_size = struct_size(s, keys, s->nkeys);
+    if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+        err = -EINVAL;
+        goto erridr;
+    }
 
-    n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+    n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
     if (n == NULL) {
         err = -ENOBUFS;
         goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
     }
 #endif
 
-    memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+    memcpy(&n->sel, s, sel_size);
     RCU_INIT_POINTER(n->ht_up, ht);
     n->handle = handle;
     n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
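
The rule this enforces generalizes to any netlink attribute carrying a structure with a flexible array: the element count the structure claims (s->nkeys) must be validated against the attribute's actual length before it sizes a copy, and struct_size() performs the sizeof(*s) + n * sizeof(elem) arithmetic with overflow checking. A minimal userspace illustration, with plain multiplication standing in for struct_size():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sel {
    uint8_t nkeys;
    uint32_t keys[];            /* flexible array, nkeys entries */
};

static int copy_sel(void *dst, const void *attr, size_t attr_len)
{
    const struct sel *s = attr;
    size_t sel_size;

    if (attr_len < sizeof(*s))
        return -1;              /* cannot even read nkeys */

    sel_size = sizeof(*s) + (size_t)s->nkeys * sizeof(s->keys[0]);
    if (attr_len < sel_size)
        return -1;              /* attribute shorter than it claims */

    memcpy(dst, s, sel_size);
    return 0;
}

int main(void)
{
    unsigned char buf[8] = { 0 };
    unsigned char dst[64];

    buf[0] = 3;                 /* nkeys claims 3 keys, buffer carries none */
    printf("%d\n", copy_sel(dst, buf, sizeof(buf)));    /* -1: rejected */
    return 0;
}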

@@ -64,7 +64,6 @@
 #include <linux/vmalloc.h>
 #include <linux/reciprocal_div.h>
 #include <net/netlink.h>
-#include <linux/version.h>
 #include <linux/if_vlan.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
 }
 
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-                     int flow_mode)
+                     int flow_mode, u16 flow_override, u16 host_override)
 {
-    u32 flow_hash = 0, srchost_hash, dsthost_hash;
+    u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
     u16 reduced_hash, srchost_idx, dsthost_idx;
     struct flow_keys keys, host_keys;
 
     if (unlikely(flow_mode == CAKE_FLOW_NONE))
         return 0;
 
+    /* If both overrides are set we can skip packet dissection entirely */
+    if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+        (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+        goto skip_hash;
+
     skb_flow_dissect_flow_keys(skb, &keys,
                                FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
     if (flow_mode & CAKE_FLOW_FLOWS)
         flow_hash = flow_hash_from_keys(&keys);
 
+skip_hash:
+    if (flow_override)
+        flow_hash = flow_override - 1;
+    if (host_override) {
+        dsthost_hash = host_override - 1;
+        srchost_hash = host_override - 1;
+    }
+
     if (!(flow_mode & CAKE_FLOW_FLOWS)) {
         if (flow_mode & CAKE_FLOW_SRC_IP)
             flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
     struct cake_sched_data *q = qdisc_priv(sch);
     struct tcf_proto *filter;
     struct tcf_result res;
-    u32 flow = 0;
+    u16 flow = 0, host = 0;
     int result;
 
     filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
 #endif
         if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
             flow = TC_H_MIN(res.classid);
+        if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+            host = TC_H_MAJ(res.classid) >> 16;
     }
 hash:
     *t = cake_select_tin(sch, skb);
-    return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+    return cake_hash(*t, skb, flow_mode, flow, host) + 1;
 }
 
 static void cake_reconfigure(struct Qdisc *sch);
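
The override plumbing relies on a "0 means unset" convention: cake_classify() passes the classid's minor and major parts through as 1-based queue and host numbers so that 0 can signal "no override", and cake_hash() decodes with "- 1". A tiny demonstration of the encoding, with invented names:

#include <stdio.h>

static unsigned int pick_flow(unsigned short override,
                              unsigned int computed_hash)
{
    if (override)
        return override - 1;    /* decode the 1-based override */
    return computed_hash;       /* fall back to the dissected hash */
}

int main(void)
{
    printf("%u\n", pick_flow(0, 123));  /* 123: no override set */
    printf("%u\n", pick_flow(1, 123));  /* 0: forced to queue 0 */
    return 0;
}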

@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
 {
     struct tls_context *ctx = tls_get_ctx(sk);
 
-    /* We are already sending pages, ignore notification */
-    if (ctx->in_tcp_sendpages)
+    /* If in_tcp_sendpages call lower protocol write space handler
+     * to ensure we wake up any waiting operations there. For example
+     * if do_tcp_sendpages where to call sk_wait_event.
+     */
+    if (ctx->in_tcp_sendpages) {
+        ctx->sk_write_space(sk);
         return;
+    }
 
     if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
         gfp_t sk_allocation = sk->sk_allocation;
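
The point of the new branch: tls_write_space() is installed in place of the transport's write-space handler, so while a lower-layer send is in flight it must forward the wakeup to the saved ctx->sk_write_space instead of swallowing it, or a sender sleeping inside the transport (for example in sk_wait_event()) never wakes. A schematic userspace model of the callback chaining; the types and wiring are simplified stand-ins:

#include <stdio.h>

struct sock;    /* opaque here */

struct tls_ctx {
    int in_tcp_sendpages;                   /* lower-layer send in flight */
    void (*sk_write_space)(struct sock *);  /* saved transport callback */
};

static void tcp_write_space(struct sock *sk)
{
    (void)sk;
    printf("transport woken\n");            /* would wake sk_wait_event() */
}

static struct tls_ctx ctx = { 1, tcp_write_space };

static void tls_write_space(struct sock *sk)
{
    if (ctx.in_tcp_sendpages) {
        ctx.sk_write_space(sk);             /* forward, do not drop */
        return;
    }
    /* ... normal TLS transmit work ... */
}

int main(void)
{
    tls_write_space(0);
    return 0;
}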

@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
         return 0;
 
     if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
-        return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+        return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
 
     bpf.command = XDP_QUERY_XSK_UMEM;
 
     rtnl_lock();
     err = xdp_umem_query(dev, queue_id);
     if (err) {
-        err = err < 0 ? -ENOTSUPP : -EBUSY;
+        err = err < 0 ? -EOPNOTSUPP : -EBUSY;
         goto err_rtnl_unlock;
     }
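
Why the errno matters: ENOTSUPP (524) is a kernel-internal value that was never exported to userspace, so callers see "Unknown error 524" from strerror(), while EOPNOTSUPP decodes to a real message. A quick check, hard-coding the kernel-internal number since no userspace header defines it:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ENOTSUPP 524    /* kernel-internal, deliberately not in <errno.h> */

int main(void)
{
    printf("%d: %s\n", ENOTSUPP, strerror(ENOTSUPP));
    printf("%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
    return 0;
}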

@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
     }
 
     while (argc) {
-        if (argc < 2)
+        if (argc < 2) {
             BAD_ARG();
+            goto err_close_map;
+        }
 
         if (is_prefix(*argv, "cpu")) {
             char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
             NEXT_ARG();
         } else {
             BAD_ARG();
+            goto err_close_map;
         }
 
         do_all = false;
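
The shape of this fix in miniature: once a resource is open, argument-parsing failures have to leave through the cleanup label rather than returning straight out, or the resource leaks. An illustrative standalone version using a plain file descriptor in place of the map and perf buffers:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    int err = 0;
    int fd = open("/dev/null", O_RDONLY);

    (void)argv;
    if (fd < 0)
        return 1;

    if (argc < 2) {
        fprintf(stderr, "missing argument\n");
        err = 1;
        goto err_close;         /* do not leak fd on the error path */
    }

    /* ... use fd ... */

err_close:
    close(fd);
    return err;
}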