Merge tag 'net-6.7-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Current release - regressions:

   - tcp: fix tcp_disordered_ack() vs usec TS resolution

  Current release - new code bugs:

   - dpll: sanitize possible null pointer dereference in
     dpll_pin_parent_pin_set()

   - eth: octeon_ep: initialise control mbox tasks before using APIs

  Previous releases - regressions:

   - io_uring/af_unix: disable sending io_uring over sockets

   - eth: mlx5e:
       - TC, don't offload post action rule if not supported
       - fix possible deadlock on mlx5e_tx_timeout_work

   - eth: iavf: fix iavf_shutdown to call iavf_remove instead of iavf_close

   - eth: bnxt_en: fix skb recycling logic in bnxt_deliver_skb()

   - eth: ena: fix DMA syncing in XDP path when SWIOTLB is on

   - eth: team: fix use-after-free when an option instance allocation
     fails

  Previous releases - always broken:

   - neighbour: don't let neigh_forced_gc() disable preemption for long

   - net: prevent mss overflow in skb_segment()

   - ipv6: support reporting otherwise unknown prefix flags in
     RTM_NEWPREFIX

   - tcp: remove acked SYN flag from packet in the transmit queue
     correctly

   - eth: octeontx2-af:
       - fix a use-after-free in rvu_nix_register_reporters
       - fix promisc mcam entry action

   - eth: dwmac-loongson: make sure MDIO is initialized before use

   - eth: atlantic: fix double free in ring reinit logic"

* tag 'net-6.7-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (62 commits)
  net: atlantic: fix double free in ring reinit logic
  appletalk: Fix Use-After-Free in atalk_ioctl
  net: stmmac: Handle disabled MDIO busses from devicetree
  net: stmmac: dwmac-qcom-ethqos: Fix drops in 10M SGMII RX
  dpaa2-switch: do not ask for MDB, VLAN and FDB replay
  dpaa2-switch: fix size of the dma_unmap
  net: prevent mss overflow in skb_segment()
  vsock/virtio: Fix unsigned integer wrap around in virtio_transport_has_space()
  Revert "tcp: disable tcp_autocorking for socket when TCP_NODELAY flag is set"
  MIPS: dts: loongson: drop incorrect dwmac fallback compatible
  stmmac: dwmac-loongson: drop useless check for compatible fallback
  stmmac: dwmac-loongson: Make sure MDIO is initialized before use
  tcp: disable tcp_autocorking for socket when TCP_NODELAY flag is set
  dpll: sanitize possible null pointer dereference in dpll_pin_parent_pin_set()
  net: ena: Fix XDP redirection error
  net: ena: Fix DMA syncing in XDP path when SWIOTLB is on
  net: ena: Fix xdp drops handling due to multibuf packets
  net: ena: Destroy correct number of xdp queues upon failure
  net: Remove acked SYN flag from packet in the transmit queue correctly
  qed: Fix a potential use-after-free in qed_cxt_tables_alloc
  ...
Commit c7402612e2 by Linus Torvalds, 2023-12-14 13:11:49 -08:00
62 changed files with 1154 additions and 586 deletions

@ -19564,7 +19564,6 @@ S: Maintained
F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
M: Jan Karcher <jaka@linux.ibm.com>
R: D. Wythe <alibuda@linux.alibaba.com>


@ -130,8 +130,7 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass0c0320",
"pciclass0c03",
"loongson, pci-gmac";
"pciclass0c03";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_LOW>,


@ -193,8 +193,7 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
"pciclass0200",
"loongson, pci-gmac";
"pciclass0200";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,


@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct sk_buff *skb;
unsigned int len;
spin_lock(&card->cli_queue_lock);
spin_lock_bh(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
spin_unlock(&card->cli_queue_lock);
spin_unlock_bh(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
struct pkt_hdr *header;
/* Remove any yet-to-be-transmitted packets from the pending queue */
spin_lock(&card->tx_queue_lock);
spin_lock_bh(&card->tx_queue_lock);
skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
if (SKB_CB(skb)->vcc == vcc) {
skb_unlink(skb, &card->tx_queue[port]);
solos_pop(vcc, skb);
}
}
spin_unlock(&card->tx_queue_lock);
spin_unlock_bh(&card->tx_queue_lock);
skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {


@ -925,7 +925,6 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[DPLL_A_PIN_MAX + 1];
enum dpll_pin_state state;
u32 ppin_idx;
int ret;
@ -936,10 +935,14 @@ dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
return -EINVAL;
}
ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
if (ret)
return ret;
if (tb[DPLL_A_PIN_STATE]) {
enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
if (ret)
return ret;
}
return 0;
}


@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
* compare it to the stored version, just create the meta
*/
if (io_sq->disable_meta_caching) {
if (unlikely(!ena_tx_ctx->meta_valid))
return -EINVAL;
*have_meta = true;
return ena_com_create_meta(io_sq, ena_meta);
}


@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
int first_index, int count);
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
int first_index, int count);
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
u32 xdp_first_ring = adapter->xdp_first_ring;
u32 xdp_num_queues = adapter->xdp_num_queues;
int rc = 0;
rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
adapter->xdp_num_queues);
rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto setup_err;
rc = ena_create_io_tx_queues_in_range(adapter,
adapter->xdp_first_ring,
adapter->xdp_num_queues);
rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
if (rc)
goto create_err;
return 0;
create_err:
ena_free_all_io_tx_resources(adapter);
ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
setup_err:
return rc;
}
@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
if (unlikely(!skb))
return NULL;
/* sync this buffer for CPU use */
dma_sync_single_for_cpu(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
dma_sync_single_for_device(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
/* If XDP isn't loaded try to reuse part of the RX buffer */
reuse_rx_buf_page = !is_xdp_loaded &&
ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
dma_sync_single_for_cpu(rx_ring->dev,
pre_reuse_paddr + pkt_offset,
len,
DMA_FROM_DEVICE);
if (!reuse_rx_buf_page)
ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
{
struct ena_rx_buffer *rx_info;
int ret;
/* XDP multi-buffer packets not supported */
if (unlikely(num_descs > 1)) {
netdev_err_once(rx_ring->adapter->netdev,
"xdp: dropped unsupported multi-buffer packets\n");
ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
return ENA_XDP_DROP;
}
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
xdp_prepare_buff(xdp, page_address(rx_info->page),
rx_info->buf_offset,
rx_ring->ena_bufs[0].len, false);
/* If for some reason we received a bigger packet than
* we expect, then we simply drop it
*/
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
return ENA_XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
u8 pkt_offset;
int rc = 0;
int i;
@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* First descriptor might have an offset set by the device */
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
rx_info->buf_offset += ena_rx_ctx.pkt_offset;
pkt_offset = ena_rx_ctx.pkt_offset;
rx_info->buf_offset += pkt_offset;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
dma_sync_single_for_cpu(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
rx_ring->ena_bufs[0].len,
DMA_FROM_DEVICE);
if (ena_xdp_present_ring(rx_ring))
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
/* allocate skb and fill it */
if (xdp_verdict == ENA_XDP_PASS)
@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (xdp_verdict & ENA_XDP_FORWARDED) {
ena_unmap_rx_buff_attrs(rx_ring,
&rx_ring->rx_buffer_info[req_id],
0);
DMA_ATTR_SKIP_CPU_SYNC);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}


@ -938,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self)
return;
kfree(self->buff_ring);
self->buff_ring = NULL;
if (self->dx_ring)
if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
self->dx_ring = NULL;
}
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)


@ -1748,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
skb_mark_for_recycle(skb);
if (skb->dev != bp->dev) {
/* this packet belongs to a vf-rep */
bnxt_vf_rep_rx(bp, skb);
return;
}
skb_record_rx_queue(skb, bnapi->index);
skb_mark_for_recycle(skb);
napi_gro_receive(&bnapi->napi, skb);
}
static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
{
u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
if (BNXT_PTP_RX_TS_VALID(flags))
goto ts_valid;
if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
return false;
ts_valid:
*cmpl_ts = ts;
return true;
}
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@ -1783,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb;
struct xdp_buff xdp;
u32 flags, misc;
u32 cmpl_ts;
void *data;
int rc = 0;
@ -2005,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@ -10731,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_mem(bp, irq_re_init);
}
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
@ -10749,15 +10762,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
int rc;
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
!bp->sriov_cfg,
BNXT_SRIOV_CFG_WAIT_TMO);
if (rc)
netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
if (!rc)
netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
else if (rc < 0)
netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
}
#endif
__bnxt_close_nic(bp, irq_re_init, link_re_init);
return rc;
}
static int bnxt_close(struct net_device *dev)
@ -13940,6 +13956,8 @@ static int bnxt_resume(struct device *device)
if (rc)
goto resume_exit;
bnxt_clear_reservations(bp, true);
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
goto resume_exit;


@ -161,7 +161,7 @@ struct rx_cmp {
#define RX_CMP_FLAGS_ERROR (1 << 6)
#define RX_CMP_FLAGS_PLACEMENT (7 << 7)
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
#define RX_CMP_FLAGS_UNUSED (1 << 11)
#define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
@ -188,6 +188,12 @@ struct rx_cmp {
__le32 rx_cmp_rss_hash;
};
#define BNXT_PTP_RX_TS_VALID(flags) \
(((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
#define BNXT_ALL_RX_TS_VALID(flags) \
!((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
#define RX_CMP_HASH_VALID(rxcmp) \
((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
@ -2375,7 +2381,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,


@ -449,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -ENODEV;
}
bnxt_ulp_stop(bp);
if (netif_running(bp->dev)) {
rc = bnxt_close_nic(bp, true, true);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to close");
dev_close(bp->dev);
rtnl_unlock();
break;
}
}
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
rc = bnxt_hwrm_func_drv_unrgtr(bp);
if (rc) {


@ -165,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
reset_coalesce:
if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
rc = bnxt_open_nic(bp, true, false);
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
} else {
rc = bnxt_hwrm_set_coal(bp);
}
@ -972,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev,
* before PF unload
*/
}
rc = bnxt_close_nic(bp, true, false);
if (rc) {
netdev_err(bp->dev, "Set channel failure rc :%x\n",
rc);
return rc;
}
bnxt_close_nic(bp, true, false);
}
if (sh) {
@ -4042,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
bnxt_ulp_stop(bp);
rc = bnxt_close_nic(bp, true, false);
if (rc) {
etest->flags |= ETH_TEST_FL_FAILED;
bnxt_ulp_start(bp, rc);
return;
}
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;


@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
rc = bnxt_close_nic(bp, false, false);
if (!rc)
rc = bnxt_open_nic(bp, false, false);
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
} else {
bnxt_ptp_cfg_tstamp_filters(bp);
}


@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
filter_block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
dma_unmap_single(dev, acl_entry_cfg->key_iova,
DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
DMA_TO_DEVICE);
dma_unmap_single(dev, acl_entry_cfg->key_iova,
DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
kfree(cmd_buff);


@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
goto err_egress_flood;
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
&dpaa2_switch_port_switchdev_nb,
&dpaa2_switch_port_switchdev_blocking_nb,
false, extack);
NULL, NULL, false, extack);
if (err)
goto err_switchdev_offload;
@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
switchdev_bridge_port_unoffload(netdev, NULL,
&dpaa2_switch_port_switchdev_nb,
&dpaa2_switch_port_switchdev_blocking_nb);
switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
}
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)


@ -3731,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
{
struct vlan_ethhdr *vhdr;
unsigned short vlan_TCI = 0;
if (skb->protocol == htons(ETH_P_ALL)) {
vhdr = (struct vlan_ethhdr *)(skb->data);
vlan_TCI = ntohs(vhdr->h_vlan_TCI);
}
return vlan_TCI;
}
static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
u16 vlan_tag;
u16 vlan_tag = 0;
if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return netdev_pick_tx(ndev, skb, NULL);
vlan_tag = fec_enet_get_raw_vlan_tci(skb);
if (!vlan_tag)
/* VLAN is present in the payload.*/
if (eth_type_vlan(skb->protocol)) {
struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
vlan_tag = ntohs(vhdr->h_vlan_TCI);
/* VLAN is present in the skb but not yet pushed in the payload.*/
} else if (skb_vlan_tag_present(skb)) {
vlan_tag = skb->vlan_tci;
} else {
return vlan_tag;
}
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}


@ -292,6 +292,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
#define IAVF_FLAG_FDIR_ENABLED BIT(21)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */


@ -1061,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@ -1203,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
unsigned int cnt = 0;
int val = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
@ -1395,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
int count = 50;
int err;
if (!FDIR_FLTR_SUPPORT(adapter))
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
@ -1436,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
if (adapter->link_up) {
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
} else {
fltr->state = IAVF_FDIR_FLTR_INACTIVE;
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
if (adapter->link_up)
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
kfree(fltr);
@ -1463,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@ -1472,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
list_del(&fltr->list);
kfree(fltr);
adapter->fdir_active_fltr--;
fltr = NULL;
} else {
err = -EBUSY;
}
@ -1780,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
if (!FDIR_FLTR_SUPPORT(adapter))
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;


@ -6,12 +6,25 @@
struct iavf_adapter;
/* State of Flow Director filter */
/* State of Flow Director filter
*
* *_REQUEST states are used to mark filter to be sent to PF driver to perform
* an action (either add or delete filter). *_PENDING states are an indication
* that request was sent to PF and the driver is waiting for response.
*
* Both DELETE and DISABLE states are being used to delete a filter in PF.
* The difference is that after a successful response filter in DEL_PENDING
* state is being deleted from VF driver as well and filter in DIS_PENDING state
* is being changed to INACTIVE state.
*/
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};


@ -276,27 +276,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
kfree(mem->va);
}
/**
* iavf_lock_timeout - try to lock mutex but give up after timeout
* @lock: mutex that should be locked
* @msecs: timeout in msecs
*
* Returns 0 on success, negative on failure
**/
static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
unsigned int wait, delay = 10;
for (wait = 0; wait < msecs; wait += delay) {
if (mutex_trylock(lock))
return 0;
msleep(delay);
}
return -1;
}
/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_fdir_fltr *fdir;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
list) {
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
/* Cancel a request, keep filter as inactive */
fdir->state = IAVF_FDIR_FLTR_INACTIVE;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
/* Disable filters which are active or have a pending
* request to PF to be added
*/
fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@ -4112,6 +4093,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
/**
* iavf_restore_fdir_filters
* @adapter: board private structure
*
* Restore existing FDIR filters when VF netdev comes back up.
**/
static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *f;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(f, &adapter->fdir_list_head, list) {
if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
/* Cancel a request, keep filter as active */
f->state = IAVF_FDIR_FLTR_ACTIVE;
} else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
f->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Add filters which are inactive or have a pending
* request to PF to be deleted
*/
f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
/**
* iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore VLAN filters that were removed with IFF_DOWN */
/* Restore filters that were removed with IFF_DOWN */
iavf_restore_filters(adapter);
iavf_restore_fdir_filters(adapter);
iavf_configure(adapter);
@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
/**
* iavf_disable_fdir - disable Flow Director and clear existing filters
* @adapter: board private structure
**/
static void iavf_disable_fdir(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *fdir, *fdirtmp;
bool del_filters = false;
adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
/* Filters registered in PF, schedule their deletion */
fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
del_filters = true;
} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
/* Request to delete filter already sent to PF, change
* state to DEL_PENDING to delete filter after PF's
* response, not set as INACTIVE
*/
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (del_filters) {
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
}
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev,
((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
if (features & NETIF_F_NTUPLE)
adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
else
iavf_disable_fdir(adapter);
}
return 0;
}
@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
features = iavf_fix_netdev_vlan_features(adapter, features);
if (!FDIR_FLTR_SUPPORT(adapter))
features &= ~NETIF_F_NTUPLE;
return iavf_fix_strip_features(adapter, features);
}
@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (FDIR_FLTR_SUPPORT(adapter)) {
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->features |= NETIF_F_NTUPLE;
adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
}
netdev->priv_flags |= IFF_UNICAST_FLT;
/* Do not turn on offloads when they are requested to be turned off.
@ -4825,34 +4893,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
return 0;
}
/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
static void iavf_shutdown(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
if (netif_running(netdev))
iavf_close(netdev);
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
/* Prevent the watchdog from running. */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
mutex_unlock(&adapter->crit_lock);
#ifdef CONFIG_PM
pci_save_state(pdev);
#endif
pci_disable_device(pdev);
}
/**
* iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_adapter *adapter;
struct net_device *netdev;
struct iavf_hw *hw;
netdev = adapter->netdev;
/* Don't proceed with remove if netdev is already freed */
netdev = pci_get_drvdata(pdev);
if (!netdev)
return;
adapter = iavf_pdev_to_adapter(pdev);
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->wq);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
pci_disable_device(pdev);
}
/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
static void iavf_shutdown(struct pci_dev *pdev)
{
iavf_remove(pdev);
if (system_state == SYSTEM_POWER_OFF)
pci_set_power_state(pdev, PCI_D3hot);
}
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {


@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
struct virtchnl_fdir_del f = {};
struct iavf_fdir_fltr *fdir;
struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
process_fltr = true;
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@ -1901,6 +1906,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
/**
* iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
* @adapter: private adapter structure
*
* Called after a reset to re-add all FDIR filters and delete some of them
* if they were pending to be deleted.
*/
static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *f, *ftmp;
bool add_filters = false;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
f->state == IAVF_FDIR_FLTR_ACTIVE) {
/* All filters and requests have been removed in PF,
* restore them
*/
f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
add_filters = true;
} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
/* Link down state, leave filters as inactive */
f->state = IAVF_FDIR_FLTR_INACTIVE;
} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
/* Delete filters that were pending to be deleted, the
* list on PF is already cleared after a reset
*/
list_del(&f->list);
kfree(f);
adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (add_filters)
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
}
/**
* iavf_virtchnl_completion
* @adapter: adapter structure
@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_activate_fdir_filters(adapter);
iavf_parse_vf_resource_msg(adapter);
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
fdir->state = IAVF_FDIR_FLTR_INACTIVE;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);


@ -1193,6 +1193,13 @@ int octep_device_setup(struct octep_device *oct)
if (ret)
return ret;
INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
oct->poll_non_ioq_intr = true;
queue_delayed_work(octep_wq, &oct->intr_poll_task,
msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
@ -1258,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev)
pci_read_config_byte(pdev, (pos + 8), &status);
dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
return status;
#define FW_STATUS_READY 1ULL
return status == FW_STATUS_READY;
}
return false;
}
@ -1325,21 +1333,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
&octep_dev->conf->fw_info);
err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
&octep_dev->conf->fw_info);
if (err) {
dev_err(&pdev->dev, "Failed to get firmware info\n");
goto register_dev_err;
}
dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
octep_dev->conf->fw_info.hb_interval,
octep_dev->conf->fw_info.hb_miss_count);
queue_delayed_work(octep_wq, &octep_dev->hb_task,
msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
octep_dev->poll_non_ioq_intr = true;
queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
netdev->netdev_ops = &octep_netdev_ops;
octep_set_ethtool_ops(netdev);
netif_carrier_off(netdev);


@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Disable forward pause to driver */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Enable channel mask for all LMACS */
if (is_dev_rpm2(rpm))
rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
@ -616,12 +621,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
} else {
cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
}
if (tx_pause) {


@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
goto err;
return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
return 0;
err:
rvu_nix_health_reporters_destroy(rvu_dl);
return -ENOMEM;
}
static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)


@ -671,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, ucast_idx, index;
struct nix_rx_action action = { 0 };
u64 relaxed_mask;
u8 flow_key_alg;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
@ -701,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
}
flow_key_alg = action.flow_key_alg;
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
@ -740,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
req.vf = pcifunc;
req.index = action.index;
req.match_id = action.match_id;
req.flow_key_alg = action.flow_key_alg;
req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@ -854,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
u8 flow_key_alg;
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
@ -888,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
flow_key_alg = action.flow_key_alg;
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
@ -924,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
req.vf = pcifunc | vf_func;
req.index = action.index;
req.match_id = action.match_id;
req.flow_key_alg = action.flow_key_alg;
req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@ -990,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_unlock(&mcam->lock);
}
static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
int alg_idx)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int bank, op_rss;
if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
return;
op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
bank = npc_get_bank(mcam, mcam_index);
mcam_index &= (mcam->banksize - 1);
/* If Rx action is MCAST update only RSS algorithm index */
if (!op_rss) {
*(u64 *)&action = rvu_read64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
action.flow_key_alg = alg_idx;
}
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
}
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@ -1050,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
/* If PF's allmulti entry is enabled,
* Set RSS action for that entry as well
*/
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
}
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,


@ -1650,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{
int vf;
/* The AF driver will determine whether to allow the VF netdev or not */
if (is_otx2_vf(pfvf->pcifunc))
return true;
/* check if there are any trusted VFs associated with the PF netdev */
for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
if (pfvf->vf_configs[vf].trusted)
return true;
return false;
}
static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
struct net_device *netdev = pf->netdev;
@ -1682,7 +1697,8 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
req->mode |= NIX_RX_MODE_USE_MCE;
if (otx2_promisc_use_mce_list(pf))
req->mode |= NIX_RX_MODE_USE_MCE;
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
@ -2691,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
pf->vf_configs[vf].trusted = enable;
rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
if (rc)
if (rc) {
pf->vf_configs[vf].trusted = !enable;
else
} else {
netdev_info(pf->netdev, "VF %d is %strusted\n",
vf, enable ? "" : "not ");
otx2_set_rx_mode(netdev);
}
return rc;
}


@ -826,6 +826,7 @@ enum {
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_ACTIVE,
MLX5E_STATE_CHANNELS_ACTIVE,
};
struct mlx5e_modify_sq_param {


@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5_flow_spec *spec;
int err;
if (IS_ERR(post_act))
return PTR_ERR(post_act);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
struct mlx5e_post_act_handle *handle;
int err;
if (IS_ERR(post_act))
return ERR_CAST(post_act);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);


@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
sa_entry->esn_state.esn = esn;
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
else
/* According to RFC4303, section "3.3.3. Sequence Number Generation",
* the first packet sent using a given SA will contain a sequence
* number of 1.
*/
sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
@ -335,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
break;
case 64:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
break;
case 128:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
break;
case 256:
attrs->replay_esn.replay_window =
MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
break;
default:
WARN_ON(true);
return;
}
}
attrs->dir = x->xso.dir;
@ -907,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
if (ipsec->netevent_nb.notifier_call) {
unregister_netevent_notifier(&ipsec->netevent_nb);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
ipsec->netevent_nb.notifier_call = NULL;
}
if (ipsec->aso)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
@ -1018,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
}
}
if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
return -EINVAL;
}
return 0;
}
@ -1113,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
@ -1138,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
else
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;


@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
struct mlx5e_ipsec_drop {
struct mlx5_flow_handle *rule;
struct mlx5_fc *fc;
};
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
struct mlx5e_ipsec_drop replay;
struct mlx5e_ipsec_drop auth;
struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
@ -201,19 +209,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_miss status_drop;
struct mlx5_fc *status_drop_cnt;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@ -248,6 +243,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {


@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
struct mlx5e_ipsec_status_checks {
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_status_checks status_drops;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
};
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
static int ipsec_status_rule(struct mlx5_core_dev *mdev,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
mlx5_del_flow_rules(rx->status_drops.all.rule);
mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status.rule);
if (rx != ipsec->rx_esw)
return;
#ifdef CONFIG_MLX5_ESWITCH
mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
#endif
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5e_ipsec_rx *rx)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
if (!spec)
return -ENOMEM;
/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
MLX5_SET(copy_action_in, action, dst_offset, 24);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"fail to alloc ipsec copy modify_header_id err=%d\n", err);
goto out_spec;
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
sa_entry->ipsec_rule.auth.fc = flow_counter;
/* create fte */
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.modify_hdr = modify_hdr;
fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.metadata_reg_c_2,
sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
sa_entry->ipsec_rule.auth.rule = rule;
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt_2;
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
dest.counter_id = mlx5_fc_id(flow_counter);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule_2;
}
sa_entry->ipsec_rule.trailer.rule = rule;
kvfree(spec);
rx->status.rule = fte;
rx->status.modify_hdr = modify_hdr;
return 0;
out:
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
err_rule_2:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
kvfree(spec);
return err;
}
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
flow_counter = mlx5_fc_create(mdev, true);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
sa_entry->ipsec_rule.replay.rule = rule;
sa_entry->ipsec_rule.replay.fc = flow_counter;
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
kvfree(spec);
return err;
}
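/* Catch-all drop: a dedicated group on the last entry of the RX status
 * table drops (and counts) anything that did not hit an earlier pass or
 * per-SA drop rule.
 */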
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
struct mlx5_flow_group *g;
u32 *flow_group_in;
int err = 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!flow_group_in || !spec) {
err = -ENOMEM;
goto err_out;
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
g = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop flow group, err=%d\n", err);
goto err_out;
}
flow_counter = mlx5_fc_create(mdev, false);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
rx->status_drops.drop_all_group = g;
rx->status_drops.all.rule = rule;
rx->status_drops.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
mlx5_destroy_flow_group(g);
err_out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
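/* Pass rule: packets with ipsec_syndrome == 0 and reg_c_4 == 0 (no ASO
 * syndrome) are forwarded to the next destination; everything else falls
 * through to the per-SA and catch-all drop rules.
 */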
static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.metadata_reg_c_4, 0);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(ipsec->mdev,
"Failed to add ipsec rx status pass rule, err=%d\n", err);
goto err_rule;
}
rx->status.rule = rule;
kvfree(spec);
return 0;
err_rule:
kvfree(spec);
return err;
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
err = ipsec_rx_status_pass_create(ipsec, rx, dest);
if (err)
goto err_pass_create;
return 0;
err_pass_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
}
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
if (rx == ipsec->rx_esw)
err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
else
err = ipsec_status_rule(mdev, rx, dest);
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
/* SPI number */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
if (encap) {
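/* UDP-encapsulated ESP (NAT-T): the SPI is matched as the inner ESP SPI */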
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters.inner_esp_spi);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters.inner_esp_spi, spi);
} else {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters.outer_esp_spi, spi);
}
}
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
u8 num_of_actions = 1;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
MLX5_SET(set_action_in, action, field,
MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
num_of_actions++;
MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
MLX5_SET(set_action_in, action[1], data, val);
MLX5_SET(set_action_in, action[1], offset, 0);
MLX5_SET(set_action_in, action[1], length, 32);
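/* Crypto offload does not use the ASO, so also clear reg_c_4 here;
 * the RX status pass rule requires reg_c_4 == 0.
 */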
if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
num_of_actions++;
MLX5_SET(set_action_in, action[2], action_type,
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
MLX5_SET(set_action_in, action[2], data, 0);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
break;
case XFRM_DEV_OFFLOAD_OUT:
MLX5_SET(set_action_in, action, field,
MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
MLX5_SET(set_action_in, action, data, val);
MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, length, 32);
MLX5_SET(set_action_in, action[0], data, val);
MLX5_SET(set_action_in, action[0], offset, 0);
MLX5_SET(set_action_in, action[0], length, 32);
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
else
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi);
setup_fte_esp(spec);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
goto err_add_replay;
err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
if (err)
goto err_drop_reason;
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
err_drop_reason:
if (sa_entry->ipsec_rule.replay.rule) {
mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
setup_fte_spi(spec, attrs->spi);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int err = 0;
if (esw)
down_write(&esw->mode_lock);
if (esw) {
err = mlx5_esw_lock(esw);
if (err)
return err;
}
if (mdev->num_block_ipsec) {
err = -EBUSY;
@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
unlock:
if (esw)
up_write(&esw->mode_lock);
mlx5_esw_unlock(esw);
return err;
}
@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
mlx5_del_flow_rules(ipsec_rule->trailer.rule);
mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {


@ -6,6 +6,8 @@
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
is_mdev_legacy_mode(mdev)))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
attrs->replay_esn.replay_window / 64);
attrs->replay_esn.replay_window);
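/* replay_window is assumed to already hold the MLX5_IPSEC_ASO_REPLAY_WIN_*
 * encoding (converted from the xfrm window size elsewhere), so it is
 * programmed directly instead of being divided by 64.
 */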
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
ipsec->aso = NULL;
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,


@ -2731,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
ASSERT_RTNL();
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
@ -3012,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
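/* tx_timeout_work may still be queued after the channels were deactivated;
 * cancel it synchronously unless we are running from that very work item.
 */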
static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
{
WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
if (current_work() != &priv->tx_timeout_work)
cancel_work_sync(&priv->tx_timeout_work);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
mlx5e_cancel_tx_timeout_work(priv);
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
@ -4801,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
rtnl_lock();
mutex_lock(&priv->state_lock);
/* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
* through this flow. However, channel closing flows have to wait for
* this work to finish while holding rtnl lock too. So either get the
* lock or find that channels are being closed for other reason and
* this work is not relevant anymore.
*/
while (!rtnl_trylock()) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
@ -4821,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
}
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}


@ -1497,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
rpriv->rep->vport);
if (dl_port) {
if (!IS_ERR(dl_port)) {
SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
mlx5e_rep_vnic_reporter_create(priv, dl_port);
}


@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
if (IS_ERR(post_act))
return PTR_ERR(post_act);
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@ -3738,6 +3741,20 @@ out_free:
return err;
}
static int
set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
{
struct mlx5e_post_act *post_act = get_post_action(priv);
if (IS_ERR(post_act))
return PTR_ERR(post_act);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
return 0;
}
static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
@ -3761,8 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
if (set_branch_dest_ft(flow->priv, attr))
goto out_err;
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
@ -3771,8 +3788,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
if (set_branch_dest_ft(flow->priv, attr))
goto out_err;
break;
default:
err = -EOPNOTSUPP;


@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status_drop.rule);
mlx5_destroy_flow_group(rx->status_drop.group);
mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
}
static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
mlx5_del_flow_rules(rx->status.rule);
mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
}
static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
struct mlx5_flow_group *g;
u32 *flow_group_in;
int err = 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!flow_group_in || !spec) {
err = -ENOMEM;
goto err_out;
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
g = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop flow group, err=%d\n", err);
goto err_out;
}
flow_counter = mlx5_fc_create(mdev, false);
if (IS_ERR(flow_counter)) {
err = PTR_ERR(flow_counter);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule counter, err=%d\n", err);
goto err_cnt;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter_id = mlx5_fc_id(flow_counter);
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev,
"Failed to add ipsec rx status drop rule, err=%d\n", err);
goto err_rule;
}
rx->status_drop.group = g;
rx->status_drop.rule = rule;
rx->status_drop_cnt = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
return 0;
err_rule:
mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
mlx5_destroy_flow_group(g);
err_out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(ipsec->mdev,
"Failed to add ipsec rx status pass rule, err=%d\n", err);
goto err_rule;
}
rx->status.rule = rule;
kvfree(spec);
return 0;
err_rule:
kvfree(spec);
return err;
}
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
esw_ipsec_rx_status_pass_destroy(ipsec, rx);
esw_ipsec_rx_status_drop_destroy(ipsec, rx);
}
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
int err;
err = esw_ipsec_rx_status_drop_create(ipsec, rx);
if (err)
return err;
err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
if (err)
goto err_pass_create;
return 0;
err_pass_create:
esw_ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
@ -304,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
if (!rpriv || !rpriv->netdev)
if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);


@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx);
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx) {}
static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
return -EINVAL;
}
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}


@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
lockdep_assert_held(&esw->mode_lock);
devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
up_write(&esw->mode_lock);
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
return;
devl_assert_locked(priv_to_devlink(esw->dev));
down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
goto unlock;
return;
esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs = 0;
else
esw->esw_funcs.num_ec_vfs = 0;
unlock:
up_write(&esw->mode_lock);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
esw->mode = MLX5_ESWITCH_LEGACY;
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
if (!mlx5_esw_allowed(esw))
return true;
if (down_read_trylock(&esw->mode_lock) != 0)
if (down_read_trylock(&esw->mode_lock) != 0) {
if (esw->eswitch_operation_in_progress) {
up_read(&esw->mode_lock);
return false;
}
return true;
}
return false;
}
@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
if (down_write_trylock(&esw->mode_lock) == 0)
return -EINVAL;
if (atomic64_read(&esw->user_count) > 0) {
if (esw->eswitch_operation_in_progress ||
atomic64_read(&esw->user_count) > 0) {
up_write(&esw->mode_lock);
return -EBUSY;
}
@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
return esw->mode;
}
int mlx5_esw_lock(struct mlx5_eswitch *esw)
{
down_write(&esw->mode_lock);
if (esw->eswitch_operation_in_progress) {
up_write(&esw->mode_lock);
return -EBUSY;
}
return 0;
}
/**
* mlx5_esw_unlock() - Release write lock on esw mode lock
* @esw: eswitch device.


@ -383,6 +383,7 @@ struct mlx5_eswitch {
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
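/* Set while a devlink eswitch operation temporarily drops mode_lock;
 * other mode_lock takers must back off with -EBUSY.
 */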
bool eswitch_operation_in_progress;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@ -827,6 +828,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);


@ -3653,14 +3653,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct net *devl_net, *netdev_net;
struct mlx5_eswitch *esw;
bool ret = false;
esw = mlx5_devlink_eswitch_nocheck_get(devlink);
netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
return net_eq(devl_net, netdev_net);
mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
if (dev->mlx5e_res.uplink_netdev) {
netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
ret = net_eq(devl_net, netdev_net);
}
mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
return ret;
}
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
@ -3733,13 +3737,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto unlock;
goto skip;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
@ -3749,6 +3756,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
}
skip:
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
mlx5_esw_unlock(esw);
enable_lag:
@ -3759,16 +3769,12 @@ enable_lag:
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
up_read(&esw->mode_lock);
return err;
return esw_mode_to_devlink(esw->mode, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@ -3862,11 +3868,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
if (err)
goto out;
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
esw->offloads.inline_mode = mlx5_mode;
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
if (!err)
esw->offloads.inline_mode = mlx5_mode;
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
up_write(&esw->mode_lock);
return 0;
@ -3878,16 +3888,12 @@ out:
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
up_read(&esw->mode_lock);
return err;
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
@ -3969,6 +3975,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
@ -3982,6 +3991,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw);
}
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
up_write(&esw->mode_lock);
return err;
@ -3996,9 +4008,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
up_read(&esw->mode_lock);
return 0;
}


@ -325,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
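/* A FW reset toggles the PCIe link; if the bridge above us has hotplug and
 * link-change interrupts enabled it would treat that as a surprise removal,
 * so report the reset as unsupported in that case.
 */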
static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
{
struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
if (!bridge)
return -EOPNOTSUPP;
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
return -EOPNOTSUPP;
}
return 0;
}
#endif
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@ -357,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
return false;
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;


@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
p_mngr->ilt_shadow = NULL;
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,


@ -30,6 +30,8 @@
#define QCASPI_MAX_REGS 0x20
#define QCASPI_RX_MAX_FRAMES 4
static const u16 qcaspi_spi_regs[] = {
SPI_REG_BFR_SIZE,
SPI_REG_WRBUF_SPC_AVA,
@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
{
struct qcaspi *qca = netdev_priv(dev);
ring->rx_max_pending = 4;
ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_max_pending = TX_RING_MAX_LEN;
ring->rx_pending = 4;
ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
if ((ring->rx_pending) ||
if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
(ring->rx_mini_pending) ||
(ring->rx_jumbo_pending))
return -EINVAL;
if (netif_running(dev))
ops->ndo_stop(dev);
if (qca->spi_thread)
kthread_park(qca->spi_thread);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
if (netif_running(dev))
ops->ndo_open(dev);
if (qca->spi_thread)
kthread_unpark(qca->spi_thread);
return 0;
}


@ -580,6 +580,18 @@ qcaspi_spi_thread(void *data)
netdev_info(qca->net_dev, "SPI thread created\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
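/* qcaspi_set_ringparam() parks this thread while it resizes the TX ring:
 * stop the queue, flush the ring and park here until it is done.
 */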
if (kthread_should_park()) {
netif_tx_disable(qca->net_dev);
netif_carrier_off(qca->net_dev);
qcaspi_flush_tx_ring(qca);
kthread_parkme();
if (qca->sync == QCASPI_SYNC_READY) {
netif_carrier_on(qca->net_dev);
netif_wake_queue(qca->net_dev);
}
continue;
}
if ((qca->intr_req == qca->intr_svc) &&
!qca->txr.skb[qca->txr.head])
schedule();
@ -608,11 +620,17 @@ qcaspi_spi_thread(void *data)
if (intr_cause & SPI_INT_CPU_ON) {
qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
/* Frame decoding in progress */
if (qca->frm_handle.state != qca->frm_handle.init)
qca->net_dev->stats.rx_dropped++;
qcafrm_fsm_init_spi(&qca->frm_handle);
qca->stats.device_reset++;
/* not synced. */
if (qca->sync != QCASPI_SYNC_READY)
continue;
qca->stats.device_reset++;
netif_wake_queue(qca->net_dev);
netif_carrier_on(qca->net_dev);
}


@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
return -ENODEV;
}
if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
pr_info("dwmac_loongson_pci: Incompatible OF node\n");
return -ENODEV;
}
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
if (!plat->mdio_bus_data)
return -ENOMEM;
plat->mdio_node = of_get_child_by_name(np, "mdio");
if (plat->mdio_node) {
dev_info(&pdev->dev, "Found MDIO subnode\n");
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
if (!plat->mdio_bus_data) {
ret = -ENOMEM;
goto err_put_node;
}
plat->mdio_bus_data->needs_reset = true;
}


@ -34,6 +34,7 @@
#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
#define RGMII_CONFIG_PROG_SWAP BIT(1)
#define RGMII_CONFIG_DDR_MODE BIT(0)
#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10)
/* SDCC_HC_REG_DLL_CONFIG fields */
#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
@ -78,6 +79,8 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
#define SGMII_10M_RX_CLK_DVDR 0x31
struct ethqos_emac_por {
unsigned int offset;
unsigned int value;
@ -598,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
return 0;
}
/* On interface toggle MAC registers gets reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
int val;
@ -617,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
break;
}


@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
if (err != 0) {
if (err == -ENODEV) {
err = 0;
dev_info(dev, "MDIO bus is disabled\n");
goto bus_register_fail;
} else if (err) {
dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}


@ -281,8 +281,10 @@ static int __team_options_register(struct team *team,
return 0;
inst_rollback:
for (i--; i >= 0; i--)
for (i--; i >= 0; i--) {
__team_option_inst_del_option(team, dst_opts[i]);
list_del(&dst_opts[i]->list);
}
i = option_count;
alloc_rollback:


@ -621,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
u8 reserved_at_160[0x20];
u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@ -12001,6 +12001,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
enum {
MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];


@ -31,17 +31,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
union __packed {
__u8 flags;
struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
__u8 onlink : 1,
__u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
__u8 reserved : 6,
__u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
};
};
__be32 valid;
__be32 prefered;
__be32 reserved2;
@ -49,6 +54,9 @@ struct prefix_info {
struct in6_addr prefix;
};
/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
static_assert(sizeof(struct prefix_info) == 32);
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>


@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
/* prefix flags */
#define IF_PREFIX_ONLINK 0x01
#define IF_PREFIX_AUTOCONF 0x02
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,


@ -62,6 +62,8 @@ struct nf_flowtable_type {
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
void (*get)(struct nf_flowtable *ft);
void (*put)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
@ -240,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
}
list_add_tail(&block_cb->list, &block->cb_list);
up_write(&flow_table->flow_block_lock);
if (flow_table->type->get)
flow_table->type->get(flow_table);
return 0;
unlock:
up_write(&flow_table->flow_block_lock);
@ -262,6 +269,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
if (flow_table->type->put)
flow_table->type->put(flow_table);
}
void flow_offload_route_init(struct flow_offload *flow,


@ -1775,15 +1775,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
}
case TIOCINQ: {
/*
* These two are safe on a single CPU system as only
* user tasks fiddle here
*/
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
struct sk_buff *skb;
long amount = 0;
spin_lock_irq(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len - sizeof(struct ddpehdr);
spin_unlock_irq(&sk->sk_receive_queue.lock);
rc = put_user(amount, (int __user *)argp);
break;
}


@ -73,14 +73,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
case SIOCINQ:
{
struct sk_buff *skb;
int amount;
if (sock->state != SS_CONNECTED) {
error = -EINVAL;
goto done;
}
spin_lock_irq(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
error = put_user(skb ? skb->len : 0,
(int __user *)argp) ? -EFAULT : 0;
amount = skb ? skb->len : 0;
spin_unlock_irq(&sk->sk_receive_queue.lock);
error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
goto done;
}
case ATM_SETSC:


@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
{
int max_clean = atomic_read(&tbl->gc_entries) -
READ_ONCE(tbl->gc_thresh2);
u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
int loop = 0;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
shrunk++;
if (shrunk >= max_clean)
break;
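/* tbl->lock is held with BH disabled; re-check the 1 ms budget (tmax)
 * every 16 entries so a huge table cannot stall preemption for too long.
 */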
if (++loop == 16) {
if (ktime_get_ns() > tmax)
goto unlock;
loop = 0;
}
}
}
WRITE_ONCE(tbl->last_flush, jiffies);
unlock:
write_unlock_bh(&tbl->lock);
return shrunk;


@ -4522,8 +4522,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
/* GSO partial only requires that we trim off any excess that
* doesn't fit into an MSS sized block, so take care of that
* now.
* Cap len to not accidentally hit GSO_BY_FRAGS.
*/
partial_segs = len / mss;
partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
if (partial_segs > 1)
mss *= partial_segs;
else


@ -4368,6 +4368,23 @@ EXPORT_SYMBOL(tcp_do_parse_auth_options);
* up to bandwidth of 18Gigabit/sec. 8) ]
*/
/* Estimates max number of increments of remote peer TSval in
* a replay window (based on our current RTO estimation).
*/
static u32 tcp_tsval_replay(const struct sock *sk)
{
/* If we use usec TS resolution,
* then expect the remote peer to use the same resolution.
*/
if (tcp_sk(sk)->tcp_usec_ts)
return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
/* RFC 7323 recommends a TSval clock between 1ms and 1sec.
* We know that some OS (including old linux) can use 1200 Hz.
*/
return inet_csk(sk)->icsk_rto * 1200 / HZ;
}
static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
@ -4375,7 +4392,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
return (/* 1. Pure ACK with correct sequence number. */
return /* 1. Pure ACK with correct sequence number. */
(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
/* 2. ... and duplicate ACK. */
@ -4385,7 +4402,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
/* 4. ... and sits in replay window. */
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <=
tcp_tsval_replay(sk);
}
static inline bool tcp_paws_discard(const struct sock *sk,


@ -3293,7 +3293,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (skb_still_in_host_queue(sk, skb))
return -EBUSY;
start:
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
TCP_SKB_CB(skb)->seq++;
goto start;
}
if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
WARN_ON_ONCE(1);
return -EINVAL;


@ -6149,11 +6149,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
pmsg->prefix_len = pinfo->prefix_len;
pmsg->prefix_type = pinfo->type;
pmsg->prefix_pad3 = 0;
pmsg->prefix_flags = 0;
if (pinfo->onlink)
pmsg->prefix_flags |= IF_PREFIX_ONLINK;
if (pinfo->autoconf)
pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
pmsg->prefix_flags = pinfo->flags;
if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
goto nla_put_failure;


@ -1315,9 +1315,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
spin_lock_irq(&sk->sk_receive_queue.lock);
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (unsigned int __user *) argp);
}


@ -286,9 +286,31 @@ static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
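/* ->get()/->put() let the flowtable offload code hold a reference on the
 * act_ct flow table for as long as a block callback is registered.
 */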
static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
struct tcf_ct_flow_table *ct_ft =
container_of(ft, struct tcf_ct_flow_table, nf_ft);
tcf_ct_flow_table_get_ref(ct_ft);
}
static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
struct tcf_ct_flow_table *ct_ft =
container_of(ft, struct tcf_ct_flow_table, nf_ft);
tcf_ct_flow_table_put(ct_ft);
}
static struct nf_flowtable_type flowtable_ct = {
.gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.get = tcf_ct_nf_get,
.put = tcf_ct_nf_put,
.owner = THIS_MODULE,
};
@ -337,9 +359,13 @@ err_alloc:
return err;
}
static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
refcount_inc(&ct_ft->ref);
}
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
struct flow_block_cb *block_cb, *tmp_cb;
struct tcf_ct_flow_table *ct_ft;
struct flow_block *block;
@ -347,13 +373,9 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
rwork);
nf_flow_table_free(&ct_ft->nf_ft);
/* Remove any remaining callbacks before cleanup */
block = &ct_ft->nf_ft.flow_block;
down_write(&ct_ft->nf_ft.flow_block_lock);
list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
list_del(&block_cb->list);
flow_block_cb_free(block_cb);
}
WARN_ON(!list_empty(&block->cb_list));
up_write(&ct_ft->nf_ft.flow_block_lock);
kfree(ct_ft);


@ -843,7 +843,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
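/* Cast to s64: peer_buf_alloc may have shrunk below the bytes in flight,
 * and an unsigned subtraction would wrap to a huge "free space" value.
 */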
bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (bytes < 0)
bytes = 0;