Merge tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from bpf, netfilter and WiFi.

  Jakub is doing a lot of work to include the self-tests in our CI; as a
  result, a significant number of self-test related fixes are flowing in
  (and will likely continue in the next few weeks).

  Current release - regressions:

   - bpf: fix a kernel crash for the riscv 64 JIT

   - bnxt_en: fix memory leak in bnxt_hwrm_get_rings()

   - revert "net: macsec: use skb_ensure_writable_head_tail to expand
     the skb"

  Previous releases - regressions:

   - core: fix removing a namespace with conflicting altnames

   - tc/flower: fix chain template offload memory leak

   - tcp:
      - make sure the accept_queue's spinlocks are initialized only
        once (see the sketch after this list)
      - fix autocork on CPUs with weak memory model

   - udp: fix busy polling

   - mlx5e:
      - fix an out-of-bounds read in port timestamping
      - fix peer flow lists corruption

   - iwlwifi: fix a memory corruption

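  The tcp spinlock item above is about re-initializing live locks:
  running spin_lock_init() on a lock that another CPU may currently
  hold corrupts its state, so initialization has to happen exactly
  once per socket. A minimal userspace analogue of the initialize-once
  pattern, using pthread_once (illustrative only, not the kernel fix;
  build with -pthread):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t queue_lock;
  static pthread_once_t queue_once = PTHREAD_ONCE_INIT;

  static void queue_lock_init(void)
  {
          /* Runs exactly once, however many times listen-like setup is
           * re-entered. Re-initializing a mutex another thread may hold
           * is undefined behaviour, as it is for kernel spinlocks. */
          pthread_mutex_init(&queue_lock, NULL);
  }

  int main(void)
  {
          pthread_once(&queue_once, queue_lock_init);
          pthread_once(&queue_once, queue_lock_init); /* no-op */
          pthread_mutex_lock(&queue_lock);
          puts("initialized once, locked safely");
          pthread_mutex_unlock(&queue_lock);
          return 0;
  }
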
  Previous releases - always broken:

   - netfilter:
      - nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress
        basechain
      - nft_limit: reject configurations that cause integer overflow
        (see the overflow-guard sketch after this list)

   - bpf: fix bpf_xdp_adjust_tail() with XSK zero-copy mbuf, avoiding a
     NULL pointer dereference upon shrinking

   - llc: make llc_ui_sendmsg() more robust against bonding changes

   - smc: fix illegal rmb_desc access in SMC-D connection dump

   - dpll: fix pin dump crash for rebound module

   - bnxt_en: fix possible crash after creating sw mqprio TCs

   - hv_netvsc: calculate correct ring size when PAGE_SIZE is not 4kB

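  The nft_limit item above rejects rate/unit products that would wrap
  around. A standalone sketch of a divide-based overflow guard (names
  and values assumed for illustration, 64-bit build; the kernel fix
  uses its own overflow-check helpers):

  #include <limits.h>
  #include <stdio.h>

  static int check_rate(unsigned long rate, unsigned long unit)
  {
          if (rate != 0 && unit > ULONG_MAX / rate)
                  return -1;      /* product would overflow: reject */
          return 0;
  }

  int main(void)
  {
          printf("%d\n", check_rate(1UL << 32, 1UL << 32)); /* -1 */
          printf("%d\n", check_rate(1000, 1000000));        /* 0  */
          return 0;
  }
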
  Misc:

   - several self-test fixes for better integration with the netdev CI

   - added several missing module descriptions"

* tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
  tsnep: Fix XDP_RING_NEED_WAKEUP for empty fill ring
  tsnep: Remove FCS for XDP data path
  net: fec: fix the unhandled context fault from smmu
  selftests: bonding: do not test arp/ns target with mode balance-alb/tlb
  fjes: fix memleaks in fjes_hw_setup
  i40e: update xdp_rxq_info::frag_size for ZC enabled Rx queue
  i40e: set xdp_rxq_info::frag_size
  xdp: reflect tail increase for MEM_TYPE_XSK_BUFF_POOL
  ice: update xdp_rxq_info::frag_size for ZC enabled Rx queue
  intel: xsk: initialize skb_frag_t::bv_offset in ZC drivers
  ice: remove redundant xdp_rxq_info registration
  i40e: handle multi-buffer packets that are shrunk by xdp prog
  ice: work on pre-XDP prog frag count
  xsk: fix usage of multi-buffer BPF helpers for ZC XDP
  xsk: make xsk_buff_pool responsible for clearing xdp_buff::flags
  xsk: recycle buffer in case Rx queue was full
  net: fill in MODULE_DESCRIPTION()s for rvu_mbox
  net: fill in MODULE_DESCRIPTION()s for litex
  net: fill in MODULE_DESCRIPTION()s for fsl_pq_mdio
  net: fill in MODULE_DESCRIPTION()s for fec
  ...
Linus Torvalds 2024-01-25 10:58:35 -08:00
commit ecb1b8288d
120 changed files with 928 additions and 341 deletions

View File

@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
bool save_ret;
u32 insn;
@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size = round_up(stack_size, 16);
if (func_addr) {
if (!is_struct_ops) {
/* For the trampoline called from function entry,
* the frame of traced function and the frame of
* trampoline need to be considered.
@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
if (func_addr) {
if (!is_struct_ops) {
/* trampoline called from function entry */
emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);

View File

@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_PIN_REGISTERED(p) \
WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@ -425,6 +423,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
{
kfree(prop->package_label);
kfree(prop->panel_label);
kfree(prop->board_label);
kfree(prop->freq_supported);
}
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
memcpy(dst, src, sizeof(*dst));
if (src->freq_supported && src->freq_supported_num) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
freq_size, GFP_KERNEL);
if (!src->freq_supported)
return -ENOMEM;
}
if (src->board_label) {
dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
if (!dst->board_label)
goto err_board_label;
}
if (src->panel_label) {
dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
if (!dst->panel_label)
goto err_panel_label;
}
if (src->package_label) {
dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
if (!dst->package_label)
goto err_package_label;
}
return 0;
err_package_label:
kfree(dst->panel_label);
err_panel_label:
kfree(dst->board_label);
err_board_label:
kfree(dst->freq_supported);
return -ENOMEM;
}
static struct dpll_pin *
dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
const struct dpll_pin_properties *prop)
@ -441,20 +486,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
prop->type > DPLL_PIN_TYPE_MAX)) {
ret = -EINVAL;
goto err;
goto err_pin_prop;
}
pin->prop = prop;
ret = dpll_pin_prop_dup(prop, &pin->prop);
if (ret)
goto err_pin_prop;
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret)
goto err;
goto err_xa_alloc;
return pin;
err:
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
return ERR_PTR(ret);
}
@ -514,6 +563,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
kfree(pin);
}
mutex_unlock(&dpll_lock);
@ -564,8 +614,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
WARN_ON(!ops->state_on_dpll_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
if (ASSERT_DPLL_REGISTERED(dpll))
return -EINVAL;
mutex_lock(&dpll_lock);
if (WARN_ON(!(dpll->module == pin->module &&
@ -636,15 +684,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
unsigned long i, stop;
int ret;
if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
return -EINVAL;
if (WARN_ON(!ops) ||
WARN_ON(!ops->state_on_pin_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
if (ASSERT_PIN_REGISTERED(parent))
return -EINVAL;
mutex_lock(&dpll_lock);
ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);

View File

@ -44,7 +44,7 @@ struct dpll_device {
* @module: module of creator
* @dpll_refs: hold referencees to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
* @prop: pointer to pin properties given by registerer
* @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
**/
@ -55,7 +55,7 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
const struct dpll_pin_properties *prop;
struct dpll_pin_properties prop;
refcount_t refcount;
};

View File

@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
DPLL_A_PIN_PAD))
return -EMSGSIZE;
for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
if (!nest)
return -EMSGSIZE;
freq = pin->prop->freq_supported[fs].min;
freq = pin->prop.freq_supported[fs].min;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
return -EMSGSIZE;
}
freq = pin->prop->freq_supported[fs].max;
freq = pin->prop.freq_supported[fs].max;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
if (freq >= pin->prop->freq_supported[fs].min &&
freq <= pin->prop->freq_supported[fs].max)
for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
if (freq >= pin->prop.freq_supported[fs].min &&
freq <= pin->prop.freq_supported[fs].max)
return true;
return false;
}
@ -421,7 +421,7 @@ static int
dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
struct netlink_ext_ack *extack)
{
const struct dpll_pin_properties *prop = pin->prop;
const struct dpll_pin_properties *prop = &pin->prop;
struct dpll_pin_ref *ref;
int ret;
@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
static bool dpll_pin_available(struct dpll_pin *pin)
{
struct dpll_pin_ref *par_ref;
unsigned long i;
if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
return false;
xa_for_each(&pin->parent_refs, i, par_ref)
if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
DPLL_REGISTERED))
return true;
xa_for_each(&pin->dpll_refs, i, par_ref)
if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
DPLL_REGISTERED))
return true;
return false;
}
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
int ret = -ENOMEM;
void *hdr;
if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
if (!dpll_pin_available(pin))
return -ENODEV;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "prio changing is not allowed");
return -EOPNOTSUPP;
}
@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
int ret;
if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "direction changing is not allowed");
return -EOPNOTSUPP;
}
@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
int ret;
phase_adj = nla_get_s32(phase_adj_attr);
if (phase_adj > pin->prop->phase_range.max ||
phase_adj < pin->prop->phase_range.min) {
if (phase_adj > pin->prop.phase_range.max ||
phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
"phase adjust value not supported");
return -EINVAL;
@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
unsigned long i;
xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
prop = pin->prop;
prop = &pin->prop;
cid_match = clock_id ? pin->clock_id == clock_id : true;
mod_match = mod_name_attr && module_name(pin->module) ?
!nla_strcmp(mod_name_attr,
@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
if (!dpll_pin_available(pin)) {
nlmsg_free(msg);
return -ENODEV;
}
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
@ -1179,6 +1201,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
if (!dpll_pin_available(pin))
continue;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
&dpll_nl_family, NLM_F_MULTI,
@ -1441,7 +1465,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
}
info->user_ptr[0] = xa_load(&dpll_pin_xa,
nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
if (!info->user_ptr[0]) {
if (!info->user_ptr[0] ||
!dpll_pin_available(info->user_ptr[0])) {
NL_SET_ERR_MSG(info->extack, "pin not found");
ret = -ENODEV;
goto unlock_dev;

View File

@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");

View File

@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");

View File

@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);

View File

@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");

View File

@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");

View File

@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");

View File

@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");

View File

@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, j, rc, ulp_base_vec, ulp_msix;
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!rx_rings)
return 0;
return bnxt_calc_nr_ring_pages(rx_rings - 1,
BNXT_RSS_TABLE_ENTRIES_P5);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
if (cp < (rx + tx)) {
rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
if (rc)
return rc;
goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
get_rings_exit:
hwrm_req_drop(bp, req);
return 0;
return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
req->num_rsscos_ctxs =
cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
{
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
{
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
bp->tx_nr_rings_xdp;
@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
struct net_device *dev = bp->dev;
int tcs, i;
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
if (tcs) {
int i, off, count;
@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
if (netdev_get_num_tc(bp->dev))
if (bp->num_tc) {
netdev_reset_tc(bp->dev);
bp->num_tc = 0;
}
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
bnxt_init_napi(bp);
set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
rc = bnxt_init_nic(bp, true);
if (rc) {
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@ -11586,6 +11599,7 @@ half_open_err:
void bnxt_half_close_nic(struct bnxt *bp)
{
bnxt_hwrm_resource_free(bp, false, true);
bnxt_del_napi(bp);
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
/* FW may be unresponsive after FLR. FLR must complete within 100 msec
* so wait before continuing with recovery.
*/
if (rc)
msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
if (netdev_get_num_tc(dev) == tc)
if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);

View File

@ -2225,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ

View File

@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
int tc = netdev_get_num_tc(bp->dev);
int tc = bp->num_tc;
if (!tc)
tc = 1;

View File

@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}

View File

@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
tc = netdev_get_num_tc(dev);
tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,

View File

@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */

View File

@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");

View File

@ -1485,7 +1485,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
length, false);
length - ETH_FCS_LEN, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
@ -1568,7 +1568,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length);
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
@ -1762,6 +1762,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
allocated--;
}
}
/* set need wakeup flag immediately if ring is not filled completely,
* first polling would be too late as need wakeup signalisation would
* be delayed for an indefinite time
*/
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
int desc_available = tsnep_rx_desc_available(rx);
if (desc_available)
xsk_set_rx_need_wakeup(rx->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx->xsk_pool);
}
}
static bool tsnep_pending(struct tsnep_queue *queue)

View File

@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");

View File

@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
} else {
if (fep->link) {
netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");

View File

@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");

View File

@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len = vsi->rx_buf_len;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (ring->vsi->type != I40E_VSI_MAIN)
goto skip;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->queue_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->queue_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (ret)
return ret;
if (err)
return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
if (ret)
return ret;
}
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
if (err)
return err;
}
skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,

View File

@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int err;
u64_stats_init(&rx_ring->syncp);
@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
if (err < 0)
return err;
}
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
u32 next = rx_ring->next_to_clean;
u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
if (xdp_res == I40E_XDP_CONSUMED)
rx_buffer->pagecnt_bias++;
else
if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
else if (i++ <= nr_frags)
rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
* @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp,
u32 nr_frags)
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
if (unlikely(xdp_buff_has_frags(xdp))) {
sinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = sinfo->nr_frags;
}
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
struct skb_shared_info *skinfo = skb_shinfo(skb);
sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
* @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp,
u32 nr_frags)
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
struct skb_shared_info *sinfo;
struct sk_buff *skb;
u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
if (unlikely(xdp_buff_has_frags(xdp))) {
sinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = sinfo->nr_frags;
}
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
skb = i40e_build_skb(rx_ring, xdp, nfrags);
skb = i40e_build_skb(rx_ring, xdp);
else
skb = i40e_construct_skb(rx_ring, xdp, nfrags);
skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {

View File

@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
virt_to_page(xdp->data_hard_start), 0, size);
virt_to_page(xdp->data_hard_start),
XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;

View File

@ -547,19 +547,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
__xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->vsi->rx_buf_len);
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@ -571,13 +579,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
__xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->vsi->rx_buf_len);
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,

View File

@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
if (rx_ring->vsi->type == ICE_VSI_PF &&
!xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
goto err;
return 0;
err:
@ -603,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
rx_buf->act = ret;
if (unlikely(xdp_buff_has_frags(xdp)))
ice_set_rx_bufs_act(xdp, rx_ring, ret);
ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
if (unlikely(xdp_buff_has_frags(xdp)))
ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
* can pop off frags but driver has to handle it on its own
*/
rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@ -1266,10 +1263,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,

View File

@ -358,6 +358,7 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */

View File

@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
* to one before last. Last one is handled by caller of this function as it
* is the EOP frag that is currently being processed. This function is
* supposed to be called only when XDP buffer contains frags.
* to the last.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
u32 first = rx_ring->first_desc;
u32 nr_frags = sinfo->nr_frags;
u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
buf = &rx_ring->rx_buf[first];
buf = &rx_ring->rx_buf[idx];
buf->act = act;
if (++first == cnt)
first = 0;
if (++idx == cnt)
idx = 0;
}
/* adjust pagecnt_bias on frags freed by XDP prog */
if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
u32 delta = rx_ring->nr_frags - sinfo_frags;
while (delta) {
if (idx == 0)
idx = cnt - 1;
else
idx--;
buf = &rx_ring->rx_buf[idx];
buf->pagecnt_bias--;
delta--;
}
}
}

View File

@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
virt_to_page(xdp->data_hard_start), 0, size);
virt_to_page(xdp->data_hard_start),
XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}

View File

@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
netdev->dev_port = idx;
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;

View File

@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
u32 val;
int i;
/* Drain the BM from all possible residues left by firmware */
for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
put_cpu();
/* Stop the BM pool */
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
int i, err, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_port *port;
if (priv->percpu_pools)
poolnum = mvpp2_get_nrxqs(priv) * 2;
/* Clean up the pool state in case it contains stale state */
for (i = 0; i < poolnum; i++)
mvpp2_bm_pool_cleanup(priv, i);
if (priv->percpu_pools) {
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}
}
poolnum = mvpp2_get_nrxqs(priv) * 2;
for (i = 0; i < poolnum; i++) {
/* the pool in use */
int pn = i / (poolnum / 2);

View File

@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");

View File

@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
spin_lock_irq(&stats->lock);
spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
spin_unlock_irq(&stats->lock);
spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */

View File
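The hunk above switches cmd_status_log() to spin_lock_irqsave() because
spin_unlock_irq() re-enables interrupts unconditionally, while the
irqsave/irqrestore pair puts back whatever IRQ state the caller had. A
toy userspace model of the difference (purely illustrative; the flag
stands in for the CPU interrupt state):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = false;       /* caller entered with IRQs off */

static unsigned long save_and_disable(void)
{
        unsigned long flags = irqs_enabled;
        irqs_enabled = false;
        return flags;
}

static void restore(unsigned long flags)
{
        irqs_enabled = flags;           /* previous state, not "on" */
}

int main(void)
{
        unsigned long flags = save_and_disable();
        /* ... critical section (stats update) ... */
        restore(flags);
        printf("irqs_enabled = %d (still off, as the caller expects)\n",
               irqs_enabled);
        return 0;
}
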

@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);

View File

@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
ft->g = NULL;
kvfree(in);
return -ENOMEM;
}

View File

@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
allow_swp =
mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);

View File

@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
md_buff[*md_buff_sz++] = metadata_id;
md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);

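The one-character hunk above fixes a precedence bug: *p++ parses as
*(p++), so the old code advanced the md_buff_sz pointer instead of
bumping the count it points to. A self-contained C illustration
(hypothetical names):

#include <stdio.h>

int main(void)
{
        unsigned int counts[2] = { 0, 0 };
        unsigned int *p = counts;

        (void)*p++;     /* *(p++): reads counts[0], then moves the pointer */
        (*p)++;         /* increments the pointed-to slot, now counts[1] */

        printf("counts = {%u, %u}, pointer moved %td slot(s)\n",
               counts[0], counts[1], p - counts);  /* {0, 1}, 1 slot */
        return 0;
}
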
View File

@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
attrs->dir = x->xso.dir;
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
goto skip_replay_window;
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
}
attrs->dir = x->xso.dir;
skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
if (x->replay_esn && x->replay_esn->replay_window != 32 &&
if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {

View File

@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
kvfree(in);
if (!ft->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
goto out;
goto err_free_in;
}
switch (type) {
@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
goto out;
goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
err:
err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
out:
err_free_in:
kvfree(in);
err_free_g:
kfree(ft->g);
ft->g = NULL;
return err;
}

View File

@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
{
int tc, i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(mdev, tisn[i][tc]);
}
@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
int tc, i;
int err;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
@ -140,7 +140,7 @@ err_close_tises:
return err;
}
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
goto err_destroy_bfreg;
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
goto err_destroy_bfreg;
}
res->tisn_valid = true;
}
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
mlx5e_destroy_tises(mdev, res->tisn);
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);

View File

@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
err = mlx5e_create_mdev_resources(mdev);
err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;

View File

@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
mlx5e_rqt_size(mdev, priv->max_nch));
mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}

View File

@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);

View File

@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
!!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);

View File

@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
err = mlx5e_create_mdev_resources(mdev);
err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}

View File

@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -

View File

@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
attr.final_icm_addr = rx_rule ?
action->vport->caps->icm_address_rx :
action->vport->caps->icm_address_tx;
if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
/* can't go to uplink on RX rule - dropping instead */
attr.final_icm_addr = nic_dmn->drop_icm_addr;
attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
} else {
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
attr.final_icm_addr = rx_rule ?
action->vport->caps->icm_address_rx :
action->vport->caps->icm_address_tx;
}
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &

View File

@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
{
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
*sd_group = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.sd_group);
out:
kvfree(out);
return err;
}
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;

View File

@ -7542,6 +7542,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
/* Wait a bit for the reset to take effect */
udelay(10);
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)

View File

@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.req_buf))
return -ENOMEM;
if (!(hw->hw_info.req_buf)) {
result = -ENOMEM;
goto free_ep_info;
}
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.res_buf))
return -ENOMEM;
if (!(hw->hw_info.res_buf)) {
result = -ENOMEM;
goto free_req_buf;
}
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
return result;
goto free_res_buf;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
return result;
goto free_epbuf;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
return result;
goto free_epbuf;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
fjes_hw_init_command_registers(hw, &param);
return 0;
free_epbuf:
for (epidx = 0; epidx < hw->max_epid ; epidx++) {
if (epidx == hw->my_epid)
continue;
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
}
fjes_hw_free_shared_status_region(hw);
free_res_buf:
kfree(hw->hw_info.res_buf);
hw->hw_info.res_buf = NULL;
free_req_buf:
kfree(hw->hw_info.req_buf);
hw->hw_info.req_buf = NULL;
free_ep_info:
kfree(hw->ep_shm_info);
hw->ep_shm_info = NULL;
return result;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)


@@ -44,7 +44,7 @@
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2807,7 +2807,7 @@ static int __init netvsc_drv_init(void)
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
register_netdevice_notifier(&netvsc_netdev_notifier);

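The parameter counts 4 KiB pages, so the byte size must not scale with the host PAGE_SIZE. Worked numbers, under my reading that VMBUS_RING_SIZE() pads for the ring-buffer header and rounds the total up to PAGE_SIZE:

/* ring_size = 128 (4 KiB units, the default):
 *   fixed math:              128 * 4096  = 512 KiB of payload
 *   old math, 4 KiB pages:   128 * 4096  = 512 KiB (bug invisible)
 *   old math, 64 KiB pages:  128 * 65536 = 8 MiB (16x oversized)
 * VMBUS_RING_SIZE() then page-aligns the header-plus-payload total,
 * so the ring stays valid for any PAGE_SIZE. */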

@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
ret = skb_ensure_writable_head_tail(skb, dev);
if (unlikely(ret < 0)) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
struct sk_buff *nskb = skb_copy_expand(skb,
MACSEC_NEEDED_HEADROOM,
MACSEC_NEEDED_TAILROOM,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
macsec_txsa_put(tx_sa);
return ERR_PTR(-ENOMEM);
}
}
unprotected_len = skb->len;


@@ -120,6 +120,11 @@
*/
#define LAN8814_1PPM_FORMAT 17179
#define PTP_RX_VERSION 0x0248
#define PTP_TX_VERSION 0x0288
#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
#define PTP_RX_MOD 0x024F
#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
#define PTP_RX_TIMESTAMP_EN 0x024D
@@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
/* Disable checking for minorVersionPTP field */
lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
skb_queue_head_init(&ptp_priv->tx_queue);
skb_queue_head_init(&ptp_priv->rx_queue);
INIT_LIST_HEAD(&ptp_priv->rx_ts_list);

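PTP_MAX_VERSION() places its argument in bits 15:8 and PTP_MIN_VERSION() in bits 7:0 (GENMASK(7, 0) is 0xff), so both version registers are written with 0xff00: any versionPTP from 0 through 255 is accepted, which is what disabling the minorVersionPTP check amounts to. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* userspace mirror of the driver macros; GENMASK(7, 0) == 0xff */
#define PTP_MAX_VERSION(x)	(((x) & 0xff) << 8)
#define PTP_MIN_VERSION(x)	((x) & 0xff)

int main(void)
{
	uint16_t v = PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0);

	printf("0x%04x\n", v);	/* prints 0xff00: min 0, max 255 */
	return 0;
}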

@@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
switch (act) {
case XDP_REDIRECT:
err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
if (err)
if (err) {
dev_core_stats_rx_dropped_inc(tun->dev);
return err;
}
dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_TX:
err = tun_xdp_tx(tun->dev, xdp);
if (err < 0)
if (err < 0) {
dev_core_stats_rx_dropped_inc(tun->dev);
return err;
}
dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_PASS:
break;


@@ -368,10 +368,6 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {


@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = arvif->ar->ab;
struct dentry *debugfs_twt;
if (arvif->vif->type != NL80211_IFTYPE_AP &&
!(arvif->vif->type == NL80211_IFTYPE_STATION &&
test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
return;
arvif->debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
debugfs_create_file("add_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("del_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("pause_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("resume_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
if (!arvif->debugfs_twt)
return;
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}


@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
}
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,


@@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
/* In the case of hardware recovery, debugfs files are
* not deleted since ieee80211_ops.remove_interface() is
* not invoked. In such cases, try to delete the files.
* These will be re-created later.
*/
ath11k_debugfs_remove_interface(arvif);
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
ath11k_debugfs_add_interface(arvif);
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
@@ -7056,8 +7047,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
ath11k_debugfs_remove_interface(arvif);
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
.vif_add_debugfs = ath11k_debugfs_op_vif_add,
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
node_trig = (void *)node_tlv->data;
}
memcpy(node_trig->data + offset, trig->data, trig_data_len);
memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {

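node_trig->data is a 32-bit element array as I read the iwlwifi TLV definitions, so without the cast node_trig->data + offset advances offset * 4 bytes while the surrounding length math is byte-based. The (u8 *) cast makes the addition byte-granular; a standalone illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf[4] = { 0 };
	uint32_t *p = buf;

	/* element arithmetic: "+ 2" moves 2 * sizeof(uint32_t) = 8 bytes */
	printf("%td bytes\n", (char *)(p + 2) - (char *)buf);
	/* byte arithmetic, as in the fix: "+ 2" moves exactly 2 bytes */
	printf("%td bytes\n", (char *)((uint8_t *)p + 2) - (char *)buf);
	return 0;
}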

@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
"FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
"%s - %x.%x", fw_version,
"%.19s - %x.%x", fw_version,
priv->fw_var >> 8, priv->fw_var & 0xff);
}


@@ -681,6 +681,7 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg;
#define MLX5_MAX_NUM_TC 8
u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
struct mutex uplink_netdev_lock;


@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
};
struct mlx5_flow_context {


@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10];
u8 extended_destination[0x1];
u8 reserved_at_81[0x1];
u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2];
u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
u8 reserved_at_60[0xd0];
u8 reserved_at_60[0xa0];
u8 reserved_at_100[0x1];
u8 sd_group[0x3];
u8 reserved_at_104[0x1c];
u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
u8 reserved_at_20[0x20];
u8 local_port[0x8];
u8 reserved_at_28[0x15];
u8 sd_group[0x3];
u8 reserved_at_28[0x18];
u8 reserved_at_60[0x20];
};

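The two layout edits stay size-compatible because the new fields exactly tile the old reserved spans: in nic_vport_context, 0xa0 + 0x1 + 0x3 + 0x1c + 0x10 = 0xd0 bits, matching the removed reserved_at_60[0xd0]; in mpir_reg, the dropped 3-bit sd_group folds back into reserved space, 0x15 + 0x3 = 0x18. The same accounting as compile-time checks:

/* Sketch: verify the bit accounting of the two struct edits. */
_Static_assert(0xa0 + 0x1 + 0x3 + 0x1c + 0x10 == 0xd0,
	       "nic_vport_context: sd_group carve-out keeps the size");
_Static_assert(0x15 + 0x3 == 0x18,
	       "mpir_reg: sd_group folded back into reserved bits");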

@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);


@@ -505,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
static inline bool sk_is_udp(const struct sock *sk)
{
return sk->sk_type == SOCK_DGRAM &&
sk->sk_protocol == IPPROTO_UDP;
}
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)


@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
}
static inline void inet_init_csk_locks(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
}
#endif /* _INET_CONNECTION_SOCK_H */

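Hoisting the spin_lock_init() calls to socket creation (see the af_inet, af_inet6 and accept hunks further down) makes them run exactly once per socket. As I understand the fix, the old location ran on every listen(), and re-initialising a spinlock that another CPU may hold resets its state and corrupts the accept queue. The init-once shape, reduced to a sketch with illustrative names:

#include <linux/spinlock.h>

struct example_queue {
	spinlock_t lock;
};

static void example_create(struct example_queue *q)
{
	spin_lock_init(&q->lock);	/* object not yet visible: safe */
}

static void example_listen(struct example_queue *q)
{
	/* no spin_lock_init() here: a repeated listen() would otherwise
	 * reset a lock that a concurrent accept() may be holding */
	spin_lock(&q->lock);
	/* ... queue manipulation ... */
	spin_unlock(&q->lock);
}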

@@ -307,11 +307,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
#define inet_assign_bit(nr, sk, val) \
assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
static inline bool sk_is_inet(struct sock *sk)
{
return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
}
/**
* sk_to_full_sk - Access to a full socket
* @sk: pointer to a socket


@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
*/
static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
if (skb->protocol == htons(ETH_P_802_2))
memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
}
/**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
*/
static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
if (skb->protocol == htons(ETH_P_802_2))
memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
}
/**


@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
* @flags: modifiers to new request
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
*
* @key: element key
* @key_end: closing element key
* @data: element data
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -325,10 +327,10 @@ struct nft_set_iter {
* @dtype: data type
* @dlen: data length
* @objtype: object type
* @flags: flags
* @size: number of set elements
* @policy: set policy
* @gc_int: garbage collector interval
* @timeout: element timeout
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
* @expr: set must support for expressions
@@ -351,9 +353,9 @@ struct nft_set_desc {
/**
* enum nft_set_class - performance class
*
* @NFT_LOOKUP_O_1: constant, O(1)
* @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
* @NFT_LOOKUP_O_N: linear, O(N)
* @NFT_SET_CLASS_O_1: constant, O(1)
* @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
* @NFT_SET_CLASS_O_N: linear, O(N)
*/
enum nft_set_class {
NFT_SET_CLASS_O_1,
@@ -422,9 +424,13 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
* @commit: commit set elements
* @abort: abort set elements
* @privsize: function to return size of set private data
* @estimate: estimate the required memory size and the lookup complexity class
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
* @gc_init: initialize garbage collection
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +546,16 @@ struct nft_set_elem_expr {
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
* @expr: stateful expression
* @pending_update: list of pending update set element
* @ops: set ops
* @flags: set flags
* @dead: set will be freed, never cleared
* @genmask: generation mask
* @klen: key length
* @dlen: data length
* @num_exprs: numbers of exprs
* @exprs: stateful expression
* @catchall_list: list of catch-all set element
* @data: private set data
*/
struct nft_set {
@@ -692,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
*
* @len: length of extension area
* @offset: offsets of individual extension types
* @ext_len: length of the expected extension(used to sanity check)
*/
struct nft_set_ext_tmpl {
u16 len;
@@ -840,6 +850,7 @@ struct nft_expr_ops;
* @select_ops: function to select nft_expr_ops
* @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
* @inner_ops: inner ops, used for inner packet operation
* @list: used internally
* @name: Identifier
* @owner: module reference
@@ -881,14 +892,22 @@ struct nft_offload_ctx;
* struct nft_expr_ops - nf_tables expression operations
*
* @eval: Expression evaluation function
* @clone: Expression clone function
* @size: full expression size, including private data size
* @init: initialization function
* @activate: activate expression in the next generation
* @deactivate: deactivate expression in next generation
* @destroy: destruction function, called after synchronize_rcu
* @destroy_clone: destruction clone function
* @dump: function to dump parameters
* @type: expression type
* @validate: validate expression, called during loop detection
* @reduce: reduce expression
* @gc: garbage collection expression
* @offload: hardware offload expression
* @offload_action: function to report true/false to allocate one slot or not in the flow
* offload array
* @offload_stats: function to synchronize hardware stats via updating the counter expression
* @type: expression type
* @data: extra data to attach to this expression operation
*/
struct nft_expr_ops {
@@ -1041,14 +1060,21 @@ struct nft_rule_blob {
/**
* struct nft_chain - nf_tables chain
*
* @blob_gen_0: rule blob pointer to the current generation
* @blob_gen_1: rule blob pointer to the future generation
* @rules: list of rules in the chain
* @list: used internally
* @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
* @flags: bitmask of enum nft_chain_flags
* @flags: bitmask of enum NFTA_CHAIN_FLAGS
* @bound: bind or not
* @genmask: generation mask
* @name: name of the chain
* @udlen: user data length
* @udata: user data in the chain
* @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
struct nft_rule_blob __rcu *blob_gen_0;
@@ -1146,6 +1172,7 @@ struct nft_hook {
* @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
* @flags: indicate the base chain disabled or not
* @stats: per-cpu chain stats
* @chain: the chain
* @flow_block: flow block (for hardware offload)
@@ -1274,11 +1301,13 @@ struct nft_object_hash_key {
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
* @key: keys that identify this object
* @rhlhead: nft_objname_ht node
* @key: keys that identify this object
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
* @udlen: length of user data
* @udata: user data
* @ops: object operations
* @data: object data, layout depends on type
*/
@@ -1344,6 +1373,7 @@ struct nft_object_type {
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
* @update: update stateful object
* @type: pointer to object type
*/
struct nft_object_ops {
void (*eval)(struct nft_object *obj,
@@ -1379,9 +1409,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
* @dev_name: array of device names
* @hook_list: hook list for hooks per net_device in flowtables
* @data: rhashtable and garbage collector
* @ops: array of hooks
*/
struct nft_flowtable {
struct list_head list;


@@ -375,6 +375,10 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
void (*tmplt_reoffload)(struct tcf_chain *chain,
bool add,
flow_setup_cb_t *cb,
void *cb_priv);
struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
u32 handle);


@@ -2765,9 +2765,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
static inline bool sk_is_inet(const struct sock *sk)
{
int family = READ_ONCE(sk->sk_family);
return family == AF_INET || family == AF_INET6;
}
static inline bool sk_is_tcp(const struct sock *sk)
{
return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
return sk_is_inet(sk) &&
sk->sk_type == SOCK_STREAM &&
sk->sk_protocol == IPPROTO_TCP;
}
static inline bool sk_is_udp(const struct sock *sk)
{
return sk_is_inet(sk) &&
sk->sk_type == SOCK_DGRAM &&
sk->sk_protocol == IPPROTO_UDP;
}
static inline bool sk_is_stream_unix(const struct sock *sk)


@@ -159,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
return ret;
}
static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
list_del(&xskb->xskb_list_node);
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
struct xdp_buff_xsk *frag;
frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
xskb_list_node);
return &frag->xdp;
}
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
xdp->data_meta = xdp->data;
xdp->data_end = xdp->data + size;
xdp->flags = 0;
}
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -350,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
return NULL;
}
static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
return NULL;
}
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

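xsk_buff_get_tail() pairs list_last_entry() on the pool's xskb_list with the embedded xdp_buff, and xsk_buff_del_tail() relies on container_of() to step from that xdp_buff back to its xdp_buff_xsk wrapper. A standalone copy of the recovery pattern, with illustrative structure names:

#include <stddef.h>

struct inner { int data; };		/* stand-in for xdp_buff */
struct wrapper {			/* stand-in for xdp_buff_xsk */
	int pool_state;
	struct inner buf;
};

/* container_of(): subtract the member offset to recover the wrapper */
#define container_of_(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapper *wrapper_of(struct inner *p)
{
	return container_of_(p, struct wrapper, buf);
}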

@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
continue;
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
continue;
m = nla_data(attr);
err = vlan_dev_set_egress_priority(dev, m->from, m->to);
if (err)


@@ -11551,6 +11551,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
static void __net_exit default_device_exit_net(struct net *net)
{
struct netdev_name_node *name_node, *tmp;
struct net_device *dev, *aux;
/*
* Push all migratable network devices back to the
@@ -11573,6 +11574,14 @@ static void __net_exit default_device_exit_net(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
netdev_for_each_altname_safe(dev, name_node, tmp)
if (netdev_name_in_use(&init_net, name_node->name)) {
netdev_name_node_del(name_node);
synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
}
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",


@@ -63,6 +63,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
#define netdev_for_each_altname(dev, namenode) \
list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next) \
list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
list)
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);


@@ -83,6 +83,7 @@
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netkit.h>
#include <linux/un.h>
#include <net/xdp_sock_drv.h>
#include "dev.h"
@@ -4092,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
skb_frag_size_add(frag, offset);
sinfo->xdp_frags_size += offset;
if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
xsk_buff_get_tail(xdp)->data_end += offset;
return 0;
}
static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
struct xdp_mem_info *mem_info, bool release)
{
struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
if (release) {
xsk_buff_del_tail(zc_frag);
__xdp_return(NULL, mem_info, false, zc_frag);
} else {
zc_frag->data_end -= shrink;
}
}
static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
int shrink)
{
struct xdp_mem_info *mem_info = &xdp->rxq->mem;
bool release = skb_frag_size(frag) == shrink;
if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
goto out;
}
if (release) {
struct page *page = skb_frag_page(frag);
__xdp_return(page_address(page), mem_info, false, NULL);
}
out:
return release;
}
static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -4110,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
len_free += shrink;
offset -= shrink;
if (skb_frag_size(frag) == shrink) {
struct page *page = skb_frag_page(frag);
__xdp_return(page_address(page), &xdp->rxq->mem,
false, NULL);
if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
n_frags_free++;
} else {
skb_frag_size_sub(frag, shrink);


@@ -33,9 +33,6 @@
void reqsk_queue_alloc(struct request_sock_queue *queue)
{
spin_lock_init(&queue->rskq_lock);
spin_lock_init(&queue->fastopenq.lock);
queue->fastopenq.rskq_rst_head = NULL;
queue->fastopenq.rskq_rst_tail = NULL;
queue->fastopenq.qlen = 0;


@@ -107,6 +107,7 @@
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
@@ -4144,8 +4145,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
{
struct sock *sk = p;
return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
sk_busy_loop_timeout(sk, start_time);
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
return true;
if (sk_is_udp(sk) &&
!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
return true;
return sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */

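UDP splices received skbs from sk_receive_queue into udp_sk(sk)->reader_queue on first read, so a busy-polling thread watching only sk_receive_queue could spin until timeout with datagrams already queued. For context, the predicate is consumed as the loop_end callback of napi_busy_loop(); a simplified sketch of the call site as I read sk_busy_loop(), with the budget fallback value being an assumption:

/* Simplified sketch: returning true from the callback ends the poll. */
static void example_sk_busy_loop(struct sock *sk, unsigned int napi_id)
{
	napi_busy_loop(napi_id, sk_busy_loop_end, sk,
		       READ_ONCE(sk->sk_prefer_busy_poll),
		       READ_ONCE(sk->sk_busy_poll_budget) ?: 8);
}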

@@ -330,6 +330,9 @@ lookup_protocol:
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
if (INET_PROTOSW_ICSK & answer_flags)
inet_init_csk_locks(sk);
inet = inet_sk(sk);
inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);


@@ -727,6 +727,10 @@ out:
}
if (req)
reqsk_put(req);
if (newsk)
inet_init_csk_locks(newsk);
return newsk;
out_err:
newsk = NULL;


@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
smp_mb__after_atomic();
}
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED.

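set_bit() is atomic but imposes no memory ordering on weakly ordered CPUs; smp_mb__after_atomic() makes the TSQ_THROTTLED store visible before the function re-reads sk_wmem_alloc in the "TX completion already happened" check just below. The ordering being enforced, sketched as a comment:

/*   tcp_push() (CPU A)              TX completion path (CPU B)
 *   set_bit(TSQ_THROTTLED);         frees skb, drops sk_wmem_alloc
 *   smp_mb__after_atomic();         tests TSQ_THROTTLED, kicks tasklet
 *   re-reads sk_wmem_alloc;
 *
 * With the barrier at least one CPU observes the other's update, so an
 * autocorked skb is always flushed instead of stalling indefinitely. */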

@@ -199,6 +199,9 @@ lookup_protocol:
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
if (INET_PROTOSW_ICSK & answer_flags)
inet_init_csk_locks(sk);
inet = inet_sk(sk);
inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);


@@ -928,14 +928,15 @@ copy_uaddr:
*/
static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
int rc = -EINVAL, copied = 0, hdrlen, hh_len;
struct sk_buff *skb = NULL;
struct net_device *dev;
size_t size = 0;
int rc = -EINVAL, copied = 0, hdrlen;
dprintk("%s: sending from %02X to %02X\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
@@ -955,22 +956,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (rc)
goto out;
}
hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
dev = llc->dev;
hh_len = LL_RESERVED_SPACE(dev);
hdrlen = llc_ui_header_len(sk, addr);
size = hdrlen + len;
if (size > llc->dev->mtu)
size = llc->dev->mtu;
size = min_t(size_t, size, READ_ONCE(dev->mtu));
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
goto out;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
lock_sock(sk);
if (!skb)
goto out;
skb->dev = llc->dev;
if (sock_flag(sk, SOCK_ZAPPED) ||
llc->dev != dev ||
hdrlen != llc_ui_header_len(sk, addr) ||
hh_len != LL_RESERVED_SPACE(dev) ||
size > READ_ONCE(dev->mtu))
goto out;
skb->dev = dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
skb_reserve(skb, hh_len + hdrlen);
rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
if (rc)
goto out;

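sock_alloc_send_skb() can sleep, so everything derived from llc->dev before release_sock() may be stale once the lock is retaken (bonding can swap the underlying device in between). The fix snapshots dev, hh_len and hdrlen, allocates, then revalidates them together with SOCK_ZAPPED and the MTU before touching the skb. The general unlock/alloc/relock shape, as a sketch:

/* Revalidation pattern after dropping a lock to sleep (sketch):
 *
 *	snapshot = obj->state;		// read under the lock
 *	release_sock(sk);
 *	buf = blocking_alloc(...);	// may sleep; state can change
 *	lock_sock(sk);
 *	if (!buf || obj->state != snapshot)
 *		goto out;		// recheck before trusting buf
 */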

@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
.func = llc_rcv,
};
static struct packet_type llc_tr_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_TR_802_2),
.func = llc_rcv,
};
static int __init llc_init(void)
{
dev_add_pack(&llc_packet_type);
dev_add_pack(&llc_tr_packet_type);
return 0;
}
static void __exit llc_exit(void)
{
dev_remove_pack(&llc_packet_type);
dev_remove_pack(&llc_tr_packet_type);
}
module_init(llc_init);


@@ -62,7 +62,6 @@ config MAC80211_KUNIT_TEST
depends on KUNIT
depends on MAC80211
default KUNIT_ALL_TESTS
depends on !KERNEL_6_2
help
Enable this option to test mac80211 internals with kunit.


@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
int i;
for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
if (!(sta->sta.valid_links & BIT(i)))
struct link_sta_info *link_sta;
link_sta = rcu_access_pointer(sta->link[i]);
if (!link_sta)
continue;
sta_remove_link(sta, i, false);
@@ -910,6 +913,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
ieee80211_check_fast_xmit(sta);
return 0;
out_remove:
if (sta->sta.valid_links)


@@ -3048,7 +3048,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
sdata->vif.type == NL80211_IFTYPE_STATION)
goto out;
if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
goto out;
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||


@@ -24,6 +24,7 @@
#include <net/sock.h>
#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
#define NFT_SET_MAX_ANONLEN 16
unsigned int nf_tables_net_id __read_mostly;
@@ -4413,6 +4414,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
return -EINVAL;
inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (inuse == NULL)
return -ENOMEM;
@@ -10988,16 +10992,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
switch (data->verdict.code) {
default:
switch (data->verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
break;
default:
return -EINVAL;
}
fallthrough;
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
break;
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
@@ -11032,6 +11030,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
data->verdict.chain = chain;
break;
default:
return -EINVAL;
}
desc->len = sizeof(data->verdict);


@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nft_base_chain *basechain;
struct nftables_pernet *nft_net;
struct nft_table *table;
struct nft_chain *chain, *nr;
struct nft_table *table;
struct nft_ctx ctx = {
.net = dev_net(dev),
};
@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
if (table->family != NFPROTO_NETDEV &&
table->family != NFPROTO_INET)
continue;
ctx.family = table->family;
@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
if (!nft_is_base_chain(chain))
continue;
basechain = nft_base_chain(chain);
if (table->family == NFPROTO_INET &&
basechain->ops.hooknum != NF_INET_INGRESS)
continue;
ctx.chain = chain;
nft_netdev_event(event, dev, &ctx);
}


@@ -350,6 +350,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
ctx->family != NFPROTO_BRIDGE &&
ctx->family != NFPROTO_ARP)
return -EOPNOTSUPP;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@@ -595,6 +601,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
ctx->family != NFPROTO_BRIDGE &&
ctx->family != NFPROTO_ARP)
return -EOPNOTSUPP;
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);


@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
{
unsigned int hook_mask = (1 << NF_INET_FORWARD);
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
ctx->family != NFPROTO_INET)
return -EOPNOTSUPP;
return nft_chain_validate_hooks(ctx->chain, hook_mask);
}


@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
static int nft_limit_init(struct nft_limit_priv *priv,
const struct nlattr * const tb[], bool pkts)
{
u64 unit, tokens, rate_with_burst;
bool invert = false;
u64 unit, tokens;
if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
if (priv->rate == 0)
return -EINVAL;
unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
priv->nsecs = unit * NSEC_PER_SEC;
if (priv->rate == 0 || priv->nsecs < unit)
if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
return -EOVERFLOW;
if (tb[NFTA_LIMIT_BURST])
@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
if (pkts && priv->burst == 0)
priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
if (priv->rate + priv->burst < priv->rate)
if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
return -EOVERFLOW;
if (pkts) {
tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
u64 tmp = div64_u64(priv->nsecs, priv->rate);
if (check_mul_overflow(tmp, priv->burst, &tokens))
return -EOVERFLOW;
} else {
u64 tmp;
/* The token bucket size limits the number of tokens can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
priv->rate);
if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
return -EOVERFLOW;
tokens = div64_u64(tmp, priv->rate);
}
if (tb[NFTA_LIMIT_FLAGS]) {

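The old guards (priv->nsecs < unit and rate + burst < rate) catch only some wraps; the overflow.h helpers return true whenever the full-width result cannot be represented, and every intermediate of the token computation is now checked. A minimal userspace sketch of the same pattern; __builtin_*_overflow is what backs the kernel's check_*_overflow() helpers:

#include <errno.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Sketch mirroring nft_limit_init()'s packet-mode token math. */
static int scaled_tokens(uint64_t unit, uint64_t rate, uint64_t burst,
			 uint64_t *tokens)
{
	uint64_t nsecs, per_pkt;

	if (rate == 0 || __builtin_mul_overflow(unit, NSEC_PER_SEC, &nsecs))
		return -EOVERFLOW;
	per_pkt = nsecs / rate;
	if (__builtin_mul_overflow(per_pkt, burst, tokens))
		return -EOVERFLOW;
	return 0;
}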

@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
struct nft_nat *priv = nft_expr_priv(expr);
int err;
if (ctx->family != NFPROTO_IPV4 &&
ctx->family != NFPROTO_IPV6 &&
ctx->family != NFPROTO_INET)
return -EOPNOTSUPP;
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
