Merge branch 'net-tja11xx-macsec-support'

Radu Pirea says:

====================
Add MACsec support for TJA11XX C45 PHYs

This series adds MACsec support for the TJA11XX PHYs. The MACsec block
encrypts Ethernet frames on the fly and has no buffering, so protecting
a frame grows it by 32 bytes. If frames are sent back to back, the
MACsec block does not have enough room to insert the SecTAG and the
ICV, and the frames are dropped.

To mitigate this, the PHY can parse a specific EtherType followed by
padding bytes and replace them with the SecTAG and ICV. The padding
bytes may be dummy bytes, or they may carry information about the TX SC
that must be used to encrypt the frame.
====================
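
To make the mechanism concrete, here is a minimal sketch of what the
placeholder insertion amounts to on the TX path. The EtherType value,
tag length, and all names (ETH_P_TJA_TX_TAG, TJA_TX_TAG_LEN,
example_insert_tx_tag) are hypothetical illustrations, not the actual
TJA11XX tag format; the PHY later overwrites this region with the real
SecTAG and ICV.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define ETH_P_TJA_TX_TAG	0xdead	/* assumed placeholder EtherType */
#define TJA_TX_TAG_LEN		32	/* assumed: 2-byte EtherType + 30 bytes padding */

static int example_insert_tx_tag(struct sk_buff *skb)
{
	u8 *start;

	/* Make sure the headroom reserved via dev->needed_headroom is
	 * present and writable (the skb may be cloned).
	 */
	if (skb_cow_head(skb, TJA_TX_TAG_LEN))
		return -ENOMEM;

	/* Open a gap after the MAC addresses, like a (large) VLAN tag:
	 * grow the frame at the front, then move the destination and
	 * source MAC addresses up.
	 */
	start = skb_push(skb, TJA_TX_TAG_LEN);
	memmove(start, start + TJA_TX_TAG_LEN, 2 * ETH_ALEN);

	/* Fill the gap with the placeholder EtherType plus padding;
	 * the PHY replaces these bytes with the SecTAG and ICV.
	 */
	*(__be16 *)(start + 2 * ETH_ALEN) = htons(ETH_P_TJA_TX_TAG);
	memset(start + 2 * ETH_ALEN + 2, 0, TJA_TX_TAG_LEN - 2);

	return 0;
}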

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2f7ccf1d88
David S. Miller, 2023-12-27 13:08:10 +00:00
12 changed files with 2043 additions and 101 deletions

MAINTAINERS

@@ -15444,7 +15444,7 @@ NXP C45 TJA11XX PHY DRIVER
M: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/nxp-c45-tja11xx.c
F: drivers/net/phy/nxp-c45-tja11xx*
NXP FSPI DRIVER
M: Han Xu <han.xu@nxp.com>

drivers/net/macsec.c

@@ -93,6 +93,8 @@ struct pcpu_secy_stats {
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
* @insert_tx_tag: when offloading, the device requires an additional
* tag to be inserted
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -102,6 +104,7 @@ struct macsec_dev {
struct list_head secys;
struct gro_cells gro_cells;
enum macsec_offload offload;
bool insert_tx_tag;
};
/**
@@ -604,26 +607,11 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
struct sk_buff *nskb = skb_copy_expand(skb,
MACSEC_NEEDED_HEADROOM,
MACSEC_NEEDED_TAILROOM,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
macsec_txsa_put(tx_sa);
return ERR_PTR(-ENOMEM);
}
ret = skb_ensure_writable_head_tail(skb, dev);
if (unlikely(ret < 0)) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
}
unprotected_len = skb->len;
@@ -2583,6 +2571,33 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
const struct macsec_ops *ops)
{
return macsec->offload == MACSEC_OFFLOAD_PHY &&
ops->mdo_insert_tx_tag;
}
static void macsec_set_head_tail_room(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
int needed_headroom, needed_tailroom;
const struct macsec_ops *ops;
ops = macsec_get_ops(macsec, NULL);
if (ops) {
needed_headroom = ops->needed_headroom;
needed_tailroom = ops->needed_tailroom;
} else {
needed_headroom = MACSEC_NEEDED_HEADROOM;
needed_tailroom = MACSEC_NEEDED_TAILROOM;
}
dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
}
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
enum macsec_offload prev_offload;
@@ -2620,8 +2635,13 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
ctx.secy = &macsec->secy;
ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
: macsec_offload(ops->mdo_add_secy, &ctx);
if (ret)
if (ret) {
macsec->offload = prev_offload;
return ret;
}
macsec_set_head_tail_room(dev);
macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
return ret;
}
@@ -3379,6 +3399,40 @@ static struct genl_family macsec_fam __ro_after_init = {
.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
const struct macsec_ops *ops;
struct phy_device *phydev;
struct macsec_context ctx;
int skb_final_len;
int err;
ops = macsec_get_ops(macsec, &ctx);
skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
ops->needed_tailroom;
if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
err = -EINVAL;
goto cleanup;
}
phydev = macsec->real_dev->phydev;
err = skb_ensure_writable_head_tail(skb, dev);
if (unlikely(err < 0))
goto cleanup;
err = ops->mdo_insert_tx_tag(phydev, skb);
if (unlikely(err))
goto cleanup;
return skb;
cleanup:
kfree_skb(skb);
return ERR_PTR(err);
}
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -3393,6 +3447,15 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb_dst_drop(skb);
dst_hold(&md_dst->dst);
skb_dst_set(skb, &md_dst->dst);
if (macsec->insert_tx_tag) {
skb = macsec_insert_tx_tag(skb, dev);
if (IS_ERR(skb)) {
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
}
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
@@ -3454,10 +3517,7 @@ static int macsec_dev_init(struct net_device *dev)
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
dev->needed_tailroom = real_dev->needed_tailroom +
MACSEC_NEEDED_TAILROOM;
macsec_set_head_tail_room(dev);
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, real_dev);
@@ -3604,21 +3664,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct sockaddr *addr = p;
u8 old_addr[ETH_ALEN];
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (!(dev->flags & IFF_UP))
goto out;
if (dev->flags & IFF_UP) {
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
dev_uc_del(real_dev, dev->dev_addr);
out:
ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
/* If h/w offloading is available, propagate to the device */
@@ -3627,13 +3685,29 @@ out:
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.secy = &macsec->secy;
macsec_offload(ops->mdo_upd_secy, &ctx);
if (!ops) {
err = -EOPNOTSUPP;
goto restore_old_addr;
}
ctx.secy = &macsec->secy;
err = macsec_offload(ops->mdo_upd_secy, &ctx);
if (err)
goto restore_old_addr;
}
if (dev->flags & IFF_UP)
dev_uc_del(real_dev, old_addr);
return 0;
restore_old_addr:
if (dev->flags & IFF_UP)
dev_uc_del(real_dev, addr->sa_data);
eth_hw_addr_set(dev, old_addr);
return err;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
@@ -4126,6 +4200,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = macsec_offload(ops->mdo_add_secy, &ctx);
if (err)
goto del_dev;
macsec->insert_tx_tag =
macsec_needs_tx_tag(macsec, ops);
}
}

drivers/net/netdevsim/macsec.c

@@ -3,11 +3,6 @@
#include <net/macsec.h>
#include "netdevsim.h"
static inline u64 sci_to_cpu(sci_t sci)
{
return be64_to_cpu((__force __be64)sci);
}
static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci)
{
int i;

drivers/net/phy/Kconfig

@@ -317,9 +317,10 @@ config NXP_CBTX_PHY
config NXP_C45_TJA11XX_PHY
tristate "NXP C45 TJA11XX PHYs"
depends on PTP_1588_CLOCK_OPTIONAL
depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
Currently supports the TJA1103 and TJA1120 PHYs.
Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"

drivers/net/phy/Makefile

@@ -84,7 +84,11 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
nxp-c45-tja-objs += nxp-c45-tja11xx.o
ifdef CONFIG_MACSEC
nxp-c45-tja-objs += nxp-c45-tja11xx-macsec.o
endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o

drivers/net/phy/nxp-c45-tja11xx-macsec.c (new file; diff suppressed because it is too large)

drivers/net/phy/nxp-c45-tja11xx.c

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
* Copyright (C) 2021 NXP
* Copyright 2021-2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -14,9 +14,10 @@
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include "nxp-c45-tja11xx.h"
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
@@ -75,9 +76,11 @@
#define PORT_CONTROL_EN BIT(14)
#define VEND1_PORT_ABILITIES 0x8046
#define MACSEC_ABILITY BIT(5)
#define PTP_ABILITY BIT(3)
#define VEND1_PORT_FUNC_IRQ_EN 0x807A
#define MACSEC_IRQS BIT(5)
#define PTP_IRQS BIT(3)
#define VEND1_PTP_IRQ_ACK 0x9008
@@ -148,7 +151,6 @@
#define TS_SEC_MASK GENMASK(1, 0)
#define VEND1_PORT_FUNC_ENABLES 0x8048
#define PTP_ENABLE BIT(3)
#define PHY_TEST_ENABLE BIT(0)
@@ -281,25 +283,6 @@ struct nxp_c45_phy_data {
irqreturn_t *irq_status);
};
struct nxp_c45_phy {
const struct nxp_c45_phy_data *phy_data;
struct phy_device *phydev;
struct mii_timestamper mii_ts;
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
struct sk_buff_head tx_queue;
struct sk_buff_head rx_queue;
/* used to access the PTP registers atomic */
struct mutex ptp_lock;
int hwts_tx;
int hwts_rx;
u32 tx_delay;
u32 rx_delay;
struct timespec64 extts_ts;
int extts_index;
bool extts;
};
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
@@ -1215,12 +1198,25 @@ static int nxp_c45_start_op(struct phy_device *phydev)
static int nxp_c45_config_intr(struct phy_device *phydev)
{
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
if (ret)
return ret;
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
else
return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
if (ret)
return ret;
return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
static int tja1103_config_intr(struct phy_device *phydev)
@@ -1286,6 +1282,7 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
}
data->nmi_handler(phydev, &ret);
nxp_c45_handle_macsec_interrupt(phydev, &ret);
return ret;
}
@@ -1611,6 +1608,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
nxp_c45_counters_enable(phydev);
nxp_c45_ptp_init(phydev);
ret = nxp_c45_macsec_config_init(phydev);
if (ret)
return ret;
return nxp_c45_start_op(phydev);
}
@@ -1626,7 +1626,9 @@ static int nxp_c45_get_features(struct phy_device *phydev)
static int nxp_c45_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv;
int ptp_ability;
bool macsec_ability;
int phy_abilities;
bool ptp_ability;
int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
@@ -1642,9 +1644,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
mutex_init(&priv->ptp_lock);
ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PORT_ABILITIES);
ptp_ability = !!(ptp_ability & PTP_ABILITY);
phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PORT_ABILITIES);
ptp_ability = !!(phy_abilities & PTP_ABILITY);
if (!ptp_ability) {
phydev_dbg(phydev, "the phy does not support PTP");
goto no_ptp_support;
@@ -1663,6 +1665,20 @@ static int nxp_c45_probe(struct phy_device *phydev)
}
no_ptp_support:
macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
if (!macsec_ability) {
phydev_info(phydev, "the phy does not support MACsec\n");
goto no_macsec_support;
}
if (IS_ENABLED(CONFIG_MACSEC)) {
ret = nxp_c45_macsec_probe(phydev);
phydev_dbg(phydev, "MACsec support enabled.");
} else {
phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
}
no_macsec_support:
return ret;
}
@@ -1676,6 +1692,7 @@ static void nxp_c45_remove(struct phy_device *phydev)
skb_queue_purge(&priv->tx_queue);
skb_queue_purge(&priv->rx_queue);
nxp_c45_macsec_remove(phydev);
}
static void tja1103_counters_enable(struct phy_device *phydev)

drivers/net/phy/nxp-c45-tja11xx.h

@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* NXP C45 PHY driver header file
* Copyright 2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
#include <linux/ptp_clock_kernel.h>
#define VEND1_PORT_FUNC_ENABLES 0x8048
struct nxp_c45_macsec;
struct nxp_c45_phy {
const struct nxp_c45_phy_data *phy_data;
struct phy_device *phydev;
struct mii_timestamper mii_ts;
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
struct sk_buff_head tx_queue;
struct sk_buff_head rx_queue;
/* used to access the PTP registers atomic */
struct mutex ptp_lock;
int hwts_tx;
int hwts_rx;
u32 tx_delay;
u32 rx_delay;
struct timespec64 extts_ts;
int extts_index;
bool extts;
struct nxp_c45_macsec *macsec;
};
#if IS_ENABLED(CONFIG_MACSEC)
int nxp_c45_macsec_config_init(struct phy_device *phydev);
int nxp_c45_macsec_probe(struct phy_device *phydev);
void nxp_c45_macsec_remove(struct phy_device *phydev);
void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
irqreturn_t *ret);
#else
static inline
int nxp_c45_macsec_config_init(struct phy_device *phydev)
{
return 0;
}
static inline
int nxp_c45_macsec_probe(struct phy_device *phydev)
{
return 0;
}
static inline
void nxp_c45_macsec_remove(struct phy_device *phydev)
{
}
static inline
void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
irqreturn_t *ret)
{
}
#endif

include/linux/skbuff.h

@@ -4007,6 +4007,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);

include/net/macsec.h

@@ -247,6 +247,23 @@ struct macsec_secy {
/**
* struct macsec_context - MACsec context for hardware offloading
* @netdev: a valid pointer to a struct net_device if @offload ==
* MACSEC_OFFLOAD_MAC
* @phydev: a valid pointer to a struct phy_device if @offload ==
* MACSEC_OFFLOAD_PHY
* @offload: MACsec offload status
* @secy: pointer to a MACsec SecY
* @rx_sc: pointer to a RX SC
* @update_pn: when updating the SA, update the next PN
* @assoc_num: association number of the target SA
* @key: key of the target SA
* @rx_sa: pointer to an RX SA if a RX SA is added/updated/removed
* @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed
* @tx_sc_stats: pointer to TX SC stats structure
* @tx_sa_stats: pointer to TX SA stats structure
* @rx_sc_stats: pointer to RX SC stats structure
* @rx_sa_stats: pointer to RX SA stats structure
* @dev_stats: pointer to dev stats structure
*/
struct macsec_context {
union {
@@ -277,6 +294,33 @@ struct macsec_context {
/**
* struct macsec_ops - MACsec offloading operations
* @mdo_dev_open: called when the MACsec interface transitions to the up state
* @mdo_dev_stop: called when the MACsec interface transitions to the down
* state
* @mdo_add_secy: called when a new SecY is added
* @mdo_upd_secy: called when the SecY flags are changed or the MAC address of
* the MACsec interface is changed
* @mdo_del_secy: called when the hw offload is disabled or the MACsec
* interface is removed
* @mdo_add_rxsc: called when a new RX SC is added
* @mdo_upd_rxsc: called when a certain RX SC is updated
* @mdo_del_rxsc: called when a certain RX SC is removed
* @mdo_add_rxsa: called when a new RX SA is added
* @mdo_upd_rxsa: called when a certain RX SA is updated
* @mdo_del_rxsa: called when a certain RX SA is removed
* @mdo_add_txsa: called when a new TX SA is added
* @mdo_upd_txsa: called when a certain TX SA is updated
* @mdo_del_txsa: called when a certain TX SA is removed
* @mdo_get_dev_stats: called when dev stats are read
* @mdo_get_tx_sc_stats: called when TX SC stats are read
* @mdo_get_tx_sa_stats: called when TX SA stats are read
* @mdo_get_rx_sc_stats: called when RX SC stats are read
* @mdo_get_rx_sa_stats: called when RX SA stats are read
* @mdo_insert_tx_tag: called to insert the TX tag
* @needed_headroom: number of bytes reserved at the beginning of the sk_buff
* for the TX tag
* @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
* TX tag
*/
struct macsec_ops {
/* Device wide */
@@ -303,6 +347,11 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
/* Offload tag */
int (*mdo_insert_tx_tag)(struct phy_device *phydev,
struct sk_buff *skb);
unsigned int needed_headroom;
unsigned int needed_tailroom;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
@@ -325,4 +374,9 @@ static inline void *macsec_netdev_priv(const struct net_device *dev)
return netdev_priv(dev);
}
static inline u64 sci_to_cpu(sci_t sci)
{
return be64_to_cpu((__force __be64)sci);
}
#endif /* _NET_MACSEC_H_ */
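
As a hedged illustration of how a PHY driver might wire up the new
offload-tag hooks above (my_phy_insert_tx_tag, my_phy_macsec_ops and
MY_PHY_TAG_LEN are invented names, not the TJA11XX driver's actual
code):

#include <linux/phy.h>
#include <net/macsec.h>

#define MY_PHY_TAG_LEN	32	/* assumed placeholder tag size */

static int my_phy_insert_tx_tag(struct phy_device *phydev,
				struct sk_buff *skb)
{
	/* Write the placeholder EtherType and padding into the frame;
	 * the hardware will overwrite them with the SecTAG and ICV.
	 */
	return 0;
}

static const struct macsec_ops my_phy_macsec_ops = {
	/* ... mdo_add_secy() and the other mdo_* hooks ... */
	.mdo_insert_tx_tag	= my_phy_insert_tx_tag,
	.needed_headroom	= MY_PHY_TAG_LEN,
	.needed_tailroom	= 0,
};

The MACsec core then propagates these values to the macsec netdevice in
macsec_set_head_tail_room(), and macsec_needs_tx_tag() arms the per-skb
insertion in macsec_start_xmit().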

net/core/skbuff.c

@@ -5995,6 +5995,31 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
}
EXPORT_SYMBOL(skb_ensure_writable);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = dev->needed_headroom;
int needed_tailroom = dev->needed_tailroom;
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
* the packet, once the conduit interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
needed_tailroom += ETH_ZLEN - skb->len;
/* skb_headroom() returns unsigned int... */
needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
/* No reallocation needed, yay! */
return 0;
return pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable_head_tail);
/* remove VLAN header from packet and update csum accordingly.
* expects a non skb_vlan_tag_present skb with a vlan tag payload
*/
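
A short usage sketch of the new helper (my_xmit is a hypothetical
transmit handler, not code from this commit): a device that sets
needed_headroom/needed_tailroom calls it once before editing the frame,
and it reallocates only when the reserved room is missing or the skb is
cloned.

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_ensure_writable_head_tail(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... the head and tail of the frame are now safe to edit ... */
	return NETDEV_TX_OK;
}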

net/dsa/user.c

@@ -920,30 +920,6 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = dev->needed_headroom;
int needed_tailroom = dev->needed_tailroom;
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
* the packet, once the conduit interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
needed_tailroom += ETH_ZLEN - skb->len;
/* skb_headroom() returns unsigned int... */
needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
/* No reallocation needed, yay! */
return 0;
return pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
}
static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_user_priv *p = netdev_priv(dev);
@@ -956,13 +932,14 @@ static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
/* Handle tx timestamp if any */
dsa_skb_tx_timestamp(p, skb);
if (dsa_realloc_skb(skb, dev)) {
if (skb_ensure_writable_head_tail(skb, dev)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* needed_tailroom should still be 'warm' in the cache line from
* dsa_realloc_skb(), which has also ensured that padding is safe.
* skb_ensure_writable_head_tail(), which has also ensured that
* padding is safe.
*/
if (dev->needed_tailroom)
eth_skb_pad(skb);