ravb: implement MTU change while device is up

Pre-allocates buffers sufficient for the maximum supported MTU (2026) in
order to eliminate the possibility of resource exhaustion when changing the
MTU while the device is up.
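
As a sanity check (not part of the patch), the fixed buffer size works out to 2046 bytes: a maximum-MTU frame plus Ethernet and VLAN headers (the FCS is not stored in the buffer), plus 2 bytes of slack for the checksum value used by RX checksum offload, hence the sizeof(__sum16) term. A minimal sketch of that arithmetic, with the standard 14/4/4-byte header sizes written out as stand-ins for the <linux/if_ether.h> and <linux/if_vlan.h> constants:

    #include <stdio.h>

    /* Back-of-the-envelope check of the fixed buffer size (a sketch, not
     * driver code).  ETH_HLEN, VLAN_HLEN and ETH_FCS_LEN stand in for the
     * usual <linux/if_ether.h>/<linux/if_vlan.h> values; unsigned short
     * stands in for __sum16.
     */
    #define ETH_HLEN	14	/* Ethernet header */
    #define VLAN_HLEN	4	/* 802.1Q tag */
    #define ETH_FCS_LEN	4	/* frame check sequence */

    int main(void)
    {
    	unsigned int rx_buf_sz = 2048 - ETH_FCS_LEN + sizeof(unsigned short); /* 2046 */
    	unsigned int max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);   /* 2026 */
    	/* Worst case the driver has to store: max MTU + L2 headers (no FCS)
    	 * + the trailing 2-byte checksum, which is exactly rx_buf_sz bytes.
    	 */
    	unsigned int worst_case = max_mtu + ETH_HLEN + VLAN_HLEN + sizeof(unsigned short);

    	printf("RX_BUF_SZ = %u, worst-case frame storage = %u\n",
    	       rx_buf_sz, worst_case);
    	return 0;
    }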

Signed-off-by: Ulrich Hecht <uli+renesas@fpond.eu>
Reviewed-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ulrich Hecht authored 2019-11-14 02:49:49 +01:00; committed by David S. Miller
commit 15fb35fa9f (parent d7f9f47d4d)
2 changed files with 16 additions and 13 deletions

drivers/net/ethernet/renesas/ravb.h

@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
 
+#define RX_BUF_SZ	(2048 - ETH_FCS_LEN + sizeof(__sum16))
+
 /* TX descriptors per packet */
 #define NUM_TX_DESC_GEN2	2
 #define NUM_TX_DESC_GEN3	1
@@ -1018,7 +1020,6 @@ struct ravb_private {
 	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
 	u32 cur_tx[NUM_TX_QUEUE];
 	u32 dirty_tx[NUM_TX_QUEUE];
-	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	struct napi_struct napi[NUM_RX_QUEUE];
 	struct work_struct work;
 	/* MII transceiver section. */

drivers/net/ethernet/renesas/ravb_main.c

@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 					       le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 priv->rx_buf_sz,
+						 RX_BUF_SZ,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->rx_buf_sz,
+					  RX_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	int ring_size;
 	int i;
 
-	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 priv->rx_buf_sz,
+					 RX_BUF_SZ,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       priv->rx_buf_sz +
+					       RX_BUF_SZ +
 					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-	if (netif_running(ndev))
-		return -EBUSY;
+	struct ravb_private *priv = netdev_priv(ndev);
 
 	ndev->mtu = new_mtu;
+
+	if (netif_running(ndev)) {
+		synchronize_irq(priv->emac_irq);
+		ravb_emac_init(ndev);
+	}
+
 	netdev_update_features(ndev);
 
 	return 0;
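
With the buffers already sized for the largest MTU, ravb_change_mtu() no longer has to refuse the request with -EBUSY on a running interface: it waits for any in-flight EMAC interrupt via synchronize_irq() and then re-runs ravb_emac_init() so the EMAC is reprogrammed for the new frame length derived from ndev->mtu. A minimal user-space sketch (not part of the patch) of the now-permitted operation through the SIOCSIFMTU ioctl; the interface name "eth0" and the MTU value are placeholders, and "ip link set dev eth0 mtu 2026" exercises the same ndo_change_mtu path:

    /*
     * Change the MTU of a running interface via SIOCSIFMTU, which this
     * patch makes succeed instead of returning -EBUSY.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
    	struct ifreq ifr;
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	if (fd < 0) {
    		perror("socket");
    		return 1;
    	}

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */
    	ifr.ifr_mtu = 2026;				/* max MTU per the commit message */

    	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {		/* reaches ndo_change_mtu */
    		perror("SIOCSIFMTU");
    		close(fd);
    		return 1;
    	}

    	close(fd);
    	return 0;
    }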