linux-stable/drivers/net/ethernet/renesas/ravb_main.c

// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
*
* Copyright (C) 2014-2019 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* Based on the SuperH Ethernet driver
*/
#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
#include "ravb.h"
#define RAVB_DEF_MSG_ENABLE \
(NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
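/* Read-modify-write helper: clear the bits in @clear, then set those in @set. */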
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}
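/* Poll @reg until the bits selected by @mask read back as @value.
 * Gives up after 10000 polls at 10 us intervals (~100 ms) with -ETIMEDOUT.
 */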
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
int i;
for (i = 0; i < 10000; i++) {
if ((ravb_read(ndev, reg) & mask) == value)
return 0;
udelay(10);
}
return -ETIMEDOUT;
}
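/* Request the AVB-DMAC operating mode @opmode via CCC.OPC and wait for the
 * CSR.OPS status field (one bit per mode, hence the 1U << shift) to confirm
 * the switch.
 */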
static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
u32 csr_ops = 1U << (opmode & CCC_OPC);
u32 ccc_mask = CCC_OPC;
int error;
/* If gPTP active in config mode is supported, it needs to be configured
 * along with CSEL and the operating mode in the same access. This is a
 * hardware limitation.
 */
if (opmode & CCC_GAC)
ccc_mask |= CCC_GAC | CCC_CSEL;
/* Set operating mode */
ravb_modify(ndev, CCC, ccc_mask, opmode);
/* Check if the operating mode is changed to the requested one */
error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
if (error) {
netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
opmode & CCC_OPC);
}
return error;
}
static void ravb_set_rate_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
}
static void ravb_set_rate_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
case 100: /* 100BASE */
ravb_write(ndev, GECMR_SPEED_100, GECMR);
break;
case 1000: /* 1000BASE */
ravb_write(ndev, GECMR_SPEED_1000, GECMR);
break;
}
}
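/* Allocate an Rx skb over-sized by RAVB_ALIGN - 1 bytes and reserve headroom
 * so that skb->data is aligned to RAVB_ALIGN (128 bytes), as required by the
 * AVB-DMAC.
 */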
static struct sk_buff *
ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
gfp_t gfp_mask)
{
struct sk_buff *skb;
u32 reserve;
skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
gfp_mask);
if (!skb)
return NULL;
reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
if (reserve)
skb_reserve(skb, RAVB_ALIGN - reserve);
return skb;
}
/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
struct net_device *ndev)
{
int ret;
ret = of_get_ethdev_address(np, ndev);
if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
u8 addr[ETH_ALEN];
addr[0] = (mahr >> 24) & 0xFF;
addr[1] = (mahr >> 16) & 0xFF;
addr[2] = (mahr >> 8) & 0xFF;
addr[3] = (mahr >> 0) & 0xFF;
addr[4] = (malr >> 8) & 0xFF;
addr[5] = (malr >> 0) & 0xFF;
eth_hw_addr_set(ndev, addr);
}
}
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
struct ravb_private *priv = container_of(ctrl, struct ravb_private,
mdiobb);
ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}
/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}
/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}
/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}
/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
struct ravb_private *priv = container_of(ctrl, struct ravb_private,
mdiobb);
return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}
/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = ravb_set_mdc,
.set_mdio_dir = ravb_set_mdio_dir,
.set_mdio_data = ravb_set_mdio_data,
.get_mdio_data = ravb_get_mdio_data,
};
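/* Return the i-th Rx descriptor of queue @q; the descriptor size is
 * hardware-dependent, so the raw ring is indexed in bytes.
 */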
static struct ravb_rx_desc *
ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
unsigned int i)
{
return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
}
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *desc;
unsigned int entry;
int free_num = 0;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
num_tx_desc);
desc = &priv->tx_ring[q][entry];
txed = desc->die_dt == DT_FEMPTY;
if (free_txed_only && !txed)
break;
/* Descriptor type must be checked before all other reads */
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
if (priv->tx_skb[q][entry / num_tx_desc]) {
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
if (entry % num_tx_desc == num_tx_desc - 1) {
entry /= num_tx_desc;
dev_kfree_skb_any(priv->tx_skb[q][entry]);
priv->tx_skb[q][entry] = NULL;
if (txed)
stats->tx_packets++;
}
free_num++;
}
if (txed)
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
return free_num;
}
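/* Unmap the Rx buffers of queue @q and free its descriptor ring. */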
static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
unsigned int i;
if (!priv->rx_ring[q].raw)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
}
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
/* Free RX skb ringbuffer */
if (priv->rx_skb[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++)
dev_kfree_skb(priv->rx_skb[q][i]);
}
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
/* Free TX skb ringbuffer.
 * SKBs are freed by the ravb_tx_free() call above.
 */
kfree(priv->tx_skb[q]);
priv->tx_skb[q] = NULL;
}
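/* Format the Rx descriptor ring of queue @q: map each Rx skb for DMA, mark
 * the descriptors empty (DT_FEMPTY) and terminate the ring with a link
 * descriptor (DT_LINKFIX) back to its base.
 */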
static void ravb_rx_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_rx_desc *rx_desc;
unsigned int rx_ring_size;
dma_addr_t dma_addr;
unsigned int i;
rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
memset(priv->rx_ring[q].raw, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
if (dma_mapping_error(ndev->dev.parent, dma_addr))
rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
num_tx_desc;
unsigned int i;
priv->cur_rx[q] = 0;
priv->cur_tx[q] = 0;
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
ravb_rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
i++, tx_desc++) {
tx_desc->die_dt = DT_EEMPTY;
if (num_tx_desc > 1) {
tx_desc++;
tx_desc->die_dt = DT_EEMPTY;
}
}
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
/* RX descriptor base address for best effort */
desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
desc->die_dt = DT_LINKFIX; /* type */
desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
/* TX descriptor base address for best effort */
desc = &priv->desc_bat[q];
desc->die_dt = DT_LINKFIX; /* type */
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
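/* Allocate the Rx descriptor ring of queue @q from coherent DMA memory,
 * with one extra entry for the terminating link descriptor.
 */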
static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
struct sk_buff *skb;
unsigned int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
if (!priv->rx_skb[q] || !priv->tx_skb[q])
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
if (!skb)
goto error;
priv->rx_skb[q][i] = skb;
}
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
DPTR_ALIGN - 1, GFP_KERNEL);
if (!priv->tx_align[q])
goto error;
}
/* Allocate all RX descriptors. */
if (!ravb_alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
if (!priv->tx_ring[q])
goto error;
return 0;
error:
ravb_ring_free(ndev, q);
return -ENOMEM;
}
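/* Set up GbEth checksum offload: CSR1 (Tx) and CSR2 (Rx) are programmed with
 * the offload engines halted (CSR0.TPE/RPE cleared), then the engines are
 * re-enabled. A feature is dropped if the engines fail to halt in time.
 */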
static void ravb_csum_init_gbeth(struct net_device *ndev)
{
bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
bool rx_enable = ndev->features & NETIF_F_RXCSUM;
if (!(tx_enable || rx_enable))
goto done;
ravb_write(ndev, 0, CSR0);
if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
netdev_err(ndev, "Timeout enabling hardware checksum\n");
if (tx_enable)
ndev->features &= ~NETIF_F_HW_CSUM;
if (rx_enable)
ndev->features &= ~NETIF_F_RXCSUM;
} else {
if (tx_enable)
ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
if (rx_enable)
ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
CSR2);
}
done:
ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
}
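/* E-MAC init for GbEth: select MII or RGMII on the xMII interface and the
 * corresponding CXR31 link selection, then set the frame length limit,
 * MAC address, checksum offload and interrupt masks.
 */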
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
} else {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
CXR31_SEL_LINK0);
}
/* Receive frame limit set register */
ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
ECMR_TE | ECMR_RE | ECMR_RCPT |
ECMR_TXF | ECMR_RXF, ECMR);
ravb_set_rate_gbeth(ndev);
/* Set MAC address */
ravb_write(ndev,
(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
}
static void ravb_emac_init_rcar(struct net_device *ndev)
{
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
ravb_set_rate_rcar(ndev);
/* Set MAC address */
ravb_write(ndev,
(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
ravb_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
info->emac_init(ndev);
}
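/* DMAC init for GbEth: only the best-effort (BE) queue is used, so format a
 * single ring and enable its frame receive/transmit interrupts.
 */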
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
if (error)
return error;
/* Descriptor format */
ravb_ring_format(ndev, RAVB_BE);
/* Set DMAC RX */
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
ravb_write(ndev, 0, TCCR);
/* Frame receive */
ravb_write(ndev, RIC0_FRE0, RIC0);
/* Disable FIFO full warning */
ravb_write(ndev, 0x0, RIC1);
/* Receive FIFO full error, descriptor empty */
ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
ravb_write(ndev, TIC_FTE0, TIC);
return 0;
}
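/* DMAC init for R-Car: bring up both the best-effort (BE) and network
 * control (NC) queues, and route per-queue interrupts when the SoC provides
 * multiple IRQs.
 */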
static int ravb_dmac_init_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
error = ravb_ring_init(ndev, RAVB_BE);
if (error)
return error;
error = ravb_ring_init(ndev, RAVB_NC);
if (error) {
ravb_ring_free(ndev, RAVB_BE);
return error;
}
/* Descriptor format */
ravb_ring_format(ndev, RAVB_BE);
ravb_ring_format(ndev, RAVB_NC);
/* Set AVB RX */
ravb_write(ndev,
RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
/* Timestamp enable */
ravb_write(ndev, TCCR_TFEN, TCCR);
/* Interrupt init: */
if (info->multi_irqs) {
/* Clear DIL.DPLx */
ravb_write(ndev, 0, DIL);
/* Set queue specific interrupt */
ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
}
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
/* Disable FIFO full warning */
ravb_write(ndev, 0, RIC1);
/* Receive FIFO full error, descriptor empty */
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
return 0;
}
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
/* Set CONFIG mode */
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
error = info->dmac_init(ndev);
if (error)
return error;
/* Setting the control will start the AVB-DMAC process. */
return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}
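/* Deliver Tx hardware timestamps: drain the timestamp FIFO and match each
 * entry by tag against the skbs queued on ts_skb_list; stale entries ahead
 * of the match are dropped.
 */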
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb;
struct timespec64 ts;
u16 tag, tfa_tag;
int count;
u32 tfa2;
count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
while (count--) {
tfa2 = ravb_read(ndev, TFA2);
tfa_tag = (tfa2 & TFA2_TST) >> 16;
ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
ravb_read(ndev, TFA1);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
list) {
skb = ts_skb->skb;
tag = ts_skb->tag;
list_del(&ts_skb->list);
kfree(ts_skb);
if (tag == tfa_tag) {
skb_tstamp_tx(skb, &shhwtstamps);
dev_consume_skb_any(skb);
break;
} else {
dev_kfree_skb_any(skb);
}
}
ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
}
}
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
__wsum csum_ip_hdr, csum_proto;
u8 *hw_csum;
/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
 * bytes appended to the packet data. The first 2 bytes are the IP header
 * checksum and the last 2 bytes are the protocol checksum.
 */
if (unlikely(skb->len < sizeof(__sum16) * 2))
return;
hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
hw_csum -= sizeof(__sum16);
csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb_trim(skb, skb->len - 2 * sizeof(__sum16));
/* TODO: IPV6 Rx checksum */
if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
* appended to packet data
*/
if (unlikely(skb->len < sizeof(__sum16)))
return;
hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb->ip_summed = CHECKSUM_COMPLETE;
skb_trim(skb, skb->len - sizeof(__sum16));
}
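/* Detach the skb currently bound to a best effort ring entry and unmap its
 * DMA buffer so the CPU can safely access the received data.
 */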
static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
struct ravb_rx_desc *desc)
{
struct ravb_private *priv = netdev_priv(ndev);
struct sk_buff *skb;
skb = priv->rx_skb[RAVB_BE][entry];
priv->rx_skb[RAVB_BE][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
ALIGN(priv->info->rx_max_frame_size, 16),
DMA_FROM_DEVICE);
return skb;
}
/* Packet receive function for Gigabit Ethernet */
static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
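/* At most this many descriptors may hold frames before the receive
 * pointer catches up with the refill (dirty) pointer.
 */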
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
desc = &priv->rx_ring[q].desc[entry];
for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
if (desc_status & MSC_MC)
stats->multicast++;
if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
stats->rx_errors++;
if (desc_status & MSC_CRC)
stats->rx_crc_errors++;
if (desc_status & MSC_RFE)
stats->rx_frame_errors++;
if (desc_status & (MSC_RTLF | MSC_RTSF))
stats->rx_length_errors++;
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
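/* A frame can span multiple descriptors: DT_FSINGLE carries a
 * complete frame, while DT_FSTART/DT_FMID/DT_FEND fragments are
 * linearized into rx_1st_skb before being passed up the stack.
 */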
die_dt = desc->die_dt & 0xF0;
switch (die_dt) {
case DT_FSINGLE:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(priv->rx_1st_skb, pkt_len);
break;
case DT_FMID:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
pkt_len);
skb_put(priv->rx_1st_skb, pkt_len);
dev_kfree_skb(skb);
break;
case DT_FEND:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
pkt_len);
skb_put(priv->rx_1st_skb, pkt_len);
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
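/* The checksum trailer sits at the tail of the fully reassembled
 * frame, so validate rx_1st_skb rather than the freed fragment.
 */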
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(priv->rx_1st_skb);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
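/* dirty_rx trails cur_rx: each consumed entry gets a fresh skb and DMA
 * mapping before being handed back to the hardware as DT_FEMPTY.
 */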
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break;
dma_addr = dma_map_single(ndev->dev.parent,
skb->data,
priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
* which should prevent DMA from happening...
*/
if (dma_mapping_error(ndev->dev.parent, dma_addr))
desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
}
/* Descriptor type must be set after all the above writes */
dma_wmb();
desc->die_dt = DT_FEMPTY;
}
stats->rx_packets += rx_packets;
*quota -= rx_packets;
return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
u8 desc_status;
u16 pkt_len;
int limit;
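/* Bound the loop by both the number of descriptors the hardware may
 * have filled and the remaining NAPI quota.
 */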
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
desc = &priv->rx_ring[q].ex_desc[entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
if (--boguscnt < 0)
break;
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
if (desc_status & MSC_MC)
stats->multicast++;
if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
MSC_CEEF)) {
stats->rx_errors++;
if (desc_status & MSC_CRC)
stats->rx_crc_errors++;
if (desc_status & MSC_RFE)
stats->rx_frame_errors++;
if (desc_status & (MSC_RTLF | MSC_RTSF))
stats->rx_length_errors++;
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
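/* PTP event frames are received on the NC queue; only take a
 * timestamp when the configured filter covers the queue the
 * frame arrived on.
 */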
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
if (get_ts) {
struct skb_shared_hwtstamps *shhwtstamps;
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
32) | le32_to_cpu(desc->ts_sl);
ts.tv_nsec = le32_to_cpu(desc->ts_n);
shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break; /* Better luck next round. */
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
* which should prevent DMA from happening...
*/
if (dma_mapping_error(ndev->dev.parent, dma_addr))
desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
}
/* Descriptor type must be set after all the above writes */
dma_wmb();
desc->die_dt = DT_FEMPTY;
}
*quota -= limit - (++boguscnt);
return boguscnt <= 0;
}
/* Packet receive dispatch: call the SoC-specific receive handler */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
return info->receive(ndev, quota, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
{
/* Disable TX and RX */
ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}
static void ravb_rcv_snd_enable(struct net_device *ndev)
{
/* Enable TX and RX */
ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* Wait for the TX and RX DMA processes to finish and stop the AVB-DMAC */
static int ravb_stop_dma(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
/* Wait for the hardware TX process to stop */
error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
if (error)
return error;
error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
0);
if (error)
return error;
/* Stop the E-MAC's RX/TX processes. */
ravb_rcv_snd_disable(ndev);
/* Wait for the RX DMA process to stop */
error = ravb_wait(ndev, CSR, CSR_RPO, 0);
if (error)
return error;
/* Stop AVB-DMAC process */
return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 ecsr, psr;
ecsr = ravb_read(ndev, ECSR);
ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
if (ecsr & ECSR_MPD)
pm_wakeup_event(&priv->pdev->dev, 0);
if (ecsr & ECSR_ICD)
ndev->stats.tx_carrier_errors++;
if (ecsr & ECSR_LCHNG) {
/* Link changed */
if (priv->no_avb_link)
return;
psr = ravb_read(ndev, PSR);
if (priv->avb_link_active_low)
psr ^= PSR_LMON;
if (!(psr & PSR_LMON)) {
/* Disable RX and TX */
ravb_rcv_snd_disable(ndev);
} else {
/* Enable RX and TX */
ravb_rcv_snd_enable(ndev);
}
}
}
static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_HANDLED;
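/* The line may be shared and can fire while the hardware is runtime
 * suspended; take a usage-counter reference without resuming so the
 * clocks cannot be gated during the check, and bail out if the
 * device is not active.
 */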
pm_runtime_get_noresume(dev);
if (unlikely(!pm_runtime_active(dev))) {
result = IRQ_NONE;
goto out_rpm_put;
}
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
out_rpm_put:
pm_runtime_put_noidle(dev);
return result;
}
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
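/* Interrupt status bits are cleared by writing 0; reserved bits must
 * be written as 1.
 */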
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int (best effort queue) */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
/* Receive Descriptor Empty int (network control queue) */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;
/* Receive FIFO Overflow int */
if (ris2 & RIS2_RFFF)
priv->rx_fifo_errors++;
}
}
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
u32 ris0 = ravb_read(ndev, RIS0);
u32 ric0 = ravb_read(ndev, RIC0);
u32 tis = ravb_read(ndev, TIS);
u32 tic = ravb_read(ndev, TIC);
if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
if (!info->irq_en_dis) {
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
ravb_write(ndev, tic & ~BIT(q), TIC);
} else {
ravb_write(ndev, BIT(q), RID0);
ravb_write(ndev, BIT(q), TID);
}
__napi_schedule(&priv->napi[q]);
} else {
netdev_warn(ndev,
"ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
ris0, ric0);
netdev_warn(ndev,
" tx status 0x%08x, tx mask 0x%08x.\n",
tis, tic);
}
return true;
}
return false;
}
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
return false;
}
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
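/* As in ravb_emac_interrupt(): guard against touching registers
 * while the device is runtime suspended.
 */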
pm_runtime_get_noresume(dev);
if (unlikely(!pm_runtime_active(dev)))
goto out_rpm_put;
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
/* Received and transmitted interrupts */
if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
int q;
/* Timestamp updated */
if (ravb_timestamp_interrupt(ndev))
result = IRQ_HANDLED;
/* Network control and best effort queue RX/TX */
if (info->nc_queues) {
for (q = RAVB_NC; q >= RAVB_BE; q--) {
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
}
} else {
if (ravb_queue_interrupt(ndev, RAVB_BE))
result = IRQ_HANDLED;
}
}
/* E-MAC status summary */
if (iss & ISS_MS) {
ravb_emac_interrupt_unlocked(ndev);
result = IRQ_HANDLED;
}
/* Error status summary */
if (iss & ISS_ES) {
ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
/* gPTP interrupt status summary */
if (iss & ISS_CGIS) {
ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
}
spin_unlock(&priv->lock);
net: ravb: Move getting/requesting IRQs in the probe() method

The runtime PM implementation will disable clocks at the end of ravb_probe(). As some IP variants switch to reset mode as a result of setting module standby through the clock disable APIs, to implement runtime PM the resource parsing and requesting are moved into the probe function and the IP settings are moved into the open function. This is done because at the end of probe some IP variants will switch to reset mode anyway and the register contents are lost. Keeping only register-settings operations in ravb_open()/ravb_close() also makes them faster.

This commit moves the IRQ requests to ravb_probe() so that all the IRQs are ready when the interface is opened. As getting/requesting IRQs is now done in a single place, there is no need to keep intermediary data (like the ravb_rx_irqs[] and ravb_tx_irqs[] arrays or the IRQs in struct ravb_private).

To avoid accessing the IP registers while the IP is runtime-suspended (e.g. in the time frame between the probe requesting shared IRQs and the IP clocks being enabled), pm_runtime_active() checks were introduced in the interrupt handlers. The device runtime PM usage counter is incremented to avoid disabling the device's clocks while the check is in progress (if any).

This is a preparatory change to add runtime PM support for all IP variants.

Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
out_rpm_put:
pm_runtime_put_noidle(dev);
return result;
}
/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
pm_runtime_get_noresume(dev);
if (unlikely(!pm_runtime_active(dev)))
goto out_rpm_put;
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
/* Timestamp updated */
if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
result = IRQ_HANDLED;
/* Error status summary */
if (iss & ISS_ES) {
ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
/* gPTP interrupt status summary */
if (iss & ISS_CGIS) {
ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
}
spin_unlock(&priv->lock);
out_rpm_put:
pm_runtime_put_noidle(dev);
return result;
}
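Note the guard pattern shared by the interrupt handlers in this file: pm_runtime_get_noresume() only raises the usage counter and never resumes the hardware, so the pm_runtime_active() test can reject an interrupt that fires while the IP is runtime-suspended (and its registers are inaccessible) without racing against a concurrent suspend; see the "Move getting/requesting IRQs" changelog above.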
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
pm_runtime_get_noresume(dev);
if (unlikely(!pm_runtime_active(dev)))
goto out_rpm_put;
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
out_rpm_put:
pm_runtime_put_noidle(dev);
return result;
}
static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}
static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}
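For context, a minimal sketch (not the driver's verbatim probe code) of how these per-queue handlers could be hooked from the probe path on a multi-IRQ platform, along the lines of the request_irq() helper mentioned in the changelog above; the DT interrupt name "ch0" for the BE RX channel is an assumption taken from the R-Car Gen3 bindings:
static int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
			 struct net_device *ndev, struct device *dev,
			 const char *ch)
{
	char *name;
	int error;
	/* Build a per-channel name such as "eth0:ch0:rx_be" for /proc/interrupts */
	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
	if (!name)
		return -ENOMEM;
	error = devm_request_irq(dev, irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);
	return error;
}
Usage from probe, once the per-queue IRQ has been looked up by name:
	irq = platform_get_irq_byname(pdev, "ch0");	/* BE RX channel (assumed name) */
	if (irq < 0)
		return irq;
	error = ravb_hook_irq(irq, ravb_be_interrupt, ndev, &pdev->dev, "ch0:rx_be");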
static int ravb_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
bool unmask;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
unmask = !ravb_rx(ndev, &quota, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
if (info->nc_queues)
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
if (!unmask)
goto out;
napi_complete(napi);
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
if (!info->irq_en_dis) {
ravb_modify(ndev, RIC0, mask, mask);
ravb_modify(ndev, TIC, mask, mask);
} else {
ravb_write(ndev, mask, RIE0);
ravb_write(ndev, mask, TIE);
}
spin_unlock_irqrestore(&priv->lock, flags);
out:
return budget - quota;
}
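Worked example of the accounting above: ravb_rx() decrements quota for every descriptor it completes, so with budget = 64 and ten packets received, quota ends at 54 and the poll function returns 64 - 54 = 10, the number of packets processed. Returning a value smaller than budget after napi_complete() is how the handler tells the NAPI core that the queue is drained and RX/TX interrupts have been re-enabled.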
static void ravb_set_duplex_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
}
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev = ndev->phydev;
bool new_state = false;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
/* Disable TX and RX right over here, if E-MAC change is ignored */
if (priv->no_avb_link)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
if (info->half_duplex && phydev->duplex != priv->duplex) {
new_state = true;
priv->duplex = phydev->duplex;
ravb_set_duplex_gbeth(ndev);
}
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
info->set_rate(ndev);
}
if (!priv->link) {
ravb_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = true;
priv->link = phydev->link;
}
} else if (priv->link) {
new_state = true;
priv->link = 0;
priv->speed = 0;
if (info->half_duplex)
priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
if (priv->no_avb_link && phydev->link)
ravb_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&priv->lock, flags);
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
}
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev;
struct device_node *pn;
ravb: Mask PHY mode to avoid inserting delays twice

Until recently, the Micrel KSZ9031 PHY driver ignored any PHY mode ("RGMII-*ID") settings, but used the hardware defaults, augmented by explicit configuration of individual skew values using the "*-skew-ps" DT properties. The lack of PHY mode support was compensated by the EtherAVB MAC driver, which configures TX and/or RX internal delay itself, based on the PHY mode.

However, now that the KSZ9031 driver has gained PHY mode support, delays may be configured twice, causing regressions. E.g. on the Renesas Salvator-X board with R-Car M3-W ES1.0, TX performance dropped from ca. 400 Mbps to 0.1-0.3 Mbps, as measured by nuttcp.

As the internal delay configuration supported by the KSZ9031 PHY is too limited for some use cases, the ability to configure the MAC internal delay is deemed useful and necessary. Hence a proper fix would involve splitting internal delay configuration in two parts, one for the PHY and one for the MAC. However, this would require adding new DT properties, thus breaking DTB backwards-compatibility.

Hence fix the regression in a backwards-compatible way, by letting the EtherAVB driver mask the PHY mode when it has inserted a delay, to avoid the PHY driver adding a second delay. This also fixes messages like:

Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: *-skew-ps values should be used only with phy-mode = "rgmii"

as the PHY no longer sees the original RGMII-*ID mode.

Solving the issue by splitting configuration in two parts can be handled in future patches, and would require retaining a backwards-compatibility mode anyway.

Fixes: bcf3440c6dd78bfe ("net: phy: micrel: add phy-mode support for the KSZ9031 PHY")
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
phy_interface_t iface;
int err;
priv->link = 0;
priv->speed = 0;
priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
if (!pn) {
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
*/
if (of_phy_is_fixed_link(np)) {
err = of_phy_register_fixed_link(np);
if (err)
return err;
}
pn = of_node_get(np);
}
iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
: priv->phy_interface;
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
err = -ENOENT;
goto err_deregister_fixed_link;
}
if (!info->half_duplex) {
/* 10BASE, Pause and Asym Pause are not supported */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
/* Half Duplex is not supported */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
phy_attached_info(phydev);
return 0;
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
return err;
}
/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
int error;
error = ravb_phy_init(ndev);
if (error)
return error;
phy_start(ndev->phydev);
return 0;
}
static u32 ravb_get_msglevel(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
return priv->msg_enable;
}
static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
struct ravb_private *priv = netdev_priv(ndev);
priv->msg_enable = value;
}
static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
"rx_queue_0_dirty",
"tx_queue_0_dirty",
"rx_queue_0_packets",
"tx_queue_0_packets",
"rx_queue_0_bytes",
"tx_queue_0_bytes",
"rx_queue_0_mcast_packets",
"rx_queue_0_errors",
"rx_queue_0_crc_errors",
"rx_queue_0_frame_errors",
"rx_queue_0_length_errors",
"rx_queue_0_csum_offload_errors",
"rx_queue_0_over_errors",
};
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
"rx_queue_0_dirty",
"tx_queue_0_dirty",
"rx_queue_0_packets",
"tx_queue_0_packets",
"rx_queue_0_bytes",
"tx_queue_0_bytes",
"rx_queue_0_mcast_packets",
"rx_queue_0_errors",
"rx_queue_0_crc_errors",
"rx_queue_0_frame_errors",
"rx_queue_0_length_errors",
"rx_queue_0_missed_errors",
"rx_queue_0_over_errors",
"rx_queue_1_current",
"tx_queue_1_current",
"rx_queue_1_dirty",
"tx_queue_1_dirty",
"rx_queue_1_packets",
"tx_queue_1_packets",
"rx_queue_1_bytes",
"tx_queue_1_bytes",
"rx_queue_1_mcast_packets",
"rx_queue_1_errors",
"rx_queue_1_crc_errors",
"rx_queue_1_frame_errors",
"rx_queue_1_length_errors",
"rx_queue_1_missed_errors",
"rx_queue_1_over_errors",
};
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
struct ravb_private *priv = netdev_priv(netdev);
const struct ravb_hw_info *info = priv->info;
switch (sset) {
case ETH_SS_STATS:
return info->stats_len;
default:
return -EOPNOTSUPP;
}
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int num_rx_q;
int i = 0;
int q;
num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
/* Device-specific stats */
for (q = RAVB_BE; q < num_rx_q; q++) {
struct net_device_stats *stats = &priv->stats[q];
data[i++] = priv->cur_rx[q];
data[i++] = priv->cur_tx[q];
data[i++] = priv->dirty_rx[q];
data[i++] = priv->dirty_tx[q];
data[i++] = stats->rx_packets;
data[i++] = stats->tx_packets;
data[i++] = stats->rx_bytes;
data[i++] = stats->tx_bytes;
data[i++] = stats->multicast;
data[i++] = stats->rx_errors;
data[i++] = stats->rx_crc_errors;
data[i++] = stats->rx_frame_errors;
data[i++] = stats->rx_length_errors;
data[i++] = stats->rx_missed_errors;
data[i++] = stats->rx_over_errors;
}
}
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, info->gstrings_stats, info->gstrings_size);
break;
}
}
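The stats_len, gstrings_stats and gstrings_size fields used by the three ethtool callbacks above are expected to be filled per SoC from the matching string table; a minimal sketch of such a hw-info entry (the entry name is hypothetical, the field names are the ones referenced above plus nc_queues):
static const struct ravb_hw_info ravb_example_hw_info = {
	/* A two-queue (BE + NC) variant exports the full string table */
	.gstrings_stats	= ravb_gstrings_stats,
	.gstrings_size	= sizeof(ravb_gstrings_stats),
	.stats_len	= ARRAY_SIZE(ravb_gstrings_stats),
	.nc_queues	= 1,
};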
static void ravb_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
ring->rx_max_pending = BE_RX_RING_MAX;
ring->tx_max_pending = BE_TX_RING_MAX;
ring->rx_pending = priv->num_rx_ring[RAVB_BE];
ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}
static int ravb_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
if (ring->tx_pending > BE_TX_RING_MAX ||
ring->rx_pending > BE_RX_RING_MAX ||
ring->tx_pending < BE_TX_RING_MIN ||
ring->rx_pending < BE_RX_RING_MIN)
return -EINVAL;
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
if (error) {
netdev_err(ndev,
"cannot set ringparam! Any AVB processes are still running?\n");
return error;
}
synchronize_irq(ndev->irq);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
}
/* Set new parameters */
priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
if (netif_running(ndev)) {
error = ravb_dmac_init(ndev);
if (error) {
netdev_err(ndev,
"%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
return error;
}
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
}
return 0;
}
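From userspace this corresponds to the standard ring resize, e.g. "ethtool -G <iface> rx 512 tx 512"; the detach/re-init sequence above is why a resize momentarily stops a running interface.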
static int ravb_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
if (hw_info->gptp || hw_info->ccc_gac)
info->phc_index = ptp_clock_index(priv->ptp.clock);
return 0;
}
static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
wol->supported = WAKE_MAGIC;
wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
return 0;
}
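With magic-packet capable hardware this is driven by the usual "ethtool -s <iface> wol g" (and "wol d" to disable); variants whose hw-info lacks magic_pkt reject the request with -EOPNOTSUPP, as the check above shows.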
static const struct ethtool_ops ravb_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_msglevel = ravb_get_msglevel,
.set_msglevel = ravb_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = ravb_get_strings,
.get_ethtool_stats = ravb_get_ethtool_stats,
.get_sset_count = ravb_get_sset_count,
.get_ringparam = ravb_get_ringparam,
.set_ringparam = ravb_set_ringparam,
.get_ts_info = ravb_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_wol = ravb_get_wol,
.set_wol = ravb_set_wol,
};
static int ravb_set_config_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
if (info->gptp) {
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
} else if (info->ccc_gac) {
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
} else {
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
return error;
}
static void ravb_set_gti(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
if (!(info->gptp || info->ccc_gac))
return;
ravb_write(ndev, priv->gti_tiv, GTI);
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
static int ravb_compute_gti(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct device *dev = ndev->dev.parent;
unsigned long rate;
u64 inc;
if (!(info->gptp || info->ccc_gac))
return 0;
if (info->gptp_ref_clk)
rate = clk_get_rate(priv->gptp_clk);
else
rate = clk_get_rate(priv->clk);
if (!rate)
return -EINVAL;
inc = div64_ul(1000000000ULL << 20, rate);
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
inc, GTI_TIV_MIN, GTI_TIV_MAX);
return -EINVAL;
}
priv->gti_tiv = inc;
return 0;
}
/* Set tx and rx clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
bool explicit_delay = false;
u32 delay;
if (!priv->info->internal_delay)
return;
if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
/* Valid values are 0 and 1800, according to DT bindings */
priv->rxcidm = !!delay;
explicit_delay = true;
}
if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
/* Valid values are 0 and 2000, according to DT bindings */
priv->txcidm = !!delay;
explicit_delay = true;
}
if (explicit_delay)
return;
/* Fall back to legacy rgmii-*id behavior */
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
priv->rxcidm = 1;
priv->rgmii_override = 1;
}
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
priv->txcidm = 1;
priv->rgmii_override = 1;
}
}
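In device-tree terms a board can either state the delays explicitly, e.g. "rx-internal-delay-ps = <1800>" and "tx-internal-delay-ps = <2000>" (the only non-zero values the bindings allow, per the comments above), or rely on the legacy fallback where an rgmii-id/rgmii-rxid/rgmii-txid phy-mode implies the corresponding MAC internal delay. In the fallback case rgmii_override is set so ravb_phy_init() presents plain "rgmii" to the PHY and it does not insert a second delay (see the "Mask PHY mode" changelog above).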
static void ravb_set_delay_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 set = 0;
if (!priv->info->internal_delay)
return;
if (priv->rxcidm)
set |= APSR_RDM;
if (priv->txcidm)
set |= APSR_TDM;
ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
error = pm_runtime_resume_and_get(dev);
if (error < 0)
goto out_napi_off;
/* Set AVB config mode */
error = ravb_set_config_mode(ndev);
if (error)
goto out_rpm_put;
ravb_set_delay_mode(ndev);
ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
goto out_set_reset;
ravb_emac_init(ndev);
ravb_set_gti(ndev);
/* Initialise PTP Clock driver */
if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
netif_tx_start_all_queues(ndev);
return 0;
out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
out_set_reset:
ravb_set_opmode(ndev, CCC_OPC_RESET);
out_rpm_put:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
return error;
}
/* Timeout function for Ethernet AVB */
netdev: pass the stuck queue to the timeout handler

This allows incrementing the correct timeout statistic without any mess. Down the road, devices can learn to reset just the specific queue. The patch was generated with a script that converted every driver's ndo_tx_timeout handler tree-wide, including ravb_tx_timeout in this file.
"spider_net_tx_timeout"], ["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"], ["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"], ["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"], ["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"], ["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"], ["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"], ["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"], ["drivers/net/slip/slip.c", "sl_tx_timeout"], ["include/linux/usb/usbnet.h", "usbnet_tx_timeout"], ["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], ["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"], ["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"], ["drivers/net/usb/catc.c", "catc_tx_timeout"], ["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"], ["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"], ["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"], ["drivers/net/usb/hso.c", "hso_net_tx_timeout"], ["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"], ["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"], ["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"], ["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"], ["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"], ["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"], ["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"], ["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"], ["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"], ["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"], ["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"], ["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"], ["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"], ["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"], ["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"], ["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"], ["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"], ["drivers/net/wan/cosa.c", "cosa_net_timeout"], ["drivers/net/wan/farsync.c", "fst_tx_timeout"], ["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"], ["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"], ["drivers/net/wan/x25_asy.c", "x25_asy_timeout"], ["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"], ["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"], ["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"], ["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"], ["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"], ["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"], ["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"], ["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"], ["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"], 
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"], ["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], ["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"], ["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"], ["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"], ["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"], ["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"], ["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"], ["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"], ["drivers/tty/synclink.c", "hdlcdev_tx_timeout"], ["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"], ["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"], ["net/atm/lec.c", "lec_tx_timeout"], ["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"] ); for my $p (@work) { my @pair = @$p; my $file = $pair[0]; my $func = $pair[1]; print STDERR $file , ": ", $func,"\n"; our @ARGV = ($file); while (<ARGV>) { if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) { print STDERR "found $1+$2 in $file\n"; } if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) { print STDERR "$func found in $file\n"; } print; } } where the list of files and functions is simply from: git grep ndo_tx_timeout, with manual addition of headers in the rare cases where the function is from a header, then manually changing the few places which actually call ndo_tx_timeout. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Acked-by: Heiner Kallweit <hkallweit1@gmail.com> Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com> Acked-by: Shannon Nelson <snelson@pensando.io> Reviewed-by: Martin Habets <mhabets@solarflare.com> changes from v9: fixup a forward declaration changes from v9: more leftovers from v3 change changes from v8: fix up a missing direct call to timeout rebased on net-next changes from v7: fixup leftovers from v3 change changes from v6: fix typo in rtl driver changes from v5: add missing files (allow any net device argument name) changes from v4: add a missing driver header changes from v3: change queue # to unsigned Changes from v2: added headers Changes from v1: Fix errors found by kbuild: generalize the pattern a bit, to pick up a couple of instances missed by the previous version. Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 14:23:51 +00:00
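As an illustration of the mechanical change the script performs (foo_tx_timeout is a hypothetical handler name, not from this file), each handler gains the index of the stuck queue:

/* Before: the handler only knew which device timed out. */
static void foo_tx_timeout(struct net_device *ndev);

/* After: the stuck queue index is passed in, so a driver can bump
 * the right per-queue counter or, down the road, reset only that
 * queue.
 */
static void foo_tx_timeout(struct net_device *ndev, unsigned int txqueue);

ravb_tx_timeout() below is this driver's instance of the converted handler.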
static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ravb_private *priv = netdev_priv(ndev);
netif_err(priv, tx_err, ndev,
"transmit timed out, status %08x, resetting...\n",
ravb_read(ndev, ISS));
/* Bump the tx_errors counter */
ndev->stats.tx_errors++;
schedule_work(&priv->work);
}
static void ravb_tx_timeout_work(struct work_struct *work)
{
struct ravb_private *priv = container_of(work, struct ravb_private,
work);
const struct ravb_hw_info *info = priv->info;
struct net_device *ndev = priv->ndev;
int error;
if (!rtnl_trylock()) {
usleep_range(1000, 2000);
schedule_work(&priv->work);
return;
}
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
if (ravb_stop_dma(ndev)) {
/* If ravb_stop_dma() fails, the hardware is still operating
 * for TX and/or RX. So, this should not call the following
 * functions because ravb_dmac_init() may fail too. Also, this
 * should not retry ravb_stop_dma() again and again here because
 * it's possible to wait forever. So, this just re-enables TX and
 * RX and skips the following re-initialization procedure.
 */
ravb_rcv_snd_enable(ndev);
goto out;
}
ravb_ring_free(ndev, RAVB_BE);
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
/* Device init */
error = ravb_dmac_init(ndev);
if (error) {
/* If ravb_dmac_init() fails, descriptors are freed. So, this
* should return here to avoid re-enabling the TX and RX in
* ravb_emac_init().
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
goto out_unlock;
}
ravb_emac_init(ndev);
out:
/* Initialise PTP Clock driver */
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
out_unlock:
rtnl_unlock();
}
static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
{
struct iphdr *ip = ip_hdr(skb);
/* TODO: Need to add support for VLAN tag 802.1Q */
if (skb_vlan_tag_present(skb))
return false;
/* TODO: Need to add hardware checksum for IPv6 */
if (skb->protocol != htons(ETH_P_IP))
return false;
switch (ip->protocol) {
case IPPROTO_TCP:
break;
case IPPROTO_UDP:
/* If the checksum value in the UDP header field is 0, the TOE
 * does not calculate a checksum for the UDP part of this frame,
 * as that is an optional feature per the standards.
 */
if (udp_hdr(skb)->check == 0)
return false;
break;
default:
return false;
}
return true;
}
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
dma_addr_t dma_addr;
void *buffer;
u32 entry;
u32 len;
if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
skb_checksum_help(skb);
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_BUSY;
}
if (skb_put_padto(skb, ETH_ZLEN))
goto exit;
entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
priv->tx_skb[q][entry / num_tx_desc] = skb;
if (num_tx_desc > 1) {
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / num_tx_desc * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
/* Zero length DMA descriptors are problematic as they seem
* to terminate DMA transfers. Avoid them by simply using a
* length of DPTR_ALIGN (4) when skb data is aligned to
* DPTR_ALIGN.
*
* As skb is guaranteed to have at least ETH_ZLEN (60)
* bytes of data by the call to skb_put_padto() above this
* is safe with respect to both the length of the first DMA
* descriptor (len) overflowing the available data and the
* length of the second DMA descriptor (skb->len - len)
* being negative.
*/
if (len == 0)
len = DPTR_ALIGN;
ravb: do not use zero-length alignment DMA descriptor

Due to alignment requirements of the hardware, transmissions are split into two DMA descriptors: a small padding descriptor of 0 - 3 bytes followed by a descriptor for the rest of the packet. For IP packets the first descriptor is never zero-length, due to the way the stack aligns buffers, but for non-IP packets it may be. Timeouts have been reported in that case, presumably because transmission stops at the first zero-length DMA descriptor and the packet is never sent; mapping the zero-length buffer can also trigger "kernel BUG at lib/swiotlb.c:495!" in swiotlb_tbl_map_single() via ravb_start_xmit(). Avoid zero-length descriptors by using a minimum length of DPTR_ALIGN instead.

Fixes: 2f45d1902acf ("ravb: minimize TX data copying")
Signed-off-by: Masaru Nagai <masaru.nagai.vx@renesas.com>
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-01-16 10:45:21 +00:00
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto drop;
desc = &priv->tx_ring[q][entry];
desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
buffer = skb->data + len;
len = skb->len - len;
dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto unmap;
desc++;
} else {
desc = &priv->tx_ring[q][entry];
len = skb->len;
dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto drop;
}
desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
if (info->gptp || info->ccc_gac) {
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
if (num_tx_desc > 1) {
desc--;
dma_unmap_single(ndev->dev.parent, dma_addr,
len, DMA_TO_DEVICE);
}
goto unmap;
}
ts_skb->skb = skb_get(skb);
ts_skb->tag = priv->ts_skb_tag++;
priv->ts_skb_tag &= 0x3ff;
list_add_tail(&ts_skb->list, &priv->ts_skb_list);
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
skb_tx_timestamp(skb);
}
/* Descriptor type must be set after all the above writes */
dma_wmb();
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
desc->die_dt = DT_FSTART;
} else {
desc->die_dt = DT_FSINGLE;
}
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
(priv->num_tx_ring[q] - 1) * num_tx_desc &&
!ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
unmap:
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
priv->tx_skb[q][entry / num_tx_desc] = NULL;
goto exit;
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
/* If the skb needs a TX timestamp, it is handled in the network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
RAVB_BE;
}
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
pm_runtime_get_noresume(dev);
if (!pm_runtime_active(dev))
goto out_rpm_put;
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
if (info->carrier_counters) {
nstats->collisions += ravb_read(ndev, CXR41);
ravb_write(ndev, 0, CXR41); /* (write clear) */
nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
ravb_write(ndev, 0, CXR42); /* (write clear) */
}
nstats->rx_packets = stats0->rx_packets;
nstats->tx_packets = stats0->tx_packets;
nstats->rx_bytes = stats0->rx_bytes;
nstats->tx_bytes = stats0->tx_bytes;
nstats->multicast = stats0->multicast;
nstats->rx_errors = stats0->rx_errors;
nstats->rx_crc_errors = stats0->rx_crc_errors;
nstats->rx_frame_errors = stats0->rx_frame_errors;
nstats->rx_length_errors = stats0->rx_length_errors;
nstats->rx_missed_errors = stats0->rx_missed_errors;
nstats->rx_over_errors = stats0->rx_over_errors;
if (info->nc_queues) {
stats1 = &priv->stats[RAVB_NC];
nstats->rx_packets += stats1->rx_packets;
nstats->tx_packets += stats1->tx_packets;
nstats->rx_bytes += stats1->rx_bytes;
nstats->tx_bytes += stats1->tx_bytes;
nstats->multicast += stats1->multicast;
nstats->rx_errors += stats1->rx_errors;
nstats->rx_crc_errors += stats1->rx_crc_errors;
nstats->rx_frame_errors += stats1->rx_frame_errors;
nstats->rx_length_errors += stats1->rx_length_errors;
nstats->rx_missed_errors += stats1->rx_missed_errors;
nstats->rx_over_errors += stats1->rx_over_errors;
}
out_rpm_put:
pm_runtime_put_noidle(dev);
return nstats;
}
/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
ravb_modify(ndev, ECMR, ECMR_PRM,
ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
struct device *dev = &priv->pdev->dev;
int error;
netif_tx_stop_all_queues(ndev);
/* Disable interrupts by clearing the interrupt masks. */
ravb_write(ndev, 0, RIC0);
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
/* PHY disconnect */
if (ndev->phydev) {
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
}
/* Stop PTP Clock driver */
if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
if (ravb_stop_dma(ndev) < 0)
netdev_err(ndev,
"device will be stopped after h/w processes are done.\n");
/* Clear the timestamp list */
if (info->gptp || info->ccc_gac) {
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
list_del(&ts_skb->list);
kfree_skb(ts_skb->skb);
kfree(ts_skb);
}
}
cancel_work_sync(&priv->work);
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
/* Update statistics. */
ravb_get_stats(ndev);
/* Set reset mode. */
error = ravb_set_opmode(ndev, CCC_OPC_RESET);
if (error)
return error;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
struct ravb_private *priv = netdev_priv(ndev);
struct hwtstamp_config config;
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
}
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
struct ravb_private *priv = netdev_priv(ndev);
struct hwtstamp_config config;
u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
u32 tstamp_tx_ctrl;
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_tx_ctrl = 0;
break;
case HWTSTAMP_TX_ON:
tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
tstamp_rx_ctrl = 0;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
break;
default:
config.rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
}
priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
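For reference, a minimal user-space sketch of driving this handler through the standard SIOCSHWTSTAMP ioctl (illustrative only: enable_hwtstamp is a hypothetical function, error handling is trimmed, and sock is assumed to be any open socket, e.g. an AF_INET datagram socket):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The kernel may widen the configuration (this driver falls
	 * back to HWTSTAMP_FILTER_ALL for unsupported RX filters),
	 * so cfg should be re-read after the ioctl returns.
	 */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}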
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
if (!phydev)
return -ENODEV;
switch (cmd) {
case SIOCGHWTSTAMP:
return ravb_hwtstamp_get(ndev, req);
case SIOCSHWTSTAMP:
return ravb_hwtstamp_set(ndev, req);
}
return phy_mii_ioctl(phydev, req, cmd);
}
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ravb_private *priv = netdev_priv(ndev);
ndev->mtu = new_mtu;
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
ravb_emac_init(ndev);
}
netdev_update_features(ndev);
return 0;
}
ravb: RX checksum offload

Add support for RX checksum offload. This is enabled by default and may be disabled and re-enabled using ethtool (ethtool -K eth0 rx off / rx on). The RAVB provides a simple checksumming scheme that appears completely compatible with CHECKSUM_COMPLETE: the sum of all packet data after the L2 header is appended to the packet data, where it may be trivially read by the driver and used to update the skb accordingly. netperf TCP_MAERTS throughput is close to gigabit line rate both with the offload enabled (937.54 Mbit/s) and disabled (940.20 Mbit/s), while perf shows do_csum() falling out of the profile (7.42% of cycles without the offload, absent with it), as expected. Results collected on an R-Car Gen 3 Salvator-X/r8a7796 ES1.0 and also tested on r8a7795 ES1.0; by inspection this also appears compatible with the ravb found on R-Car Gen 2 SoCs, though untested on such hardware.

Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-04 07:54:27 +00:00
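A minimal sketch of how a driver can consume such a trailing sum on receive, assuming the 2-byte value sits at the tail of the frame data (example_rx_csum is a hypothetical helper; the driver's real RX-checksum handling lives in the receive path outside this excerpt):

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

static void example_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware sum occupies sizeof(__sum16) (2) bytes appended
	 * to the packet data; frames too short to carry it are skipped.
	 */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	/* Drop the appended sum so it is not treated as payload */
	skb_trim(skb, skb->len - sizeof(__sum16));
}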
static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
/* Disable TX and RX */
ravb_rcv_snd_disable(ndev);
/* Modify RX Checksum setting */
ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
/* Enable TX and RX */
ravb_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&priv->lock, flags);
}
static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
u32 val, u32 mask)
{
u32 csr0 = CSR0_TPE | CSR0_RPE;
int ret;
ravb_write(ndev, csr0 & ~mask, CSR0);
ret = ravb_wait(ndev, CSR0, mask, 0);
if (!ret)
ravb_write(ndev, val, reg);
ravb_write(ndev, csr0, CSR0);
return ret;
}
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
int ret = 0;
u32 val;
spin_lock_irqsave(&priv->lock, flags);
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
else
val = 0;
ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
if (ret)
goto done;
}
if (changed & NETIF_F_HW_CSUM) {
if (features & NETIF_F_HW_CSUM)
val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
else
val = 0;
ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
if (ret)
goto done;
}
done:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
return 0;
}
static int ravb_set_features(struct net_device *ndev,
netdev_features_t features)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct device *dev = &priv->pdev->dev;
int ret;
pm_runtime_get_noresume(dev);
if (pm_runtime_active(dev))
ret = info->set_feature(ndev, features);
else
ret = 0;
pm_runtime_put_noidle(dev);
if (ret)
return ret;
ndev->features = features;
return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
.ndo_open = ravb_open,
.ndo_stop = ravb_close,
.ndo_start_xmit = ravb_start_xmit,
.ndo_select_queue = ravb_select_queue,
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
.ndo_eth_ioctl = ravb_do_ioctl,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ravb_set_features,
};
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
struct phy_device *phydev;
struct device_node *pn;
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
int error;
/* Bitbang init */
priv->mdiobb.ops = &bb_ops;
/* MII controller setting */
priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
if (!priv->mii_bus)
return -ENOMEM;
/* Hook up MII support for ethtool */
priv->mii_bus->name = "ravb_mii";
priv->mii_bus->parent = dev;
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, pdev->id);
/* Register MDIO bus */
error = of_mdiobus_register(priv->mii_bus, dev->of_node);
if (error)
goto out_free_bus;
pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
phydev = of_phy_find_device(pn);
if (phydev) {
phydev->mac_managed_pm = true;
put_device(&phydev->mdio.dev);
}
of_node_put(pn);
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
return 0;
out_free_bus:
free_mdio_bitbang(priv->mii_bus);
return error;
}
/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
/* Unregister mdio bus */
mdiobus_unregister(priv->mii_bus);
/* Free bitbang info */
free_mdio_bitbang(priv->mii_bus);
return 0;
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
.emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
.irq_en_dis = 1,
.ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
.emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
.emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
.gptp_ref_clk = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
static const struct ravb_hw_info gbeth_hw_info = {
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
.dmac_init = ravb_dmac_init_gbeth,
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
.rx_max_frame_size = SZ_8K,
.rx_max_desc_use = 4080,
.rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
};
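/* Map OF compatible strings to the matching per-SoC hardware information. */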
static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
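/* Request a single IRQ. Named IRQs are looked up by name and get a
 * devm-allocated "<netdev>:<channel>" label; the unnamed case falls back to
 * platform IRQ 0 and requests it as a shared line.
 */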
static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
const char *ch, int *irq, irq_handler_t handler)
{
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
const char *dev_name;
unsigned long flags;
int error, irq_num;
if (irq_name) {
dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
if (!dev_name)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
if (irq_num < 0)
return irq_num;
if (irq)
*irq = irq_num;
error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
if (error)
netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
return error;
}
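/* Request all IRQs for the device. Single-IRQ variants share one line for
 * everything; multi-IRQ variants use a multiplexed line plus dedicated
 * lines for the EMAC, the per-queue RX/TX channels and, when err_mgmt_irqs
 * is set, the error/management events.
 */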
static int ravb_setup_irqs(struct ravb_private *priv)
{
const struct ravb_hw_info *info = priv->info;
struct net_device *ndev = priv->ndev;
const char *irq_name, *emac_irq_name;
int error;
if (!info->multi_irqs)
return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
if (info->err_mgmt_irqs) {
irq_name = "dia";
emac_irq_name = "line3";
} else {
irq_name = "ch22";
emac_irq_name = "ch24";
}
error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
if (error)
return error;
error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
ravb_emac_interrupt);
if (error)
return error;
if (info->err_mgmt_irqs) {
error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
if (error)
return error;
error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
if (error)
return error;
}
error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
if (error)
return error;
error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
if (error)
return error;
error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
if (error)
return error;
return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
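/* Probe: parse the DT node, request IRQs and clocks, enable runtime PM, map
 * the registers, allocate the descriptor base address table and bring up the
 * MDIO bus before registering the netdev. Error paths unwind each step in
 * reverse order.
 */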
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct ravb_hw_info *info;
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
struct resource *res;
int error, q;
if (!np) {
dev_err(&pdev->dev,
"this driver is required to be instantiated from device tree\n");
return -EINVAL;
}
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
NUM_TX_QUEUE, NUM_RX_QUEUE);
if (!ndev)
return -ENOMEM;
info = of_device_get_match_data(&pdev->dev);
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
error = reset_control_deassert(rstc);
if (error)
goto out_free_netdev;
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
priv->info = info;
priv->rstc = rstc;
priv->ndev = ndev;
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
if (info->nc_queues) {
priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
error = ravb_setup_irqs(priv);
if (error)
goto out_reset_assert;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
error = PTR_ERR(priv->clk);
goto out_reset_assert;
}
if (info->gptp_ref_clk) {
priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
if (IS_ERR(priv->gptp_clk)) {
error = PTR_ERR(priv->gptp_clk);
goto out_reset_assert;
}
}
priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
if (IS_ERR(priv->refclk)) {
error = PTR_ERR(priv->refclk);
goto out_reset_assert;
}
clk_prepare(priv->refclk);
platform_set_drvdata(pdev, ndev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
error = pm_runtime_resume_and_get(&pdev->dev);
if (error < 0)
goto out_rpm_disable;
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
ndev->max_mtu = info->rx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX buffer.
 * Use two descriptors to handle this: the first covers the aligned part of
 * the data buffer and the second covers the overflow caused by the
 * alignment.
 */
priv->num_tx_desc = info->aligned_tx ? 2 : 1;
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
error = ravb_compute_gti(ndev);
if (error)
goto out_rpm_put;
ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
&priv->desc_bat_dma, GFP_KERNEL);
if (!priv->desc_bat) {
dev_err(&pdev->dev,
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
/* Set config mode as this is needed for PHY initialization. */
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
goto out_rpm_put;
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(&pdev->dev,
"no valid MAC address supplied, using a random one\n");
eth_hw_addr_random(ndev);
}
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
/* MDIO bus init */
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
goto out_reset_mode;
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
}
/* Undo previous switch to config opmode. */
error = ravb_set_opmode(ndev, CCC_OPC_RESET);
if (error)
goto out_mdio_release;
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
/* Network device register */
error = register_netdev(ndev);
if (error)
goto out_napi_del;
device_set_wakeup_capable(&pdev->dev, 1);
/* Print device information */
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
out_napi_del:
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
out_mdio_release:
Revert "ravb: Fixed to be able to unload modules" This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov <sergei.shtylyov@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-09-22 07:29:31 +00:00
ravb_mdio_release(priv);
out_reset_mode:
ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
clk_unprepare(priv->refclk);
out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
return error;
}
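/* Teardown mirrors ravb_probe() in reverse order; the netdev is unregistered
 * first so that no ioctl can race with the rest of the teardown.
 */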
static void ravb_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct device *dev = &priv->pdev->dev;
int error;
error = pm_runtime_resume_and_get(dev);
if (error < 0)
return;
unregister_netdev(ndev);
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(dev);
	clk_unprepare(priv->refclk);
	reset_control_assert(priv->rstc);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
}
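
/* Arm Wake-on-LAN: mask every interrupt source except MagicPacket
 * detection (ECSIPR_MPDIP) so the quiesced MAC can still wake the
 * system, then flag the EMAC interrupt as a wakeup source.
 */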
static int ravb_wol_setup(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

	/* Enable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	if (priv->info->ccc_gac)
		ravb_ptp_stop(ndev);

	return enable_irq_wake(priv->emac_irq);
}
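
/* Undo ravb_wol_setup(): cycle the controller through reset back into
 * config mode, restart NAPI polling, drop the MagicPacket and IRQ-wake
 * configuration, and close the interface so that ravb_resume() can
 * reopen it cleanly.
 */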
static int ravb_wol_restore(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int error;

	/* Set reset mode to rearm the WoL logic. */
	error = ravb_set_opmode(ndev, CCC_OPC_RESET);
	if (error)
		return error;

	/* Set AVB config mode. */
	error = ravb_set_config_mode(ndev);
	if (error)
		return error;

	if (priv->info->ccc_gac)
		ravb_ptp_init(ndev, priv->pdev);

	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);

	/* Disable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

	ravb_close(ndev);

	return disable_irq_wake(priv->emac_irq);
}
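
/* System sleep entry point: detach the interface so the networking core
 * stops queueing transmits, then either arm WoL (in which case the
 * controller must stay powered to detect the MagicPacket) or close the
 * device and let runtime PM suspend it before asserting the reset line.
 */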
static int ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		goto reset_assert;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		return ravb_wol_setup(ndev);

	ret = ravb_close(ndev);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(&priv->pdev->dev);
	if (ret)
		return ret;

reset_assert:
	return reset_control_assert(priv->rstc);
}
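
/* System resume: deassert the reset line first, then rebuild the state
 * torn down by ravb_suspend(). If ravb_open() fails, the out_rpm_put
 * path drops the runtime-PM usage count again so the device is not
 * left active.
 */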
static int ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	ret = reset_control_deassert(priv->rstc);
	if (ret)
		return ret;

	if (!netif_running(ndev))
		return 0;

	/* If WoL is enabled restore the interface. */
	if (priv->wol_enabled) {
		ret = ravb_wol_restore(ndev);
		if (ret)
			return ret;
	} else {
		ret = pm_runtime_force_resume(dev);
		if (ret)
			return ret;
	}

	/* Reopening the interface will restore the device to the working state. */
	ret = ravb_open(ndev);
	if (ret < 0)
		goto out_rpm_put;

	ravb_set_rx_mode(ndev);
	netif_device_attach(ndev);

	return 0;

out_rpm_put:
	if (!priv->wol_enabled) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}

	return ret;
}
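
/* The runtime PM hooks only gate the reference clock; module clock and
 * power are assumed to be handled by the PM domain the device is
 * attached to.
 */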
static int ravb_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);

	clk_disable(priv->refclk);

	return 0;
}
static int ravb_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);

	return clk_enable(priv->refclk);
}
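
/* SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() together with pm_ptr() below
 * let the compiler drop all of the PM callbacks when CONFIG_PM is
 * disabled, without any #ifdef guards.
 */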
static const struct dev_pm_ops ravb_dev_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove_new	= ravb_remove,
	.driver = {
		.name		= "ravb",
		.pm		= pm_ptr(&ravb_dev_pm_ops),
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");