net: axienet: add coalesce timer ethtool configuration

Add the ability to configure the RX/TX coalesce timer with ethtool.
Change the default setting to scale with the clock rate rather than being a
fixed number of clock cycles.

Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Robert Hancock, 2022-03-04 20:24:43 -06:00, committed by David S. Miller
commit 0b79b8dc97 (parent 40da5d680e)
2 changed files with 47 additions and 14 deletions
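With this change the delay timer can be tuned from user space alongside the
existing packet-count thresholds, for example (eth0 is only a placeholder
interface name):

ethtool -C eth0 rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 24
ethtool -c eth0

Here rx-frames/tx-frames map to coalesce_count_rx/tx and rx-usecs/tx-usecs to
the new coalesce_usec_rx/tx fields.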

drivers/net/ethernet/xilinx/xilinx_axienet.h

@@ -119,11 +119,11 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
/* Default TX/RX Threshold and waitbound values for SGDMA mode */
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
#define XAXIDMA_DFT_TX_USEC 50
#define XAXIDMA_DFT_RX_THRESHOLD 1
#define XAXIDMA_DFT_RX_WAITBOUND 254
#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -425,7 +425,9 @@ struct axidma_bd {
* @csum_offload_on_tx_path: Stores the checksum selection on TX side.
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
* @coalesce_usec_tx: IRQ coalesce delay for TX
*/
struct axienet_local {
struct net_device *ndev;
@@ -482,7 +484,9 @@ struct axienet_local {
int csum_offload_on_rx_path;
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
u32 coalesce_usec_tx;
};
/**

drivers/net/ethernet/xilinx/xilinx_axienet_main.c

@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -226,6 +226,28 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
/**
* axienet_usec_to_timer - Calculate IRQ delay timer value
* @lp: Pointer to the axienet_local structure
* @coalesce_usec: Microseconds to convert into timer value
*/
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
u32 result;
u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
if (lp->axi_clk)
clk_rate = clk_get_rate(lp->axi_clk);
/* 1 Timeout Interval = 125 * (clock period of SG clock) */
result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
(u64)125000000);
if (result > 255)
result = 255;
return result;
}
/**
* axienet_dma_start - Set up DMA registers and start DMA operation
* @lp: Pointer to the axienet_local structure
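For concreteness about the conversion in axienet_usec_to_timer() above: the
following standalone sketch (illustrative only, not driver code) reproduces
the same rounding and clamping, assuming the 125 MHz fallback SG clock rate
used when no clock is provided. At that rate one timer interval is exactly
1 us, so the default 50 us maps to a timer value of 50, and anything beyond
255 us clamps to the field maximum.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not part of the driver: mirrors the rounding and
 * clamping done by axienet_usec_to_timer(), assuming a 125 MHz SG clock.
 */
static uint32_t usec_to_timer(uint32_t usec, uint64_t clk_rate)
{
	/* DIV64_U64_ROUND_CLOSEST(a, b) is (a + b / 2) / b for positive values */
	uint64_t result = ((uint64_t)usec * clk_rate + 125000000 / 2) / 125000000;

	return result > 255 ? 255 : (uint32_t)result; /* clamp to 8-bit timer field */
}

int main(void)
{
	printf("%u\n", (unsigned int)usec_to_timer(50, 125000000));   /* default 50 us -> 50 */
	printf("%u\n", (unsigned int)usec_to_timer(1000, 125000000)); /* large request clamps to 255 */
	return 0;
}

On a different SG clock rate the same microsecond request yields a
proportionally different timer value, which is the "scale with the clock
rate" behaviour described in the commit message.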
@@ -241,7 +263,8 @@ static void axienet_dma_start(struct axienet_local *lp)
* the first RX packet. Otherwise leave at 0 to disable delay interrupt.
*/
if (lp->coalesce_count_rx > 1)
lp->rx_dma_cr |= (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT) |
lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
<< XAXIDMA_DELAY_SHIFT) |
XAXIDMA_IRQ_DELAY_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
@@ -252,7 +275,8 @@ static void axienet_dma_start(struct axienet_local *lp)
* the first TX packet. Otherwise leave at 0 to disable delay interrupt.
*/
if (lp->coalesce_count_tx > 1)
tx_cr |= (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT) |
tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
<< XAXIDMA_DELAY_SHIFT) |
XAXIDMA_IRQ_DELAY_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
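In both the RX and TX paths the converted timer value replaces the old fixed
XAXIDMA_DFT_*_WAITBOUND constant: it is shifted by XAXIDMA_DELAY_SHIFT into
the delay field of the DMA control register, alongside the packet-count
threshold derived from coalesce_count_rx/tx, and XAXIDMA_IRQ_DELAY_MASK
enables the delay interrupt. The delay field is 8 bits wide, which is why
axienet_usec_to_timer() clamps its result to 255.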
@@ -1469,14 +1493,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
>> XAXIDMA_COALESCE_SHIFT;
regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
>> XAXIDMA_COALESCE_SHIFT;
ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1509,8 +1531,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->rx_coalesce_usecs)
lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
if (ecoalesce->tx_coalesce_usecs)
lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
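As with the pre-existing frame-count parameters, only non-zero values are
applied here: passing 0 for rx-usecs or tx-usecs through ethtool leaves the
current delay setting unchanged rather than disabling the timer.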
@@ -1541,7 +1567,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2028,7 +2055,9 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
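The defaults chosen in axienet_probe() mean the RX path still interrupts on
every packet out of the box (threshold of 1, so the coalesce_count_rx > 1
check in axienet_dma_start() leaves the RX delay timer disabled), while the
TX path coalesces up to 24 packets with a 50 us delay timer; both can now be
changed at runtime via the ethtool coalesce interface.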