Merge branch 'gve-queue-api'

Shailend Chand says:

====================
gve: Implement queue api

Following the discussion on
https://patchwork.kernel.org/project/linux-media/patch/20240305020153.2787423-2-almasrymina@google.com/,
the queue api defined by Mina is implemented for gve.

The first patch is just Mina's introduction of the api. The rest of the
patches make surgical changes in gve to enable it to work correctly with
only a subset of queues present (thus far it had assumed that either all
queues are up or all are down). The final patch has the api
implementation.

Changes since v1: clang warning fixes, kdoc warning fix, and addressed
review comments.
====================

Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit cdc74c9d06
Committed by David S. Miller on 2024-05-05 14:35:48 +01:00
11 changed files with 489 additions and 406 deletions
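For orientation before the per-file diffs: the queue api lets the stack replace a single RX queue's resources while every other queue keeps running. Below is a hedged sketch of the flow a caller could drive through the ops introduced at the end of this series; the helper name restart_rx_queue() and the locking context are assumptions for illustration, not code from these patches.

/* Hypothetical caller, assumed to run under rtnl_lock(). */
static int restart_rx_queue(struct net_device *dev, int idx)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err = -ENOMEM;

	if (!ops)
		return -EOPNOTSUPP;

	new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem)
		goto out;

	/* Allocate the replacement queue's memory up front so the queue
	 * is only down for the short stop/start window below.
	 */
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		goto out;

	/* Stop queue idx; its current memory is returned in old_mem. */
	err = ops->ndo_queue_stop(dev, old_mem, idx);
	if (err) {
		ops->ndo_queue_mem_free(dev, new_mem);
		goto out;
	}

	/* Restart the queue on the new memory, then drop the old one.
	 * A real caller would also handle a failed start (for example by
	 * restarting the old queue); that is omitted here.
	 */
	err = ops->ndo_queue_start(dev, new_mem, idx);
	ops->ndo_queue_mem_free(dev, old_mem);
out:
	kfree(new_mem);
	kfree(old_mem);
	return err;
}

Allocating the replacement memory before stopping anything keeps the downtime of that one queue limited to the stop/start window, which is the point of the api.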

drivers/net/ethernet/google/gve/gve.h

@ -610,6 +610,7 @@ struct gve_notify_block {
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
u32 irq;
};
/* Tracks allowed and current queue settings */
@ -637,26 +638,10 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
/* Parameters for allocating queue page lists */
struct gve_qpls_alloc_cfg {
struct gve_queue_config *tx_cfg;
struct gve_queue_config *rx_cfg;
u16 num_xdp_queues;
bool raw_addressing;
bool is_gqi;
/* Allocated resources are returned here */
struct gve_queue_page_list *qpls;
};
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
/* qpls must already be allocated */
struct gve_queue_page_list *qpls;
u16 ring_size;
u16 start_idx;
u16 num_rings;
@ -672,9 +657,6 @@ struct gve_rx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
struct gve_queue_config *qcfg_tx;
/* qpls must already be allocated */
struct gve_queue_page_list *qpls;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
@ -700,7 +682,6 @@ struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@ -1024,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
@ -1035,7 +1015,6 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
@ -1089,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@ -1125,11 +1110,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,

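A note on the two prototypes added above: queue page lists are no longer carved out of a priv->qpls array up front; each ring allocates and frees its own QPL. A minimal usage sketch, with qpl_id and page_cnt as placeholder values:

	struct gve_queue_page_list *qpl;

	qpl = gve_alloc_queue_page_list(priv, qpl_id, page_cnt);
	if (!qpl)
		return -ENOMEM;	/* allocation cleans up after itself on failure */
	/* ... use qpl->pages / qpl->page_buses for the ring ... */
	gve_free_queue_page_list(priv, qpl, qpl_id);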
drivers/net/ethernet/google/gve/gve_adminq.c

@ -630,14 +630,15 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
return gve_adminq_kick_and_wait(priv);
}
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd,
u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
memset(cmd, 0, sizeof(*cmd));
cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
@ -648,13 +649,13 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
cmd.create_rx_queue.rx_desc_ring_addr =
cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->desc.bus),
cmd.create_rx_queue.rx_data_ring_addr =
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->data.data_bus),
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@ -662,25 +663,40 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
else
qpl_id = rx->dqo.qpl->id;
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.rx_desc_ring_addr =
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
cmd.create_rx_queue.rx_data_ring_addr =
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
cmd.create_rx_queue.packet_buffer_size =
cmd->create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
cmd.create_rx_queue.rx_buff_ring_size =
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd.create_rx_queue.enable_rsc =
cmd->create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
cmd.create_rx_queue.header_buffer_size =
cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
}
}
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
int err;
@ -727,22 +743,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
return gve_adminq_kick_and_wait(priv);
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
u32 queue_index)
{
memset(cmd, 0, sizeof(*cmd));
cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
};
}
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
};
gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
err = gve_adminq_issue_cmd(priv, &cmd);
if (err)
return err;
/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
return 0;
gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)

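The refactor above splits command construction out of gve_adminq_create_rx_queue()/gve_adminq_destroy_rx_queue() so that single-queue variants can execute one command immediately (ringing the doorbell and waiting) instead of batching it for a later kick. A hedged sketch of how a per-queue restart path might use them; gve_recreate_rx_queue() is illustrative, not a function from this series:

static int gve_recreate_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	int err;

	/* One command, executed on its own: doorbell rung, completion
	 * awaited, no other queue touched.
	 */
	err = gve_adminq_destroy_single_rx_queue(priv, queue_index);
	if (err)
		return err;

	/* Driver-side ring state would be reset here before the queue
	 * is handed back to the device.
	 */

	return gve_adminq_create_single_rx_queue(priv, queue_index);
}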
drivers/net/ethernet/google/gve/gve_adminq.h

@ -451,7 +451,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);

drivers/net/ethernet/google/gve/gve_ethtool.c

@ -8,6 +8,7 @@
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@ -165,6 +166,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
int num_stopped_rxqs = 0;
int num_stopped_txqs = 0;
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
@ -181,12 +184,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
rx_qid_to_stats_idx[ring] = -1;
if (!gve_rx_was_added_to_block(priv, ring))
num_stopped_rxqs++;
}
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
return;
}
for (ring = 0; ring < num_tx_queues; ring++) {
tx_qid_to_stats_idx[ring] = -1;
if (!gve_tx_was_added_to_block(priv, ring))
num_stopped_txqs++;
}
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@ -260,7 +274,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
/* The boundary between driver stats and NIC stats shifts if there are
* stopped queues.
*/
base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
max_stats_idx = NIC_RX_STATS_REPORT_NUM *
(priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx;
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
@ -274,6 +294,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
net_err_ratelimited("Invalid rxq id in NIC stats\n");
continue;
}
rx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk RX rings */
@ -308,11 +332,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
if (skip_nic_stats) {
stats_idx = rx_qid_to_stats_idx[ring];
if (skip_nic_stats || stats_idx < 0) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
} else {
stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@ -338,7 +362,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
max_stats_idx = NIC_TX_STATS_REPORT_NUM *
(num_tx_queues - num_stopped_txqs) +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@ -352,6 +377,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
if (queue_id < 0 || queue_id >= num_tx_queues) {
net_err_ratelimited("Invalid txq id in NIC stats\n");
continue;
}
tx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk TX rings */
@ -383,11 +412,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
stats_idx = tx_qid_to_stats_idx[ring];
if (skip_nic_stats || stats_idx < 0) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else {
stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@ -509,20 +538,17 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
int err;
/* get current queue configuration */
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* copy over the new ring_size from ethtool */
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
}

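To make the shifted stats boundary above concrete, a worked example with made-up sizes (the real GVE_*_STATS_REPORT_NUM / NIC_*_STATS_REPORT_NUM constants live in gve.h and may differ): 4 tx queues, 4 rx queues, one stopped rx queue, no stopped tx queues, and per-queue counts of 2 (driver tx), 2 (driver rx), 4 (NIC rx), 6 (NIC tx):

	/* Same locals as in gve_get_ethtool_stats(), mirroring its math. */
	base_stats_idx = 2 * 4 + 2 * 4;			/* driver tx + rx region   = 16 */
	base_stats_idx += 4 * 1 + 6 * 0;		/* shift for 1 stopped rxq = 20 */
	max_stats_idx = 4 * (4 - 1) + base_stats_idx;	/* NIC rx stats for the 3 live rxqs end at 32 */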
drivers/net/ethernet/google/gve/gve_main.c

@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
@ -253,6 +254,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
{
int cpu_curr = smp_processor_id();
const struct cpumask *aff_mask;
aff_mask = irq_get_effective_affinity_mask(irq);
if (unlikely(!aff_mask))
return 1;
return cpumask_test_cpu(cpu_curr, aff_mask);
}
int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@ -322,8 +335,21 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
reschedule |= work_done == budget;
}
if (reschedule)
return budget;
if (reschedule) {
/* Reschedule by returning budget only if already on the correct
* cpu.
*/
if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
return budget;
/* If not on the cpu with which this queue's irq has affinity,
* we avoid rescheduling napi and arm the irq instead so that
* napi gets rescheduled back eventually onto the right cpu.
*/
if (work_done == budget)
work_done--;
}
if (likely(napi_complete_done(napi, work_done))) {
/* Enable interrupts again.
@ -428,6 +454,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
"Failed to receive msix vector %d\n", i);
goto abort_with_some_ntfy_blocks;
}
block->irq = priv->msix_vectors[msix_idx].vector;
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
block->irq_db_index = &priv->irq_db_indices[i].index;
@ -441,6 +468,7 @@ abort_with_some_ntfy_blocks:
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
block->irq = 0;
}
kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
@ -474,6 +502,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
block->irq = 0;
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
kvfree(priv->ntfy_blocks);
@ -582,37 +611,36 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
static int gve_unregister_qpl(struct gve_priv *priv,
struct gve_queue_page_list *qpl)
{
int err;
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
if (!qpl)
return 0;
err = gve_adminq_unregister_page_list(priv, qpl->id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
priv->qpls[i].id);
qpl->id);
return err;
}
priv->num_registered_pages -= priv->qpls[i].num_entries;
priv->num_registered_pages -= qpl->num_entries;
return 0;
}
static int gve_register_qpl(struct gve_priv *priv, u32 i)
static int gve_register_qpl(struct gve_priv *priv,
struct gve_queue_page_list *qpl)
{
int num_rx_qpls;
int pages;
int err;
/* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
netif_err(priv, drv, priv->dev,
"Cannot register nonexisting QPL at index %d\n", i);
return -EINVAL;
}
if (!qpl)
return 0;
pages = priv->qpls[i].num_entries;
pages = qpl->num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
@ -622,14 +650,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return -EINVAL;
}
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
err = gve_adminq_register_page_list(priv, qpl);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean
* up
*/
qpl->id);
return err;
}
@ -637,6 +662,26 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return 0;
}
static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
{
struct gve_tx_ring *tx = &priv->tx[idx];
if (gve_is_gqi(priv))
return tx->tx_fifo.qpl;
else
return tx->dqo.qpl;
}
static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
if (gve_is_gqi(priv))
return rx->data.qpl;
else
return rx->dqo.qpl;
}
static int gve_register_xdp_qpls(struct gve_priv *priv)
{
int start_id;
@ -645,7 +690,7 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
err = gve_register_qpl(priv, i);
err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean up */
if (err)
return err;
@ -656,7 +701,6 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
@ -665,15 +709,13 @@ static int gve_register_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
err = gve_register_qpl(priv, i);
err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
if (err)
return err;
}
/* there might be a gap between the tx and rx qpl ids */
start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
err = gve_register_qpl(priv, start_id + i);
err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
if (err)
return err;
}
@ -689,7 +731,7 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
err = gve_unregister_qpl(priv, i);
err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@ -700,7 +742,6 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
@ -709,15 +750,14 @@ static int gve_unregister_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
err = gve_unregister_qpl(priv, i);
err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
}
start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
err = gve_unregister_qpl(priv, start_id + i);
err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@ -828,7 +868,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->qpls = priv->qpls;
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
@ -885,9 +924,9 @@ static int gve_alloc_xdp_rings(struct gve_priv *priv)
return 0;
}
static int gve_alloc_rings(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@ -973,9 +1012,9 @@ static void gve_free_xdp_rings(struct gve_priv *priv)
}
}
static void gve_free_rings(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
{
if (gve_is_gqi(priv)) {
gve_tx_free_rings_gqi(priv, tx_cfg);
@ -1004,35 +1043,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
static int gve_alloc_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
u32 id, int pages)
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
u32 id, int pages)
{
struct gve_queue_page_list *qpl;
int err;
int i;
qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
if (!qpl)
return NULL;
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
return -ENOMEM;
goto abort;
qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->page_buses)
return -ENOMEM;
goto abort;
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
goto abort;
qpl->num_entries++;
}
return 0;
return qpl;
abort:
gve_free_queue_page_list(priv, qpl, id);
return NULL;
}
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
@ -1044,14 +1089,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
static void gve_free_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
int id)
void gve_free_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
u32 id)
{
int i;
if (!qpl->pages)
if (!qpl)
return;
if (!qpl->pages)
goto free_qpl;
if (!qpl->page_buses)
goto free_pages;
@ -1064,109 +1111,8 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
}
static void gve_free_n_qpls(struct gve_priv *priv,
struct gve_queue_page_list *qpls,
int start_id,
int num_qpls)
{
int i;
for (i = start_id; i < start_id + num_qpls; i++)
gve_free_queue_page_list(priv, &qpls[i], i);
}
static int gve_alloc_n_qpls(struct gve_priv *priv,
struct gve_queue_page_list *qpls,
int page_count,
int start_id,
int num_qpls)
{
int err;
int i;
for (i = start_id; i < start_id + num_qpls; i++) {
err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
if (err)
goto free_qpls;
}
return 0;
free_qpls:
/* Must include the failing QPL too for gve_alloc_queue_page_list fails
* without cleaning up.
*/
gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
return err;
}
static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
int rx_start_id, tx_num_qpls, rx_num_qpls;
struct gve_queue_page_list *qpls;
u32 page_count;
int err;
if (cfg->raw_addressing)
return 0;
qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
if (!qpls)
return -ENOMEM;
/* Allocate TX QPLs */
page_count = priv->tx_pages_per_qpl;
tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
gve_is_qpl(priv));
err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
if (err)
goto free_qpl_array;
/* Allocate RX QPLs */
rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
/* For GQI_QPL number of pages allocated have 1:1 relationship with
* number of descriptors. For DQO, number of pages required are
* more than descriptors (because of out of order completions).
* Set it to twice the number of descriptors.
*/
if (cfg->is_gqi)
page_count = rx_alloc_cfg->ring_size;
else
page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
if (err)
goto free_tx_qpls;
cfg->qpls = qpls;
return 0;
free_tx_qpls:
gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
free_qpl_array:
kvfree(qpls);
return err;
}
static void gve_free_qpls(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *cfg)
{
int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
struct gve_queue_page_list *qpls = cfg->qpls;
int i;
if (!qpls)
return;
for (i = 0; i < max_queues; i++)
gve_free_queue_page_list(priv, &qpls[i], i);
kvfree(qpls);
cfg->qpls = NULL;
free_qpl:
kvfree(qpl);
}
/* Use this to schedule a reset when the device is capable of continuing
@ -1270,17 +1216,6 @@ static void gve_drain_page_cache(struct gve_priv *priv)
page_frag_cache_drain(&priv->rx[i].page_cache);
}
static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *cfg)
{
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->is_gqi = gve_is_gqi(priv);
cfg->num_xdp_queues = priv->num_xdp_queues;
cfg->tx_cfg = &priv->tx_cfg;
cfg->rx_cfg = &priv->rx_cfg;
cfg->qpls = priv->qpls;
}
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
@ -1288,7 +1223,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->qpls = priv->qpls;
cfg->ring_size = priv->rx_desc_cnt;
cfg->packet_buffer_size = gve_is_gqi(priv) ?
GVE_DEFAULT_RX_BUFFER_SIZE :
@ -1297,11 +1231,9 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
@ -1333,53 +1265,13 @@ static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
}
}
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
gve_free_qpls(priv, qpls_alloc_cfg);
}
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
return err;
}
tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
goto free_qpls;
}
return 0;
free_qpls:
gve_free_qpls(priv, qpls_alloc_cfg);
return err;
}
static void gve_queues_mem_remove(struct gve_priv *priv)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_queues_mem_free(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
priv->qpls = NULL;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
priv->tx = NULL;
priv->rx = NULL;
}
@ -1388,7 +1280,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv)
* No memory is allocated. Passed-in memory is freed on errors.
*/
static int gve_queues_start(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
@ -1396,7 +1287,6 @@ static int gve_queues_start(struct gve_priv *priv,
int err;
/* Record new resources into priv */
priv->qpls = qpls_alloc_cfg->qpls;
priv->tx = tx_alloc_cfg->tx;
priv->rx = rx_alloc_cfg->rx;
@ -1468,23 +1358,19 @@ static int gve_open(struct net_device *dev)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
/* No need to free on error: ownership of resources is lost after
* calling gve_queues_start.
*/
err = gve_queues_start(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
@ -1543,11 +1429,8 @@ static int gve_close(struct net_device *dev)
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
int qpl_start_id;
int err;
qpl_start_id = gve_xdp_tx_start_queue_id(priv);
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@ -1559,27 +1442,19 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
int start_id;
int err;
priv->num_xdp_queues = priv->rx_cfg.num_queues;
start_id = gve_xdp_tx_start_queue_id(priv);
err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
start_id, gve_num_xdp_qpls(priv));
if (err)
goto err;
err = gve_alloc_xdp_rings(priv);
if (err)
goto free_xdp_qpls;
goto err;
err = gve_reg_xdp_info(priv, priv->dev);
if (err)
@ -1597,8 +1472,6 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
free_xdp_qpls:
gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@ -1849,15 +1722,13 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
/* Allocate resources for the new configuration */
err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
tx_alloc_cfg, rx_alloc_cfg);
err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to alloc new queues");
@ -1869,14 +1740,12 @@ int gve_adjust_config(struct gve_priv *priv,
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to close old queues");
gve_queues_mem_free(priv, qpls_alloc_cfg,
tx_alloc_cfg, rx_alloc_cfg);
gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
return err;
}
/* Bring the device back up again with the new resources. */
err = gve_queues_start(priv, qpls_alloc_cfg,
tx_alloc_cfg, rx_alloc_cfg);
err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
@ -1896,23 +1765,18 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
int err;
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* Relay the new config from ethtool */
qpls_alloc_cfg.tx_cfg = &new_tx_config;
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
qpls_alloc_cfg.rx_cfg = &new_rx_config;
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
if (netif_carrier_ok(priv->dev)) {
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
@ -1937,12 +1801,16 @@ static void gve_turndown(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
if (!gve_tx_was_added_to_block(priv, idx))
continue;
napi_disable(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
if (!gve_rx_was_added_to_block(priv, idx))
continue;
napi_disable(&block->napi);
}
@ -1965,6 +1833,9 @@ static void gve_turnup(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
if (!gve_tx_was_added_to_block(priv, idx))
continue;
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@ -1972,11 +1843,21 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->tx_coalesce_usecs);
}
/* Any descs written by the NIC before this barrier will be
* handled by the one-off napi schedule below. Whereas any
* descs after the barrier will generate interrupts.
*/
mb();
napi_schedule(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
if (!gve_rx_was_added_to_block(priv, idx))
continue;
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@ -1984,6 +1865,13 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->rx_coalesce_usecs);
}
/* Any descs written by the NIC before this barrier will be
* handled by the one-off napi schedule below. Whereas any
* descs after the barrier will generate interrupts.
*/
mb();
napi_schedule(&block->napi);
}
gve_set_napi_enabled(priv);
@ -2053,7 +1941,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
bool enable_hdr_split;
int err = 0;
@ -2073,15 +1960,13 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
rx_alloc_cfg.enable_header_split = enable_hdr_split;
rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
if (netif_running(priv->dev))
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
@ -2091,18 +1976,15 @@ static int gve_set_features(struct net_device *netdev,
const netdev_features_t orig_features = netdev->features;
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
int err;
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err) {
/* Revert the change on error. */
netdev->features = orig_features;

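Taken together, the gve_main.c changes above are the scaffolding for the queue api callbacks in the final patch: per-ring QPL registration via gve_register_qpl()/gve_unregister_qpl(), single-queue admin commands, an irq handle stored per notify block, and turnup/turndown that skip rings which were never added to a block. As a rough, hypothetical composition of a per-RX-queue stop (the callback name and the exact ordering are assumptions, not the series' final code):

static int gve_rx_queue_stop_sketch(struct net_device *dev,
				    void *per_q_mem, int idx)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_rx_ring *rx = per_q_mem;
	int err;

	/* Tear down only this queue on the device side. */
	err = gve_adminq_destroy_single_rx_queue(priv, idx);
	if (err)
		return err;

	/* Drop just this ring's queue page list registration, if any. */
	err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
	if (err)
		return err;

	/* Disable napi and reset ring state; the DMA memory is kept so
	 * the queue can be brought straight back up later.
	 */
	if (gve_is_gqi(priv))
		gve_rx_stop_ring_gqi(priv, idx);
	else
		gve_rx_stop_ring_dqo(priv, idx);

	/* Hand the stopped ring's resources back to the caller. */
	*rx = priv->rx[idx];
	memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
	return 0;
}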
drivers/net/ethernet/google/gve/gve_rx.c

@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
u32 slots = rx->mask + 1;
int i;
if (!rx->data.page_info)
return;
if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@ -38,7 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@ -50,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
rx->data.page_info = NULL;
}
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
ctx->skb_head = NULL;
ctx->skb_tail = NULL;
ctx->total_size = 0;
ctx->frag_cnt = 0;
ctx->drop_pkt = false;
}
static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
{
rx->desc.seqno = 1;
rx->cnt = 0;
gve_rx_ctx_clear(&rx->ctx);
}
static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
const u32 slots = priv->rx_desc_cnt;
size_t size;
/* Reset desc ring */
if (rx->desc.desc_ring) {
size = slots * sizeof(rx->desc.desc_ring[0]);
memset(rx->desc.desc_ring, 0, size);
}
/* Reset q_resources */
if (rx->q_resources)
memset(rx->q_resources, 0, sizeof(*rx->q_resources));
gve_rx_init_ring_state_gqi(rx);
}
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@ -59,6 +96,7 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_gqi(priv, idx);
}
static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
@ -68,25 +106,38 @@ static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
u32 qpl_id;
bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL;
if (rx->desc.desc_ring) {
bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL;
}
dma_free_coherent(dev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
if (rx->q_resources) {
dma_free_coherent(dev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
}
gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
rx->data.data_ring = NULL;
if (rx->data.data_ring) {
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
rx->data.data_ring = NULL;
}
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
if (rx->data.qpl) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
rx->data.qpl = NULL;
}
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@ -143,12 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.page_info)
return -ENOMEM;
if (!rx->data.raw_addressing) {
u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
rx->data.qpl = &cfg->qpls[qpl_id];
}
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@ -201,8 +246,6 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
rx->data.qpl = NULL;
return err;
alloc_err_rda:
@ -213,15 +256,6 @@ alloc_err_rda:
return err;
}
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
ctx->skb_head = NULL;
ctx->skb_tail = NULL;
ctx->total_size = 0;
ctx->frag_cnt = 0;
ctx->drop_pkt = false;
}
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@ -238,6 +272,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
struct device *hdev = &priv->pdev->dev;
u32 slots = cfg->ring_size;
int filled_pages;
int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
int err;
@ -270,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
goto abort_with_slots;
}
if (!rx->data.raw_addressing) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_page_cnt = cfg->ring_size;
rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!rx->data.qpl) {
err = -ENOMEM;
goto abort_with_copy_pool;
}
}
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
goto abort_with_copy_pool;
goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@ -300,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
err = -ENOMEM;
goto abort_with_q_resources;
}
rx->cnt = 0;
rx->db_threshold = slots / 2;
rx->desc.seqno = 1;
gve_rx_init_ring_state_gqi(rx);
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
@ -315,6 +362,11 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
abort_with_qpl:
if (!rx->data.raw_addressing) {
gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
rx->data.qpl = NULL;
}
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@ -333,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)

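The gve_rx.c reshuffle above separates per-ring operations that used to be entangled with whole-device bring-up; roughly (GQI shown, the DQO file below mirrors it, arguments abridged):

/*
 *	gve_rx_alloc_ring_gqi()   allocate desc/data rings, the copy pool
 *	                          and, in QPL mode, this ring's own QPL
 *	gve_rx_start_ring_gqi()   attach the ring to its notify block
 *	gve_rx_stop_ring_gqi()    remove napi, detach from the block and
 *	                          reset descriptor/counter state; memory
 *	                          is kept so the ring can be restarted
 *	gve_rx_free_ring_gqi()    free the rings, copy pool and QPL
 */

Keeping the reset separate from the free is presumably what lets a stopped ring either be restarted or have its memory handed back and released through the queue api, without a whole-device teardown.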
drivers/net/ethernet/google/gve/gve_rx_dqo.c

@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
}
}
static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
const u32 buffer_queue_slots,
const u32 completion_queue_slots)
{
int i;
/* Set buffer queue state */
rx->dqo.bufq.mask = buffer_queue_slots - 1;
rx->dqo.bufq.head = 0;
rx->dqo.bufq.tail = 0;
/* Set completion queue state */
rx->dqo.complq.num_free_slots = completion_queue_slots;
rx->dqo.complq.mask = completion_queue_slots - 1;
rx->dqo.complq.cur_gen_bit = 0;
rx->dqo.complq.head = 0;
/* Set RX SKB context */
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
/* Set up linked list of buffer IDs */
if (rx->dqo.buf_states) {
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
}
rx->dqo.free_buf_states = 0;
rx->dqo.recycled_buf_states.head = -1;
rx->dqo.recycled_buf_states.tail = -1;
rx->dqo.used_buf_states.head = -1;
rx->dqo.used_buf_states.tail = -1;
}
static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
size_t size;
int i;
const u32 buffer_queue_slots = priv->rx_desc_cnt;
const u32 completion_queue_slots = priv->rx_desc_cnt;
/* Reset buffer queue */
if (rx->dqo.bufq.desc_ring) {
size = sizeof(rx->dqo.bufq.desc_ring[0]) *
buffer_queue_slots;
memset(rx->dqo.bufq.desc_ring, 0, size);
}
/* Reset completion queue */
if (rx->dqo.complq.desc_ring) {
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
memset(rx->dqo.complq.desc_ring, 0, size);
}
/* Reset q_resources */
if (rx->q_resources)
memset(rx->q_resources, 0, sizeof(*rx->q_resources));
/* Reset buf states */
if (rx->dqo.buf_states) {
for (i = 0; i < rx->dqo.num_buf_states; i++) {
struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
if (bs->page_info.page)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
}
gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
completion_queue_slots);
}
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@ -220,6 +296,7 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
}
static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
@ -230,6 +307,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@ -248,7 +326,11 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
rx->dqo.qpl = NULL;
if (rx->dqo.qpl) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
if (rx->dqo.bufq.desc_ring) {
size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
@ -273,10 +355,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
const u32 buf_count)
{
struct device *hdev = &priv->pdev->dev;
int buf_count = rx->dqo.bufq.mask + 1;
rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
&rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@ -300,8 +382,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
int qpl_page_cnt;
size_t size;
int i;
u32 qpl_id;
const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
@ -311,11 +394,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
rx->dqo.bufq.mask = buffer_queue_slots - 1;
rx->dqo.complq.num_free_slots = completion_queue_slots;
rx->dqo.complq.mask = completion_queue_slots - 1;
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
@ -328,19 +406,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
/* Allocate header buffers for header-split */
if (cfg->enable_header_split)
if (gve_rx_alloc_hdr_bufs(priv, rx))
if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
goto err;
/* Set up linked list of buffer IDs */
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
rx->dqo.recycled_buf_states.head = -1;
rx->dqo.recycled_buf_states.tail = -1;
rx->dqo.used_buf_states.head = -1;
rx->dqo.used_buf_states.tail = -1;
/* Allocate RX completion queue */
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
@ -357,9 +425,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.qpl = &cfg->qpls[qpl_id];
rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
}
@ -368,6 +440,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->q_resources)
goto err;
gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
completion_queue_slots);
return 0;
err:
@ -390,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)

drivers/net/ethernet/google/gve/gve_tx.c

@ -216,6 +216,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@ -223,8 +224,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo);
if (tx->tx_fifo.qpl) {
if (tx->tx_fifo.base)
gve_tx_fifo_release(priv, &tx->tx_fifo);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@ -255,6 +260,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@ -279,12 +286,17 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_page_cnt = priv->tx_pages_per_qpl;
tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
tx->tx_fifo.qpl = &cfg->qpls[qpl_id];
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_desc;
goto abort_with_qpl;
}
tx->q_resources =
@ -300,6 +312,11 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
if (!tx->raw_addressing) {
gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@ -316,12 +333,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");

drivers/net/ethernet/google/gve/gve_tx_dqo.c

@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@ -236,7 +237,11 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
kvfree(tx->dqo.tx_qpl_buf_next);
tx->dqo.tx_qpl_buf_next = NULL;
tx->dqo.qpl = NULL;
if (tx->dqo.qpl) {
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
@ -282,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
int qpl_page_cnt;
size_t bytes;
u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
@ -349,9 +356,13 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_page_cnt = priv->tx_pages_per_qpl;
tx->dqo.qpl = &cfg->qpls[qpl_id];
tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
if (gve_tx_qpl_buf_init(tx))
goto err;
@ -371,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");

include/linux/netdevice.h

@ -1957,6 +1957,7 @@ enum netdev_reg_state {
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
* @stat_ops: Optional ops for queue-aware statistics
* @queue_mgmt_ops: Optional ops for queue management
*
* @gso_max_size: Maximum size of generic segmentation offload
* @tso_max_size: Device (as in HW) limit on the max TSO request size
@ -2340,6 +2341,8 @@ struct net_device {
const struct netdev_stat_ops *stat_ops;
const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS 65535u
#define GSO_LEGACY_MAX_SIZE 65536u

include/net/netdev_queues.h

@ -87,6 +87,37 @@ struct netdev_stat_ops {
struct netdev_queue_stats_tx *tx);
};
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
*
* @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
*
* @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
* The new memory is written at the specified address.
*
* @ndo_queue_mem_free: Free memory from an RX queue.
*
* @ndo_queue_start: Start an RX queue with the specified memory and at the
* specified index.
*
* @ndo_queue_stop: Stop the RX queue at the specified index. The stopped
* queue's memory is written at the specified address.
*/
struct netdev_queue_mgmt_ops {
size_t ndo_queue_mem_size;
int (*ndo_queue_mem_alloc)(struct net_device *dev,
void *per_queue_mem,
int idx);
void (*ndo_queue_mem_free)(struct net_device *dev,
void *per_queue_mem);
int (*ndo_queue_start)(struct net_device *dev,
void *per_queue_mem,
int idx);
int (*ndo_queue_stop)(struct net_device *dev,
void *per_queue_mem,
int idx);
};
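As a usage note for the ops table above: a driver opts in by filling one of these structs and pointing the new net_device::queue_mgmt_ops member (added in the netdevice.h hunk) at it before registering the netdev. A minimal, hypothetical example; the foo_* names are placeholders, not part of this series:

static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct foo_rx_queue_mem),
	.ndo_queue_mem_alloc	= foo_rx_queue_mem_alloc,
	.ndo_queue_mem_free	= foo_rx_queue_mem_free,
	.ndo_queue_start	= foo_rx_queue_start,
	.ndo_queue_stop		= foo_rx_queue_stop,
};

/* in the probe path, before register_netdev(): */
netdev->queue_mgmt_ops = &foo_queue_mgmt_ops;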
/**
* DOC: Lockless queue stopping / waking helpers.
*