linux-stable/drivers/infiniband/hw/hfi1/ipoib_rx.c
Grzegorz Andrejczuk 4730f4a6c6 IB/hfi1: Activate the dummy netdev
As described in earlier patches, the IPoIB netdev will share receive
contexts with the existing VNIC netdev through a dummy netdev. The
following changes are made to achieve that:
- Set up netdev receive contexts after the user contexts. A function is
  added to count the available netdev receive contexts.
- Add functions to set/get the receive map table free index.
- Rename NUM_VNIC_MAP_ENTRIES to NUM_NETDEV_MAP_ENTRIES.
- Let the dummy netdev own the receive contexts instead of VNIC.
- Allocate the dummy netdev when the hfi1 device is added and free it
  when the device is removed (a sketch of this pattern follows the
  list).
- Initialize the AIP RSM rules when the IPoIB rxq is initialized and
  remove the rules when it is de-initialized.
- Convert VNIC to use the dummy netdev.
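
For readers new to the pattern, here is a minimal sketch of the
dummy-netdev technique the series relies on: a driver-private
net_device that is initialized but never registered, existing only to
give NAPI contexts a home so that several consumers (here IPoIB and
VNIC) can share the same receive contexts. Everything prefixed my_ is
hypothetical; init_dummy_netdev(), netif_napi_add() and
napi_complete_done() are the real kernel APIs, with v5.7-era
signatures (netif_napi_add() still took a weight argument then).

#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_rxq {
        struct napi_struct napi;   /* polled on behalf of all consumers */
};

static int my_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;         /* a real driver would reap its ring here */

        if (work_done < budget)
                napi_complete_done(napi, work_done);
        return work_done;
}

/* Allocate a never-registered netdev to host the shared NAPI context. */
static struct net_device *my_alloc_dummy_netdev(struct my_rxq *rxq)
{
        struct net_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return NULL;

        init_dummy_netdev(dev);    /* minimal init, no register_netdev() */
        netif_napi_add(dev, &rxq->napi, my_poll, NAPI_POLL_WEIGHT);
        napi_enable(&rxq->napi);
        return dev;
}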

Link: https://lore.kernel.org/r/20200511160649.173205.4626.stgit@awfm-01.aw.intel.com
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Sadanand Warrier <sadanand.warrier@intel.com>
Signed-off-by: Grzegorz Andrejczuk <grzegorz.andrejczuk@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2020-05-21 11:23:56 -03:00


// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

#include "netdev.h"
#include "ipoib.h"

#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))
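
/*
 * Copy a received IPoIB buffer into a freshly allocated skb: the first
 * two bytes of @data carry the network protocol, and the 4-byte
 * encapsulation header is pulled so skb->data ends up at the network
 * header.
 */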
static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
        void *dst_data;

        skb_checksum_none_assert(skb);
        skb->protocol = *((__be16 *)data);

        dst_data = skb_put(skb, size);
        memcpy(dst_data, data, size);

        skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
        skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}
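
/*
 * Build an skb around a page-fragment allocation for buffers too large
 * for the NAPI skb cache; fall back to napi_alloc_skb() if the frag
 * allocation fails.
 */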
static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
{
        struct sk_buff *skb;
        int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
        void *frag;

        skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        skb_size = SKB_DATA_ALIGN(skb_size);
        frag = napi_alloc_frag(skb_size);

        if (unlikely(!frag))
                return napi_alloc_skb(napi, size);

        skb = build_skb(frag, skb_size);

        if (unlikely(!skb)) {
                skb_free_frag(frag);
                return NULL;
        }

        skb_reserve(skb, HFI1_IPOIB_SKB_PAD);

        return skb;
}
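
/**
 * hfi1_ipoib_prepare_skb - allocate an skb and copy a receive buffer into it
 * @rxq: receive queue whose NAPI context backs the allocation
 * @size: number of bytes to copy from @data
 * @data: received buffer, beginning with the IPoIB encapsulation header
 *
 * Return: the prepared skb, or NULL on allocation failure.
 */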
struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
                                       int size, void *data)
{
        struct napi_struct *napi = &rxq->napi;
        int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
        struct sk_buff *skb;

        /*
         * For buffers that fit in a single page (less skb overhead),
         * use the NAPI skb cache; otherwise fall back to the NAPI
         * frag cache.
         */
        if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
                skb = napi_alloc_skb(napi, skb_size);
        else
                skb = prepare_frag_skb(napi, skb_size);

        if (unlikely(!skb))
                return NULL;

        copy_ipoib_buf(skb, data, size);

        return skb;
}
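
/*
 * Set up the shared netdev receive contexts and install the AIP RSM
 * rules that steer IPoIB traffic to them.
 */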
int hfi1_ipoib_rxq_init(struct net_device *netdev)
{
        struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
        struct hfi1_devdata *dd = ipoib_priv->dd;
        int ret;

        ret = hfi1_netdev_rx_init(dd);
        if (ret)
                return ret;

        hfi1_init_aip_rsm(dd);

        return ret;
}
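
/*
 * Tear down in the reverse order of hfi1_ipoib_rxq_init(): remove the
 * AIP RSM rules first, then release the shared receive contexts.
 */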
void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
{
        struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
        struct hfi1_devdata *dd = ipoib_priv->dd;

        hfi1_deinit_aip_rsm(dd);
        hfi1_netdev_rx_destroy(dd);
}
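
/*
 * Usage sketch (not part of the driver): roughly how a receive handler
 * could hand a completed packet to the stack with the helper above.
 * example_deliver() is a hypothetical name and the ring-reaping details
 * are omitted; napi_gro_receive() is the real kernel API.
 */
static void example_deliver(struct hfi1_netdev_rxq *rxq, void *data, int size)
{
        struct sk_buff *skb = hfi1_ipoib_prepare_skb(rxq, size, data);

        if (likely(skb))
                napi_gro_receive(&rxq->napi, skb);
}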