Long Li says:

====================
Introduce Microsoft Azure Network Adapter (MANA) RDMA driver [netdev prep]

The first 11 patches modify the MANA Ethernet driver to support the
RDMA driver.

* 'mana-shared-6.2' of https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  net: mana: Define data structures for protection domain and memory registration
  net: mana: Define data structures for allocating doorbell page from GDMA
  net: mana: Define and process GDMA response code GDMA_STATUS_MORE_ENTRIES
  net: mana: Define max values for SGL entries
  net: mana: Move header files to a common location
  net: mana: Record port number in netdev
  net: mana: Export Work Queue functions for use by RDMA driver
  net: mana: Set the DMA device max segment size
  net: mana: Handle vport sharing between devices
  net: mana: Record the physical address for doorbell page region
  net: mana: Add support for auxiliary device
====================

Link: https://lore.kernel.org/all/1667502990-2559-1-git-send-email-longli@linuxonhyperv.com/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed on 2022-11-10 12:07:18 -08:00
commit 79b0872b10
13 changed files with 372 additions and 45 deletions
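
Taken together, these patches move the shared MANA headers to include/net/mana and export the queue, vport and DMA-region helpers in the NET_MANA symbol namespace, so a separate module (the upcoming RDMA driver) can consume them. A minimal sketch of the consumer side, assuming only what this merge exports (module boilerplate trimmed):

#include <linux/module.h>
#include <net/mana/gdma.h>
#include <net/mana/mana.h>

/* Required to resolve the EXPORT_SYMBOL_NS(..., NET_MANA) symbols below. */
MODULE_IMPORT_NS(NET_MANA);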


@ -9543,6 +9543,7 @@ F: include/asm-generic/hyperv-tlfs.h
F: include/asm-generic/mshyperv.h
F: include/clocksource/hyperv_timer.h
F: include/linux/hyperv.h
F: include/net/mana
F: include/uapi/linux/hyperv.h
F: net/vmw_vsock/hyperv_transport.c
F: tools/hv/


@ -19,6 +19,7 @@ config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
depends on PCI_MSI && X86_64
depends on PCI_HYPERV
select AUXILIARY_BUS
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.


@ -6,7 +6,7 @@
#include <linux/utsname.h>
#include <linux/version.h>
#include "mana.h"
#include <net/mana/mana.h>
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
@ -44,6 +44,9 @@ static void mana_gd_init_vf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
gc->phys_db_page_base = gc->bar0_pa +
mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
@ -149,6 +152,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
@ -194,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
req.type = queue->type;
req.pdid = queue->gdma_dev->pdid;
req.doolbell_id = queue->gdma_dev->doorbell;
req.gdma_region = queue->mem_info.gdma_region;
req.gdma_region = queue->mem_info.dma_region_handle;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
req.eq_pci_msix_index = queue->eq.msix_index;
@ -208,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
queue->id = resp.queue_index;
queue->eq.disable_needed = true;
queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
return 0;
}
@ -667,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
return err;
}
static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
int mana_gd_destroy_dma_region(struct gdma_context *gc,
gdma_obj_handle_t dma_region_handle)
{
struct gdma_destroy_dma_region_req req = {};
struct gdma_general_resp resp = {};
int err;
if (gdma_region == GDMA_INVALID_DMA_REGION)
return;
if (dma_region_handle == GDMA_INVALID_DMA_REGION)
return 0;
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
sizeof(resp));
req.gdma_region = gdma_region;
req.dma_region_handle = dma_region_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status)
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
return -EPROTO;
}
return 0;
}
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@ -729,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
if (err)
goto out;
if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
if (resp.hdr.status ||
resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
resp.hdr.status);
err = -EPROTO;
goto out;
}
gmi->gdma_region = resp.gdma_region;
gmi->dma_region_handle = resp.dma_region_handle;
out:
kfree(req);
return err;
@ -859,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
return;
}
mana_gd_destroy_dma_region(gc, gmi->gdma_region);
mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
mana_gd_free_memory(gmi);
kfree(queue);
}
@ -1393,6 +1404,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
if (err) {
dev_err(&pdev->dev, "Failed to set dma device segment size\n");
goto release_region;
}
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
if (!gc)
@ -1400,6 +1417,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&gc->eq_test_event_mutex);
pci_set_drvdata(pdev, gc);
gc->bar0_pa = pci_resource_start(pdev, 0);
bar0_va = pci_iomap(pdev, bar, 0);
if (!bar0_va)


@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
#include "gdma.h"
#include "hw_channel.h"
#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
@ -836,7 +836,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
goto out;
}
if (ctx->status_code) {
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
err = -EPROTO;


@ -8,7 +8,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "mana.h"
#include <net/mana/mana.h>
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{


@ -12,7 +12,20 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include "mana.h"
#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>
static DEFINE_IDA(mana_adev_ida);
static int mana_adev_idx_alloc(void)
{
return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}
static void mana_adev_idx_free(int idx)
{
ida_free(&mana_adev_ida, idx);
}
/* Microsoft Azure Network Adapter (MANA) functions */
@ -176,7 +189,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
@ -633,13 +646,48 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
return 0;
}
static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id)
void mana_uncfg_vport(struct mana_port_context *apc)
{
mutex_lock(&apc->vport_mutex);
apc->vport_use_count--;
WARN_ON(apc->vport_use_count < 0);
mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id)
{
struct mana_config_vport_resp resp = {};
struct mana_config_vport_req req = {};
int err;
/* This function is used to program the Ethernet port in the hardware
* table. It can be called from the Ethernet driver or the RDMA driver.
*
* For Ethernet usage, the hardware supports only one active user on a
* physical port. The driver checks on the port usage before programming
* the hardware when creating the RAW QP (RDMA driver) or exposing the
* device to kernel NET layer (Ethernet driver).
*
* Because the RDMA driver doesn't know in advance which QP type the
* user will create, it exposes the device with all its ports. The user
* may not be able to create a RAW QP on a port if that port is already
* in use by the Ethernet driver in the kernel.
*
* This physical port limitation only applies to the RAW QP. For RC QP,
* the hardware doesn't have this limitation. The user can create RC
* QPs on a physical port up to the hardware limits independent of the
* Ethernet usage on the same port.
*/
mutex_lock(&apc->vport_mutex);
if (apc->vport_use_count > 0) {
mutex_unlock(&apc->vport_mutex);
return -EBUSY;
}
apc->vport_use_count++;
mutex_unlock(&apc->vport_mutex);
mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
sizeof(req), sizeof(resp));
req.vport = apc->port_handle;
@ -666,9 +714,16 @@ static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
apc->tx_shortform_allowed = resp.short_form_allowed;
apc->tx_vp_offset = resp.tx_vport_offset;
netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
if (err)
mana_uncfg_vport(apc);
return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
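
The comment above states the port-sharing contract; the sketch below shows how a consumer's RAW-QP path might honour it. Only mana_cfg_vport() and mana_uncfg_vport() come from this series; the example_* wrappers and the elided QP-creation step are hypothetical.

#include <net/mana/mana.h>

static int example_raw_qp_claim_port(struct mana_port_context *apc,
				     u32 pd_id, u32 doorbell_id)
{
	int err;

	/* Fails with -EBUSY when the Ethernet driver (or another RAW QP)
	 * already owns this physical port.
	 */
	err = mana_cfg_vport(apc, pd_id, doorbell_id);
	if (err)
		return err;

	/* ... create the RAW QP against apc->port_handle here ... */

	return 0;
}

static void example_raw_qp_release_port(struct mana_port_context *apc)
{
	/* Drops the vport use count taken by mana_cfg_vport(). */
	mana_uncfg_vport(apc);
}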
static int mana_cfg_vport_steering(struct mana_port_context *apc,
enum TRI_STATE rx,
@ -729,16 +784,19 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
resp.hdr.status);
err = -EPROTO;
}
netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
apc->port_handle, num_entries);
out:
kfree(req);
return err;
}
static int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
struct mana_obj_spec *cq_spec,
mana_handle_t *wq_obj)
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
struct mana_obj_spec *cq_spec,
mana_handle_t *wq_obj)
{
struct mana_create_wqobj_resp resp = {};
struct mana_create_wqobj_req req = {};
@ -787,9 +845,10 @@ static int mana_create_wq_obj(struct mana_port_context *apc,
out:
return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj)
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj)
{
struct mana_destroy_wqobj_resp resp = {};
struct mana_destroy_wqobj_req req = {};
@ -814,6 +873,7 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
static void mana_destroy_eq(struct mana_context *ac)
{
@ -1463,10 +1523,10 @@ static int mana_create_txq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));
wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
wq_spec.queue_size = txq->gdma_sq->queue_size;
cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@ -1481,8 +1541,10 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->gdma_sq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;
txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
txq->gdma_sq->mem_info.dma_region_handle =
GDMA_INVALID_DMA_REGION;
cq->gdma_cq->mem_info.dma_region_handle =
GDMA_INVALID_DMA_REGION;
txq->gdma_txq_id = txq->gdma_sq->id;
@ -1693,10 +1755,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));
wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
wq_spec.queue_size = rxq->gdma_rq->queue_size;
cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@ -1709,8 +1771,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->gdma_rq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;
rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
rxq->gdma_id = rxq->gdma_rq->id;
cq->gdma_id = cq->gdma_cq->id;
@ -1791,6 +1853,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
}
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
if (gd->gdma_context->is_pf)
mana_pf_deregister_hw_vport(apc);
@ -2063,12 +2126,16 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
mutex_init(&apc->vport_mutex);
apc->vport_use_count = 0;
ndev->netdev_ops = &mana_devops;
ndev->ethtool_ops = &mana_ethtool_ops;
ndev->mtu = ETH_DATA_LEN;
ndev->max_mtu = ndev->mtu;
ndev->min_mtu = ndev->mtu;
ndev->needed_headroom = MANA_HEADROOM;
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
netif_carrier_off(ndev);
@ -2106,6 +2173,69 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
return err;
}
static void adev_release(struct device *dev)
{
struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
kfree(madev);
}
static void remove_adev(struct gdma_dev *gd)
{
struct auxiliary_device *adev = gd->adev;
int id = adev->id;
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
mana_adev_idx_free(id);
gd->adev = NULL;
}
static int add_adev(struct gdma_dev *gd)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
int ret;
madev = kzalloc(sizeof(*madev), GFP_KERNEL);
if (!madev)
return -ENOMEM;
adev = &madev->adev;
ret = mana_adev_idx_alloc();
if (ret < 0)
goto idx_fail;
adev->id = ret;
adev->name = "rdma";
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
ret = auxiliary_device_init(adev);
if (ret)
goto init_fail;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
gd->adev = adev;
return 0;
add_fail:
auxiliary_device_uninit(adev);
init_fail:
mana_adev_idx_free(adev->id);
idx_fail:
kfree(madev);
return ret;
}
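
add_adev() publishes the GDMA device on the auxiliary bus under the name "rdma"; a consumer binds to it with a standard auxiliary_driver, roughly as sketched below. The "mana.rdma" match string assumes the registering Ethernet module is named "mana", and every example_* identifier is hypothetical.

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <net/mana/mana_auxiliary.h>

static int example_rdma_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
	struct gdma_dev *gd = madev->mdev;

	/* Set up RDMA state on top of the shared GDMA device. */
	(void)gd;
	return 0;
}

static void example_rdma_remove(struct auxiliary_device *adev)
{
	/* Tear down whatever probe() created. */
}

static const struct auxiliary_device_id example_rdma_id_table[] = {
	{ .name = "mana.rdma", },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, example_rdma_id_table);

static struct auxiliary_driver example_rdma_driver = {
	.probe = example_rdma_probe,
	.remove = example_rdma_remove,
	.id_table = example_rdma_id_table,
};
module_auxiliary_driver(example_rdma_driver);

MODULE_LICENSE("GPL");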
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@ -2173,6 +2303,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
break;
}
}
err = add_adev(gd);
out:
if (err)
mana_remove(gd, false);
@ -2189,6 +2321,10 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
int err;
int i;
/* adev currently doesn't support suspending, always remove it */
if (gd->adev)
remove_adev(gd);
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
if (!ndev) {
@ -2221,7 +2357,6 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
mana_destroy_eq(ac);
out:
mana_gd_deregister_device(gd);


@ -5,7 +5,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include "mana.h"
#include <net/mana/mana.h>
static const struct {
char name[ETH_GSTRING_LEN];


@ -6,7 +6,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include "shm_channel.h"
#include <net/mana/shm_channel.h>
#define PAGE_FRAME_L48_WIDTH_BYTES 6
#define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)


@ -9,6 +9,8 @@
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
*/
@ -22,11 +24,19 @@ enum gdma_request_type {
GDMA_GENERATE_TEST_EQE = 10,
GDMA_CREATE_QUEUE = 12,
GDMA_DISABLE_QUEUE = 13,
GDMA_ALLOCATE_RESOURCE_RANGE = 22,
GDMA_DESTROY_RESOURCE_RANGE = 24,
GDMA_CREATE_DMA_REGION = 25,
GDMA_DMA_REGION_ADD_PAGES = 26,
GDMA_DESTROY_DMA_REGION = 27,
GDMA_CREATE_PD = 29,
GDMA_DESTROY_PD = 30,
GDMA_CREATE_MR = 31,
GDMA_DESTROY_MR = 32,
};
#define GDMA_RESOURCE_DOORBELL_PAGE 27
enum gdma_queue_type {
GDMA_INVALID_QUEUE,
GDMA_SQ,
@ -55,6 +65,8 @@ enum {
GDMA_DEVICE_MANA = 2,
};
typedef u64 gdma_obj_handle_t;
struct gdma_resource {
/* Protect the bitmap */
spinlock_t lock;
@ -188,7 +200,7 @@ struct gdma_mem_info {
u64 length;
/* Allocated by the PF driver */
u64 gdma_region;
gdma_obj_handle_t dma_region_handle;
};
#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@ -204,6 +216,8 @@ struct gdma_dev {
/* GDMA driver specific pointer */
void *driver_data;
struct auxiliary_device *adev;
};
#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
@ -349,9 +363,11 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void __iomem *shm_base;
void __iomem *db_page_base;
phys_addr_t phys_db_page_base;
u32 db_page_size;
int numa_node;
@ -424,6 +440,13 @@ struct gdma_wqe {
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
sizeof(struct gdma_sge))
#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
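
These macros replace limits that were hard-coded elsewhere in this series (the WARN_ON_ONCE(num_sge > 30) in mana_start_xmit and the removed GDMA_MAX_RQE_SGES of 15). Assuming GDMA_MAX_SQE_SIZE = 512, GDMA_MAX_RQE_SIZE = 256, sizeof(struct gdma_sge) = 16 and INLINE_OOB_SMALL_SIZE = 8 (values not shown in this hunk), the arithmetic reproduces exactly those numbers:

#include <linux/build_bug.h>	/* for static_assert() */

/* Sanity-check sketch under the size assumptions stated above. */
static_assert(MAX_TX_WQE_SGL_ENTRIES == 30);	/* (512 - 16 - 8) / 16 = 30, integer division */
static_assert(MAX_RX_WQE_SGL_ENTRIES == 15);	/* (256 - 16) / 16 = 15 */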
struct gdma_cqe {
u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
@ -575,6 +598,26 @@ struct gdma_register_device_resp {
u32 db_id;
}; /* HW DATA */
struct gdma_allocate_resource_range_req {
struct gdma_req_hdr hdr;
u32 resource_type;
u32 num_resources;
u32 alignment;
u32 allocated_resources;
};
struct gdma_allocate_resource_range_resp {
struct gdma_resp_hdr hdr;
u32 allocated_resources;
};
struct gdma_destroy_resource_range_req {
struct gdma_req_hdr hdr;
u32 resource_type;
u32 num_resources;
u32 allocated_resources;
};
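
The resource-range messages above are what lets the RDMA driver claim its own doorbell page (resource type GDMA_RESOURCE_DOORBELL_PAGE). A hedged sketch of issuing that request with the helpers used elsewhere in this diff; the function name and the alignment choice are assumptions:

#include <net/mana/gdma.h>

static int example_alloc_doorbell_page(struct gdma_context *gc,
				       u32 *doorbell_page)
{
	struct gdma_allocate_resource_range_resp resp = {};
	struct gdma_allocate_resource_range_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));
	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = 1;
	/* Let GDMA pick the page rather than asking for a specific one. */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*doorbell_page = resp.allocated_resources;
	return 0;
}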
/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
struct gdma_req_hdr hdr;
@ -582,7 +625,7 @@ struct gdma_create_queue_req {
u32 reserved1;
u32 pdid;
u32 doolbell_id;
u64 gdma_region;
gdma_obj_handle_t gdma_region;
u32 reserved2;
u32 queue_size;
u32 log2_throttle_limit;
@ -609,6 +652,28 @@ struct gdma_disable_queue_req {
u32 alloc_res_id_on_creation;
}; /* HW DATA */
enum atb_page_size {
ATB_PAGE_SIZE_4K,
ATB_PAGE_SIZE_8K,
ATB_PAGE_SIZE_16K,
ATB_PAGE_SIZE_32K,
ATB_PAGE_SIZE_64K,
ATB_PAGE_SIZE_128K,
ATB_PAGE_SIZE_256K,
ATB_PAGE_SIZE_512K,
ATB_PAGE_SIZE_1M,
ATB_PAGE_SIZE_2M,
ATB_PAGE_SIZE_MAX,
};
enum gdma_mr_access_flags {
GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};
/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
struct gdma_req_hdr hdr;
@ -635,14 +700,14 @@ struct gdma_create_dma_region_req {
struct gdma_create_dma_region_resp {
struct gdma_resp_hdr hdr;
u64 gdma_region;
gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */
/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
struct gdma_req_hdr hdr;
u64 gdma_region;
gdma_obj_handle_t dma_region_handle;
u32 page_addr_list_len;
u32 reserved3;
@ -654,9 +719,88 @@ struct gdma_dma_region_add_pages_req {
struct gdma_destroy_dma_region_req {
struct gdma_req_hdr hdr;
u64 gdma_region;
gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */
enum gdma_pd_flags {
GDMA_PD_FLAG_INVALID = 0,
};
struct gdma_create_pd_req {
struct gdma_req_hdr hdr;
enum gdma_pd_flags flags;
u32 reserved;
};/* HW DATA */
struct gdma_create_pd_resp {
struct gdma_resp_hdr hdr;
gdma_obj_handle_t pd_handle;
u32 pd_id;
u32 reserved;
};/* HW DATA */
struct gdma_destroy_pd_req {
struct gdma_req_hdr hdr;
gdma_obj_handle_t pd_handle;
};/* HW DATA */
struct gdma_destory_pd_resp {
struct gdma_resp_hdr hdr;
};/* HW DATA */
enum gdma_mr_type {
/* Guest Virtual Address - MRs of this type allow access
* to memory mapped by PTEs associated with this MR using a virtual
* address that is set up in the MST
*/
GDMA_MR_TYPE_GVA = 2,
};
struct gdma_create_mr_params {
gdma_obj_handle_t pd_handle;
enum gdma_mr_type mr_type;
union {
struct {
gdma_obj_handle_t dma_region_handle;
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
};
};
struct gdma_create_mr_request {
struct gdma_req_hdr hdr;
gdma_obj_handle_t pd_handle;
enum gdma_mr_type mr_type;
u32 reserved_1;
union {
struct {
gdma_obj_handle_t dma_region_handle;
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
};
u32 reserved_2;
};/* HW DATA */
struct gdma_create_mr_response {
struct gdma_resp_hdr hdr;
gdma_obj_handle_t mr_handle;
u32 lkey;
u32 rkey;
};/* HW DATA */
struct gdma_destroy_mr_request {
struct gdma_req_hdr hdr;
gdma_obj_handle_t mr_handle;
};/* HW DATA */
struct gdma_destroy_mr_response {
struct gdma_resp_hdr hdr;
};/* HW DATA */
int mana_gd_verify_vf_version(struct pci_dev *pdev);
int mana_gd_register_device(struct gdma_dev *gd);
@ -683,4 +827,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
int mana_gd_destroy_dma_region(struct gdma_context *gc,
gdma_obj_handle_t dma_region_handle);
#endif /* _GDMA_H */
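
To show how the new PD/MR plumbing fits together, here is a hedged sketch of issuing a GDMA_CREATE_MR request for a GVA-type region using only the structures declared above and the helpers appearing elsewhere in this diff; the function name is hypothetical and error handling is reduced to the bare minimum.

#include <net/mana/gdma.h>

static int example_create_gva_mr(struct gdma_context *gc,
				 const struct gdma_create_mr_params *params,
				 struct gdma_create_mr_response *out)
{
	struct gdma_create_mr_request req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(*out));
	req.pd_handle = params->pd_handle;
	req.mr_type = params->mr_type;
	req.gva.dma_region_handle = params->gva.dma_region_handle;
	req.gva.virtual_address = params->gva.virtual_address;
	req.gva.access_flags = params->gva.access_flags;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(*out), out);
	if (err || out->hdr.status)
		return err ? err : -EPROTO;

	/* out->mr_handle, out->lkey and out->rkey are now valid. */
	return 0;
}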


@ -265,8 +265,6 @@ struct mana_cq {
int budget;
};
#define GDMA_MAX_RQE_SGES 15
struct mana_recv_buf_oob {
/* A valid GDMA work request representing the data buffer. */
struct gdma_wqe_request wqe_req;
@ -276,7 +274,7 @@ struct mana_recv_buf_oob {
/* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
struct gdma_sge sgl[GDMA_MAX_RQE_SGES];
struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
/* Required to store the result of mana_gd_post_work_request.
* gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
@ -380,6 +378,10 @@ struct mana_port_context {
mana_handle_t port_handle;
mana_handle_t pf_filter_handle;
/* Mutex for sharing access to vport_use_count */
struct mutex vport_mutex;
int vport_use_count;
u16 port_idx;
bool port_is_up;
@ -631,4 +633,16 @@ struct mana_tx_package {
struct gdma_posted_wqe_info wqe_info;
};
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
struct mana_obj_spec *cq_spec,
mana_handle_t *wq_obj);
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj);
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */


@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022, Microsoft Corporation. */
#include "mana.h"
#include <linux/auxiliary_bus.h>
struct mana_adev {
struct auxiliary_device adev;
struct gdma_dev *mdev;
};