Merge branch 'irdma' into rdma.git for-next

Shiraz Saleem says:

====================
Add Intel Ethernet Protocol Driver for RDMA (irdma)

The following patch series introduces a unified Intel Ethernet Protocol
Driver for RDMA (irdma) for the X722 iWARP device and a new E810 device
which supports iWARP and RoCEv2. The irdma module replaces the legacy
i40iw module for X722 and extends the ABI already defined for i40iw. It is
backward compatible with the legacy X722 rdma-core provider (libi40iw).

X722 and E810 are RDMA-capable PCI network devices. The RDMA block of the
parent device is represented via an auxiliary device exported to 'irdma'
using the core auxiliary bus infrastructure recently added in the 5.11
kernel. The parent PCI netdev drivers 'i40e' and 'ice' register auxiliary
RDMA devices, with their private data/ops encapsulated, which bind to
auxiliary drivers registered in the irdma module (a minimal sketch of this
binding appears below).

Currently, the default protocol for E810 is RoCEv2. Runtime support for
switching the protocol to iWARP will be made available via devlink in a
future patch.
====================

Link: https://lore.kernel.org/r/20210602205138.889-1-shiraz.saleem@intel.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

* branch 'irdma':
  RDMA/irdma: Update MAINTAINERS file
  RDMA/irdma: Add irdma Kconfig/Makefile and remove i40iw
  RDMA/irdma: Add ABI definitions
  RDMA/irdma: Add dynamic tracing for CM
  RDMA/irdma: Add miscellaneous utility definitions
  RDMA/irdma: Add user/kernel shared libraries
  RDMA/irdma: Add RoCEv2 UD OP support
  RDMA/irdma: Implement device supported verb APIs
  RDMA/irdma: Add PBLE resource manager
  RDMA/irdma: Add connection manager
  RDMA/irdma: Add QoS definitions
  RDMA/irdma: Add privileged UDA queue implementation
  RDMA/irdma: Add HMC backing store setup functions
  RDMA/irdma: Implement HW Admin Queue OPs
  RDMA/irdma: Implement device initialization definitions
  RDMA/irdma: Register auxiliary driver and implement private channel OPs
  i40e: Register auxiliary devices to provide RDMA
  i40e: Prep i40e header for aux bus conversion
  ice: Register auxiliary device to provide RDMA
  ice: Implement iidc operations
  ice: Initialize RDMA support
  iidc: Introduce iidc.h
  i40e: Replace one-element array with flexible-array member
Commit 50971e3915 by Jason Gunthorpe, 2021-06-02 19:59:10 -03:00
97 changed files with 33755 additions and 29039 deletions
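
The auxiliary bus binding described in the cover letter above works roughly
as in the skeleton below. This is a minimal, hypothetical sketch rather than
irdma's actual code: the "i40e.iwarp"/"ice.rdma" id strings and the my_rdma_*
identifiers are illustrative assumptions; only the types and calls from
<linux/auxiliary_bus.h> (struct auxiliary_driver, the probe/remove
signatures, module_auxiliary_driver()) are the real kernel API.

/* Hypothetical auxiliary driver skeleton; all names are illustrative. */
#include <linux/module.h>
#include <linux/auxiliary_bus.h>

/* Match "<parent module>.<device name>" strings chosen by the parent
 * PCI netdev driver when it registered its RDMA auxiliary device.
 */
static const struct auxiliary_device_id my_rdma_id_table[] = {
	{ .name = "i40e.iwarp" },
	{ .name = "ice.rdma" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, my_rdma_id_table);

static int my_rdma_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* The parent driver embeds the auxiliary_device in a larger
	 * structure carrying its private data/ops; a real driver would
	 * recover it with container_of() and register an ib_device here.
	 */
	dev_info(&adev->dev, "bound to %s\n", id->name);
	return 0;
}

static void my_rdma_remove(struct auxiliary_device *adev)
{
	/* Unregister the ib_device and release parent resources here. */
}

static struct auxiliary_driver my_rdma_driver = {
	.name = "rdma",		/* matching is done against id_table names */
	.probe = my_rdma_probe,
	.remove = my_rdma_remove,
	.id_table = my_rdma_id_table,
};
module_auxiliary_driver(my_rdma_driver);

MODULE_LICENSE("GPL");

With this split the RDMA module never touches the PCI function directly; it
binds only to what the parent netdev driver ('i40e' or 'ice') chooses to
expose as an auxiliary device, which is what lets a single irdma module
serve both X722 and E810.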

@@ -731,26 +731,6 @@ Description:
is the irq number of "sdma3", and M is irq number of "sdma4" in
the /proc/interrupts file.
sysfs interface for Intel(R) X722 iWARP i40iw driver
----------------------------------------------------
What: /sys/class/infiniband/i40iwX/hw_rev
What: /sys/class/infiniband/i40iwX/hca_type
What: /sys/class/infiniband/i40iwX/board_id
Date: Jan, 2016
KernelVersion: v4.10
Contact: linux-rdma@vger.kernel.org
Description:
=============== ==== ========================
hw_rev: (RO) Hardware revision number
hca_type: (RO) Show HCA type (I40IW)
board_id: (RO) I40IW board ID
=============== ==== ========================
sysfs interface for QLogic qedr NIC Driver
------------------------------------------

@@ -9130,6 +9130,15 @@ F: Documentation/networking/device_drivers/ethernet/intel/
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
F: include/linux/net/intel/iidc.h
INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
M: Mustafa Ismail <mustafa.ismail@intel.com>
M: Shiraz Saleem <shiraz.saleem@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/irdma/
F: include/uapi/rdma/irdma-abi.h
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme <mbroemme@libmpq.org>
@@ -9364,14 +9373,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL RDMA RNIC DRIVER
M: Faisal Latif <faisal.latif@intel.com>
M: Shiraz Saleem <shiraz.saleem@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
INTEL SCU DRIVERS
M: Mika Westerberg <mika.westerberg@linux.intel.com>
S: Maintained

@@ -82,7 +82,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"

@@ -3,7 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/

@@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
depends on IPV6 || !IPV6
depends on PCI
select GENERIC_ALLOCATOR
help
Intel(R) Ethernet X722 iWARP Driver

@@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
i40iw-objs :=\
i40iw_cm.o i40iw_ctrl.o \
i40iw_hmc.o i40iw_hw.o i40iw_main.o \
i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o

@@ -1,602 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_IW_H
#define I40IW_IW_H
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/crc32c.h>
#include <linux/net/intel/i40e_client.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#include <crypto/hash.h>
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_d.h"
#include "i40iw_hmc.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <rdma/i40iw-abi.h>
#include "i40iw_pble.h"
#include "i40iw_verbs.h"
#include "i40iw_cm.h"
#include "i40iw_user.h"
#include "i40iw_puda.h"
#define I40IW_FW_VER_DEFAULT 2
#define I40IW_HW_VERSION 2
#define I40IW_ARP_ADD 1
#define I40IW_ARP_DELETE 2
#define I40IW_ARP_RESOLVE 3
#define I40IW_MACIP_ADD 1
#define I40IW_MACIP_DELETE 2
#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
#define IW_TX_TIMEOUT (6 * HZ)
#define IW_FIRST_QPN 1
#define IW_SW_CONTEXT_ALIGN 1024
#define MAX_DPC_ITERATIONS 128
#define I40IW_EVENT_TIMEOUT 100000
#define I40IW_VCHNL_EVENT_TIMEOUT 100000
#define I40IW_NO_VLAN 0xffff
#define I40IW_NO_QSET 0xffff
/* access to mcast filter list */
#define IW_ADD_MCAST false
#define IW_DEL_MCAST true
#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
#define I40IW_MAX_PAGES_PER_FMR 512
#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
struct i40iw_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
u16 min_err_code;
bool error;
u8 op_code;
};
#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
struct i40iw_cqp_request {
struct cqp_commands_info info;
wait_queue_head_t waitq;
struct list_head list;
atomic_t refcount;
void (*callback_fcn)(struct i40iw_cqp_request*, u32);
void *param;
struct i40iw_cqp_compl_info compl_info;
bool waiting;
bool request_done;
bool dynamic;
};
struct i40iw_cqp {
struct i40iw_sc_cqp sc_cqp;
spinlock_t req_lock; /*cqp request list */
wait_queue_head_t waitq;
struct i40iw_dma_mem sq;
struct i40iw_dma_mem host_ctx;
u64 *scratch_array;
struct i40iw_cqp_request *cqp_requests;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
struct i40iw_device;
struct i40iw_ccq {
struct i40iw_sc_cq sc_cq;
spinlock_t lock; /* ccq control */
wait_queue_head_t waitq;
struct i40iw_dma_mem mem_cq;
struct i40iw_dma_mem shadow_area;
};
struct i40iw_ceq {
struct i40iw_sc_ceq sc_ceq;
struct i40iw_dma_mem mem;
u32 irq;
u32 msix_idx;
struct i40iw_device *iwdev;
struct tasklet_struct dpc_tasklet;
};
struct i40iw_aeq {
struct i40iw_sc_aeq sc_aeq;
struct i40iw_dma_mem mem;
};
struct i40iw_arp_entry {
u32 ip_addr[4];
u8 mac_addr[ETH_ALEN];
};
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
CQP_CREATED,
HMC_OBJS_CREATED,
PBLE_CHUNK_MEM,
CCQ_CREATED,
AEQ_CREATED,
CEQ_CREATED,
ILQ_CREATED,
IEQ_CREATED,
IP_ADDR_REGISTERED,
RDMA_DEV_REGISTERED
};
struct i40iw_msix_vector {
u32 idx;
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
cpumask_t mask;
};
struct l2params_work {
struct work_struct work;
struct i40iw_device *iwdev;
struct i40iw_l2params l2params;
};
#define I40IW_MSIX_TABLE_SIZE 65
struct virtchnl_work {
struct work_struct work;
union {
struct i40iw_cqp_request *cqp_request;
struct i40iw_virtchnl_work_info work_info;
};
};
struct i40e_qvlist_info;
struct i40iw_device {
struct i40iw_ib_device *iwibdev;
struct net_device *netdev;
wait_queue_head_t vchnl_waitq;
struct i40iw_sc_dev sc_dev;
struct i40iw_sc_vsi vsi;
struct i40iw_handler *hdl;
struct i40e_info *ldev;
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_arps;
struct i40iw_qp **qp_table;
bool msix_shared;
u32 msix_count;
struct i40iw_msix_vector *iw_msixtbl;
struct i40e_qvlist_info *iw_qvlist;
struct i40iw_hmc_pble_rsrc *pble_rsrc;
struct i40iw_arp_entry *arp_table;
struct i40iw_cqp cqp;
struct i40iw_ccq ccq;
u32 ceqs_count;
struct i40iw_ceq *ceqlist;
struct i40iw_aeq aeq;
u32 arp_table_size;
u32 next_arp_index;
spinlock_t resource_lock; /* hw resource access */
spinlock_t qptable_lock;
u32 vendor_id;
u32 vendor_part_id;
u32 of_device_registered;
u32 device_cap_flags;
unsigned long db_start;
u8 resource_profile;
u8 max_rdma_vfs;
u8 max_enabled_vfs;
u8 max_sge;
u8 iw_status;
u8 send_term_ok;
/* x710 specific */
struct mutex pbl_mutex;
struct tasklet_struct dpc_tasklet;
struct workqueue_struct *virtchnl_wq;
struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
struct i40iw_dma_mem obj_mem;
struct i40iw_dma_mem obj_next;
u8 *hmc_info_mem;
u32 sd_type;
struct workqueue_struct *param_wq;
atomic_t params_busy;
enum init_completion_state init_state;
u16 mac_ip_table_idx;
atomic_t vchnl_msgs;
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_pd;
u32 next_qp;
u32 next_cq;
u32 next_pd;
u32 max_mr_size;
u32 max_qp_wr;
u32 max_cqe;
u32 mr_stagmask;
u32 mpa_version;
bool dcb;
bool closing;
bool reset;
u32 used_pds;
u32 used_cqs;
u32 used_mrs;
u32 used_qps;
wait_queue_head_t close_wq;
atomic64_t use_count;
};
struct i40iw_ib_device {
struct ib_device ibdev;
struct i40iw_device *iwdev;
};
struct i40iw_handler {
struct list_head list;
struct i40e_client *client;
struct i40iw_device device;
struct i40e_info ldev;
};
/**
* i40iw_fw_major_ver - get firmware major version
* @dev: iwarp device
**/
static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev)
{
return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
I40IW_FW_VER_MAJOR);
}
/**
* i40iw_fw_minor_ver - get firmware minor version
* @dev: iwarp device
**/
static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev)
{
return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
I40IW_FW_VER_MINOR);
}
/**
* to_iwdev - get device
* @ibdev: ib device
**/
static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
}
/**
* to_ucontext - get user context
* @ibucontext: ib user context
**/
static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
}
/**
* to_iwpd - get protection domain
* @ibpd: ib pd
**/
static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct i40iw_pd, ibpd);
}
/**
* to_iwmr - get device memory region
* @ibmr: ib memory region
**/
static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct i40iw_mr, ibmr);
}
/**
* to_iwmw - get device memory window
* @ibmw: ib memory window
**/
static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct i40iw_mr, ibmw);
}
/**
* to_iwcq - get completion queue
* @ibcq: ib cq device
**/
static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct i40iw_cq, ibcq);
}
/**
* to_iwqp - get device qp
* @ibqp: ib qp
**/
static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct i40iw_qp, ibqp);
}
/* i40iw.c */
void i40iw_qp_add_ref(struct ib_qp *ibqp);
void i40iw_qp_rem_ref(struct ib_qp *ibqp);
struct ib_qp *i40iw_get_qp(struct ib_device *, int);
void i40iw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_qp *qp);
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
unsigned char *mac_addr,
u32 *ip_addr,
bool ipv4,
u32 action);
int i40iw_manage_apbvt(struct i40iw_device *iwdev,
u16 accel_local_port,
bool add_port);
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
/**
* i40iw_alloc_resource - allocate a resource
* @iwdev: device pointer
* @resource_array: resource bit array
* @max_resources: maximum resource number
* @req_resource_num: allocated resource number
* @next: next free id
**/
static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
unsigned long *resource_array,
u32 max_resources,
u32 *req_resource_num,
u32 *next)
{
u32 resource_num;
unsigned long flags;
spin_lock_irqsave(&iwdev->resource_lock, flags);
resource_num = find_next_zero_bit(resource_array, max_resources, *next);
if (resource_num >= max_resources) {
resource_num = find_first_zero_bit(resource_array, max_resources);
if (resource_num >= max_resources) {
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return -EOVERFLOW;
}
}
set_bit(resource_num, resource_array);
*next = resource_num + 1;
if (*next == max_resources)
*next = 0;
*req_resource_num = resource_num;
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return 0;
}
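/*
 * Example (illustrative numbers only): with max_resources = 8, *next = 6
 * and bits 6 and 7 already set, the first find_next_zero_bit() search
 * fails, the wrapped find_first_zero_bit() returns the lowest free id
 * (say 2), that bit is set, *req_resource_num = 2 and *next becomes 3.
 */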
/**
* i40iw_is_resource_allocated - determine if resource is
* allocated
* @iwdev: device pointer
* @resource_array: resource array for the resource_num
* @resource_num: resource number to check
**/
static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
unsigned long *resource_array,
u32 resource_num)
{
bool bit_is_set;
unsigned long flags;
spin_lock_irqsave(&iwdev->resource_lock, flags);
bit_is_set = test_bit(resource_num, resource_array);
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return bit_is_set;
}
/**
* i40iw_free_resource - free a resource
* @iwdev: device pointer
* @resource_array: resource array for the resource_num
* @resource_num: resource number to free
**/
static inline void i40iw_free_resource(struct i40iw_device *iwdev,
unsigned long *resource_array,
u32 resource_num)
{
unsigned long flags;
spin_lock_irqsave(&iwdev->resource_lock, flags);
clear_bit(resource_num, resource_array);
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
}
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
/**
* i40iw_initialize_hw_resources - initialize hw resource arrays
*/
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
struct sk_buff *);
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
struct i40iw_cqp_request *cqp_request);
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
struct i40iw_modify_qp_info *info, bool wait);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
struct i40iw_sc_qp *qp,
bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait);
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
struct i40iw_dma_mem *memptr,
u32 size, u32 mask);
void i40iw_request_reset(struct i40iw_device *iwdev);
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
int i40iw_setup_cm_core(struct i40iw_device *iwdev);
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
void i40iw_process_aeq(struct i40iw_device *);
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
u8 state, u8 del_hash,
u8 term, u8 term_len);
int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
int i40iw_send_reset(struct i40iw_cm_node *cm_node);
struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
u16 rem_port,
u32 *rem_addr,
u16 loc_port,
u32 *loc_addr,
bool add_refcnt,
bool accelerated_list);
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_qp_flush_info *info,
bool wait);
void i40iw_gen_ae(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_gen_ae_info *info,
bool wait);
void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
u64 addr,
u64 size,
int acc,
u64 *iova_start);
int i40iw_inetaddr_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_inet6addr_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_netdevice_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
#endif

File diff suppressed because it is too large.

@@ -1,462 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_CM_H
#define I40IW_CM_H
#define QUEUE_EVENTS
#define I40IW_MANAGE_APBVT_DEL 0
#define I40IW_MANAGE_APBVT_ADD 1
#define I40IW_MPA_REQUEST_ACCEPT 1
#define I40IW_MPA_REQUEST_REJECT 2
/* IETF MPA -- defines, enums, structs */
#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
#define IETF_MAX_PRIV_DATA_LEN 512
#define IETF_MPA_FRAME_SIZE 20
#define IETF_RTR_MSG_SIZE 4
#define IETF_MPA_V2_FLAG 0x10
#define SNDMARKER_SEQNMASK 0x000001FF
#define I40IW_MAX_IETF_SIZE 32
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
#define IETF_RDMA0_WRITE 0x8000
#define IETF_RDMA0_READ 0x4000
#define IETF_NO_IRD_ORD 0x3FFF
/* HW-supported IRD sizes*/
#define I40IW_HW_IRD_SETTING_2 2
#define I40IW_HW_IRD_SETTING_4 4
#define I40IW_HW_IRD_SETTING_8 8
#define I40IW_HW_IRD_SETTING_16 16
#define I40IW_HW_IRD_SETTING_32 32
#define I40IW_HW_IRD_SETTING_64 64
#define MAX_PORTS 65536
#define I40IW_VLAN_PRIO_SHIFT 13
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
};
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
u8 priv_data[];
};
#define ietf_mpa_req_resp_frame ietf_mpa_frame
struct ietf_rtr_msg {
__be16 ctrl_ird;
__be16 ctrl_ord;
};
struct ietf_mpa_v2 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
u8 priv_data[];
};
struct i40iw_cm_node;
enum i40iw_timer_type {
I40IW_TIMER_TYPE_SEND,
I40IW_TIMER_TYPE_RECV,
I40IW_TIMER_NODE_CLEANUP,
I40IW_TIMER_TYPE_CLOSE,
};
#define I40IW_PASSIVE_STATE_INDICATED 0
#define I40IW_DO_NOT_SEND_RESET_EVENT 1
#define I40IW_SEND_RESET_EVENT 2
#define MAX_I40IW_IFS 4
#define SET_ACK 0x1
#define SET_SYN 0x2
#define SET_FIN 0x4
#define SET_RST 0x8
#define TCP_OPTIONS_PADDING 3
struct option_base {
u8 optionnum;
u8 length;
};
enum option_numbers {
OPTION_NUMBER_END,
OPTION_NUMBER_NONE,
OPTION_NUMBER_MSS,
OPTION_NUMBER_WINDOW_SCALE,
OPTION_NUMBER_SACK_PERM,
OPTION_NUMBER_SACK,
OPTION_NUMBER_WRITE0 = 0xbc
};
struct option_mss {
u8 optionnum;
u8 length;
__be16 mss;
};
struct option_windowscale {
u8 optionnum;
u8 length;
u8 shiftcount;
};
union all_known_options {
char as_end;
struct option_base as_base;
struct option_mss as_mss;
struct option_windowscale as_windowscale;
};
struct i40iw_timer_entry {
struct list_head list;
unsigned long timetosend; /* jiffies */
struct i40iw_puda_buf *sqbuf;
u32 type;
u32 retrycount;
u32 retranscount;
u32 context;
u32 send_retrans;
int close_when_complete;
};
#define I40IW_DEFAULT_RETRYS 64
#define I40IW_DEFAULT_RETRANS 8
#define I40IW_DEFAULT_TTL 0x40
#define I40IW_DEFAULT_RTT_VAR 0x6
#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
#define I40IW_DEFAULT_REXMIT_THRESH 8
#define I40IW_RETRY_TIMEOUT HZ
#define I40IW_SHORT_TIME 10
#define I40IW_LONG_TIME (2 * HZ)
#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))
#define I40IW_CM_HASHTABLE_SIZE 1024
#define I40IW_CM_TCP_TIMER_INTERVAL 3000
#define I40IW_CM_DEFAULT_MTU 1540
#define I40IW_CM_DEFAULT_FRAME_CNT 10
#define I40IW_CM_THREAD_STACK_SIZE 256
#define I40IW_CM_DEFAULT_RCV_WND 64240
#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2
#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A
#define I40IW_CM_FREE_PKT_LO_WATERMARK 2
#define I40IW_CM_DEFAULT_MSS 536
#define I40IW_CM_DEF_SEQ 0x159bf75f
#define I40IW_CM_DEF_LOCAL_ID 0x3b47
#define I40IW_CM_DEF_SEQ2 0x18ed5740
#define I40IW_CM_DEF_LOCAL_ID2 0xb807
#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
typedef u32 i40iw_addr_t;
#define i40iw_cm_tsa_context i40iw_qp_context
struct i40iw_qp;
/* cm node transition states */
enum i40iw_cm_node_state {
I40IW_CM_STATE_UNKNOWN,
I40IW_CM_STATE_INITED,
I40IW_CM_STATE_LISTENING,
I40IW_CM_STATE_SYN_RCVD,
I40IW_CM_STATE_SYN_SENT,
I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
I40IW_CM_STATE_ESTABLISHED,
I40IW_CM_STATE_ACCEPTING,
I40IW_CM_STATE_MPAREQ_SENT,
I40IW_CM_STATE_MPAREQ_RCVD,
I40IW_CM_STATE_MPAREJ_RCVD,
I40IW_CM_STATE_OFFLOADED,
I40IW_CM_STATE_FIN_WAIT1,
I40IW_CM_STATE_FIN_WAIT2,
I40IW_CM_STATE_CLOSE_WAIT,
I40IW_CM_STATE_TIME_WAIT,
I40IW_CM_STATE_LAST_ACK,
I40IW_CM_STATE_CLOSING,
I40IW_CM_STATE_LISTENER_DESTROYED,
I40IW_CM_STATE_CLOSED
};
enum mpa_frame_version {
IETF_MPA_V1 = 1,
IETF_MPA_V2 = 2
};
enum mpa_frame_key {
MPA_KEY_REQUEST,
MPA_KEY_REPLY
};
enum send_rdma0 {
SEND_RDMA_READ_ZERO = 1,
SEND_RDMA_WRITE_ZERO = 2
};
enum i40iw_tcpip_pkt_type {
I40IW_PKT_TYPE_UNKNOWN,
I40IW_PKT_TYPE_SYN,
I40IW_PKT_TYPE_SYNACK,
I40IW_PKT_TYPE_ACK,
I40IW_PKT_TYPE_FIN,
I40IW_PKT_TYPE_RST
};
/* CM context params */
struct i40iw_cm_tcp_context {
u8 client;
u32 loc_seq_num;
u32 loc_ack_num;
u32 rem_ack_num;
u32 rcv_nxt;
u32 loc_id;
u32 rem_id;
u32 snd_wnd;
u32 max_snd_wnd;
u32 rcv_wnd;
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
};
enum i40iw_cm_listener_state {
I40IW_CM_LISTENER_PASSIVE_STATE = 1,
I40IW_CM_LISTENER_ACTIVE_STATE = 2,
I40IW_CM_LISTENER_EITHER_STATE = 3
};
struct i40iw_cm_listener {
struct list_head list;
struct i40iw_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
u32 loc_addr[4];
u16 loc_port;
struct iw_cm_id *cm_id;
atomic_t ref_count;
struct i40iw_device *iwdev;
atomic_t pend_accepts_cnt;
int backlog;
enum i40iw_cm_listener_state listener_state;
u32 reused_node;
u8 user_pri;
u8 tos;
u16 vlan_id;
bool qhash_set;
bool ipv4;
struct list_head child_listen_list;
};
struct i40iw_kmem_info {
void *addr;
u32 size;
};
/* per connection node and node state information */
struct i40iw_cm_node {
u32 loc_addr[4], rem_addr[4];
u16 loc_port, rem_port;
u16 vlan_id;
enum i40iw_cm_node_state state;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
atomic_t ref_count;
struct i40iw_qp *iwqp;
struct i40iw_device *iwdev;
struct i40iw_sc_dev *dev;
struct i40iw_cm_tcp_context tcp_cntxt;
struct i40iw_cm_core *cm_core;
struct i40iw_cm_node *loopbackpartner;
struct i40iw_timer_entry *send_entry;
struct i40iw_timer_entry *close_entry;
spinlock_t retrans_list_lock; /* cm transmit packet */
enum send_rdma0 send_rdma0_op;
u16 ird_size;
u16 ord_size;
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
struct list_head timer_entry;
struct list_head reset_entry;
struct list_head teardown_entry;
atomic_t passive_state;
bool qhash_set;
u8 user_pri;
u8 tos;
bool ipv4;
bool snd_mark_en;
u16 lsmm_size;
enum mpa_frame_version mpa_frame_rev;
struct i40iw_kmem_info pdata;
union {
struct ietf_mpa_v1 mpa_frame;
struct ietf_mpa_v2 mpa_v2_frame;
};
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
struct i40iw_kmem_info mpa_hdr;
bool ack_rcvd;
};
/* structure for client or CM to fill when making CM api calls. */
/* - only need to set relevant data, based on op. */
struct i40iw_cm_info {
struct iw_cm_id *cm_id;
u16 loc_port;
u16 rem_port;
u32 loc_addr[4];
u32 rem_addr[4];
u16 vlan_id;
int backlog;
u8 user_pri;
u8 tos;
bool ipv4;
};
/* CM event codes */
enum i40iw_cm_event_type {
I40IW_CM_EVENT_UNKNOWN,
I40IW_CM_EVENT_ESTABLISHED,
I40IW_CM_EVENT_MPA_REQ,
I40IW_CM_EVENT_MPA_CONNECT,
I40IW_CM_EVENT_MPA_ACCEPT,
I40IW_CM_EVENT_MPA_REJECT,
I40IW_CM_EVENT_MPA_ESTABLISHED,
I40IW_CM_EVENT_CONNECTED,
I40IW_CM_EVENT_RESET,
I40IW_CM_EVENT_ABORTED
};
/* event to post to CM event handler */
struct i40iw_cm_event {
enum i40iw_cm_event_type type;
struct i40iw_cm_info cm_info;
struct work_struct event_work;
struct i40iw_cm_node *cm_node;
};
struct i40iw_cm_core {
struct i40iw_device *iwdev;
struct i40iw_sc_dev *dev;
struct list_head listen_nodes;
struct list_head accelerated_list;
struct list_head non_accelerated_list;
struct timer_list tcp_timer;
struct workqueue_struct *event_wq;
struct workqueue_struct *disconn_wq;
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
spinlock_t apbvt_lock; /*manage apbvt entries*/
unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
u64 stats_listen_destroyed;
u64 stats_listen_nodes_created;
u64 stats_listen_nodes_destroyed;
u64 stats_loopbacks;
u64 stats_accepts;
u64 stats_rejects;
u64 stats_connect_errs;
u64 stats_passive_errs;
u64 stats_pkt_retrans;
u64 stats_backlog_drops;
};
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
struct i40iw_puda_buf *sqbuf,
enum i40iw_timer_type type,
int send_retrans,
int close_when_complete);
int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int i40iw_reject(struct iw_cm_id *, const void *, u8);
int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
int i40iw_create_listen(struct iw_cm_id *, int);
int i40iw_destroy_listen(struct iw_cm_id *);
int i40iw_cm_start(struct i40iw_device *);
int i40iw_cm_stop(struct i40iw_device *);
int i40iw_arp_table(struct i40iw_device *iwdev,
u32 *ip_addr,
bool ipv4,
u8 *mac_addr,
u32 action);
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
struct i40iw_cm_info *nfo,
bool disconnect_all);
bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);
#endif /* I40IW_CM_H */

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -1,821 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"
/**
* i40iw_find_sd_index_limit - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by i40iw_hmc_rsrc_type.
*/
static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
u32 type,
u32 idx,
u32 cnt,
u32 *sd_idx,
u32 *sd_limit)
{
u64 fpm_addr, fpm_limit;
fpm_addr = hmc_info->hmc_obj[(type)].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
*sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
*sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
*sd_limit += 1;
}
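/*
 * Example (illustrative numbers only): with hmc_obj[type].base = 0,
 * hmc_obj[type].size = 0x1000, idx = 0x600 and cnt = 0x200, fpm_addr is
 * 0x600000 and fpm_limit is 0x800000; with I40IW_HMC_DIRECT_BP_SIZE of
 * 0x200000 (2 MB) that yields *sd_idx = 3 and *sd_limit = 4, i.e. one
 * past the last segment descriptor the object range touches.
 */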
/**
* i40iw_find_pd_index_limit - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_idx: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by i40iw_hmc_rsrc_type.
*/
static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
u32 type,
u32 idx,
u32 cnt,
u32 *pd_idx,
u32 *pd_limit)
{
u64 fpm_adr, fpm_limit;
fpm_adr = hmc_info->hmc_obj[type].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);
*(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);
*(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
*(pd_limit) += 1;
}
/**
* i40iw_set_sd_entry - setup entry for sd programming
* @pa: physical addr
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static inline void i40iw_set_sd_entry(u64 pa,
u32 idx,
enum i40iw_sd_entry_type type,
struct update_sd_entry *entry)
{
entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
(((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
}
/**
* i40iw_clr_sd_entry - setup entry for sd clear
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
struct update_sd_entry *entry)
{
entry->data = (I40IW_HMC_MAX_BP_COUNT <<
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
(((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
}
/**
* i40iw_hmc_sd_one - setup 1 sd entry for cqp
* @dev: pointer to the device structure
* @hmc_fn_id: hmc's function id
* @pa: physical addr
* @sd_idx: sd index
* @type: paged or direct sd
* @setsd: flag to set or clear sd
*/
enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
u8 hmc_fn_id,
u64 pa, u32 sd_idx,
enum i40iw_sd_entry_type type,
bool setsd)
{
struct i40iw_update_sds_info sdinfo;
sdinfo.cnt = 1;
sdinfo.hmc_fn_id = hmc_fn_id;
if (setsd)
i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
else
i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
/**
* i40iw_hmc_sd_grp - setup group of sd entries for cqp
* @dev: pointer to the device structure
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: sd index
* @sd_cnt: number of sd entries
* @setsd: flag to set or clear sd
*/
static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
struct i40iw_hmc_info *hmc_info,
u32 sd_index,
u32 sd_cnt,
bool setsd)
{
struct i40iw_hmc_sd_entry *sd_entry;
struct i40iw_update_sds_info sdinfo;
u64 pa;
u32 i;
enum i40iw_status_code ret_code = 0;
memset(&sdinfo, 0, sizeof(sdinfo));
sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
for (i = sd_index; i < sd_index + sd_cnt; i++) {
sd_entry = &hmc_info->sd_table.sd_entry[i];
if (!sd_entry ||
(!sd_entry->valid && setsd) ||
(sd_entry->valid && !setsd))
continue;
if (setsd) {
pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
} else {
i40iw_clr_sd_entry(i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
}
sdinfo.cnt++;
if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_HMC,
"i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
ret_code);
return ret_code;
}
sdinfo.cnt = 0;
}
}
if (sdinfo.cnt)
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
return ret_code;
}
/**
* i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
* @dev: pointer to the device structure
* @hmc_fn_id: hmc's function id
*/
struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
struct i40iw_vfdev *vf_dev = NULL;
u16 idx;
for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
if (dev->vf_dev[idx] &&
((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
vf_dev = dev->vf_dev[idx];
break;
}
}
return vf_dev;
}
/**
* i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
* @dev: pointer to the device structure
* @hmc_fn_id: hmc's function id
*/
struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
u8 hmc_fn_id)
{
struct i40iw_hmc_info *hmc_info = NULL;
u16 idx;
for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
if (dev->vf_dev[idx] &&
((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
hmc_info = &dev->vf_dev[idx]->hmc_info;
break;
}
}
return hmc_info;
}
/**
* i40iw_hmc_finish_add_sd_reg - program sd entries for objects
* @dev: pointer to the device structure
* @info: create obj info
*/
static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *info)
{
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
if (!info->add_sd_cnt)
return 0;
return i40iw_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->add_sd_cnt, true);
}
/**
* i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_iw_create_obj_info struct
*
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
*/
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *info)
{
struct i40iw_hmc_sd_entry *sd_entry;
u32 sd_idx, sd_lmt;
u32 pd_idx = 0, pd_lmt = 0;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 i, j;
bool pd_error = false;
enum i40iw_status_code ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
i40iw_debug(dev, I40IW_DEBUG_HMC,
"%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
__func__, info->rsrc_type, info->start_idx, info->count,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
}
if (!dev->is_pf)
return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count,
&sd_idx, &sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
return I40IW_ERR_INVALID_SD_INDEX;
}
i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx, &pd_lmt);
for (j = sd_idx; j < sd_lmt; j++) {
ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
j,
info->entry_type,
I40IW_HMC_DIRECT_BP_SIZE);
if (ret_code)
goto exit_sd_error;
sd_entry = &info->hmc_info->sd_table.sd_entry[j];
if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
((dev->hmc_info == info->hmc_info) &&
(info->rsrc_type != I40IW_HMC_IW_PBLE))) {
pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt,
(j + 1) * I40IW_HMC_MAX_BP_COUNT);
for (i = pd_idx1; i < pd_lmt1; i++) {
/* update the pd table entry */
ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
i, NULL);
if (ret_code) {
pd_error = true;
break;
}
}
if (pd_error) {
while (i && (i > pd_idx1)) {
i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
info->is_pf);
i--;
}
}
}
if (sd_entry->valid)
continue;
info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
info->add_sd_cnt++;
sd_entry->valid = true;
}
return i40iw_hmc_finish_add_sd_reg(dev, info);
exit_sd_error:
while (j && (j > sd_idx)) {
sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
switch (sd_entry->entry_type) {
case I40IW_SD_TYPE_PAGED:
pd_idx1 = max(pd_idx,
(j - 1) * I40IW_HMC_MAX_BP_COUNT);
pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++)
i40iw_prep_remove_pd_page(info->hmc_info, i);
break;
case I40IW_SD_TYPE_DIRECT:
i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
break;
default:
ret_code = I40IW_ERR_INVALID_SD_TYPE;
break;
}
j--;
}
return ret_code;
}
/**
* i40iw_finish_del_sd_reg - delete sd entries for objects
* @dev: pointer to the device structure
* @info: delete obj info
* @reset: true if called before reset
*/
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
struct i40iw_hmc_del_obj_info *info,
bool reset)
{
struct i40iw_hmc_sd_entry *sd_entry;
enum i40iw_status_code ret_code = 0;
u32 i, sd_idx;
struct i40iw_dma_mem *mem;
if (dev->is_pf && !reset)
ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
for (i = 0; i < info->del_sd_cnt; i++) {
sd_idx = info->hmc_info->sd_indexes[i];
sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
if (!sd_entry)
continue;
mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
&sd_entry->u.pd_table.pd_page_addr :
&sd_entry->u.bp.addr;
if (!mem || !mem->va)
i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
else
i40iw_free_dma_mem(dev->hw, mem);
}
return ret_code;
}
/**
* i40iw_sc_del_hmc_obj - remove pe hmc objects
* @dev: pointer to the device structure
* @info: pointer to i40iw_hmc_del_obj_info struct
* @reset: true if called before reset
*
* This will de-populate the SDs and PDs. It frees
* the memory for PDs and backing storage. After this function returns,
* the caller should deallocate the memory previously allocated for
* book-keeping information about PDs and backing storage.
*/
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_del_obj_info *info,
bool reset)
{
struct i40iw_hmc_pd_table *pd_table;
u32 sd_idx, sd_lmt;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 i, j;
enum i40iw_status_code ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
i40iw_debug(dev, I40IW_DEBUG_HMC,
"%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
__func__, info->start_idx, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
}
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
i40iw_debug(dev, I40IW_DEBUG_HMC,
"%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
__func__, info->start_idx, info->count,
info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
}
if (!dev->is_pf) {
ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
info->count);
if (info->rsrc_type != I40IW_HMC_IW_PBLE)
return ret_code;
}
i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx, &pd_lmt);
for (j = pd_idx; j < pd_lmt; j++) {
sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
I40IW_SD_TYPE_PAGED)
continue;
rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry[rel_pd_idx].valid) {
ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
info->is_pf);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
return ret_code;
}
}
}
i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &sd_idx, &sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
return I40IW_ERR_INVALID_SD_INDEX;
}
for (i = sd_idx; i < sd_lmt; i++) {
if (!info->hmc_info->sd_table.sd_entry[i].valid)
continue;
switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
case I40IW_SD_TYPE_DIRECT:
ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
if (!ret_code) {
info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
info->del_sd_cnt++;
}
break;
case I40IW_SD_TYPE_PAGED:
ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
if (!ret_code) {
info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
info->del_sd_cnt++;
}
break;
default:
break;
}
}
return i40iw_finish_del_sd_reg(dev, info, reset);
}
/**
* i40iw_add_sd_table_entry - Adds a segment descriptor to the table
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: segment descriptor index to manipulate
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
*/
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info,
u32 sd_index,
enum i40iw_sd_entry_type type,
u64 direct_mode_sz)
{
enum i40iw_status_code ret_code = 0;
struct i40iw_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40iw_dma_mem mem;
u64 alloc_len;
sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
if (!sd_entry->valid) {
if (type == I40IW_SD_TYPE_PAGED)
alloc_len = I40IW_HMC_PAGED_BP_SIZE;
else
alloc_len = direct_mode_sz;
/* allocate a 4K pd page or 2M backing page */
ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
I40IW_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
goto exit;
dma_mem_alloc_done = true;
if (type == I40IW_SD_TYPE_PAGED) {
ret_code = i40iw_allocate_virt_mem(hw,
&sd_entry->u.pd_table.pd_entry_virt_mem,
sizeof(struct i40iw_hmc_pd_entry) * 512);
if (ret_code)
goto exit;
sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
} else {
memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
sd_entry->u.bp.sd_pd_index = sd_index;
}
hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
}
if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
if (ret_code)
if (dma_mem_alloc_done)
i40iw_free_dma_mem(hw, &mem);
return ret_code;
}
/**
* i40iw_add_pd_table_entry - Adds page descriptor to the specified table
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
* @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
*
* This function:
* 1. Initializes the pd entry
* 2. Adds pd_entry in the pd_table
* 3. Mark the entry valid in i40iw_hmc_pd_entry structure
* 4. Initializes the pd_entry's ref count to 1
* assumptions:
* 1. The memory for pd should be pinned down, physically contiguous and
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
*/
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info,
u32 pd_index,
struct i40iw_dma_mem *rsrc_pg)
{
enum i40iw_status_code ret_code = 0;
struct i40iw_hmc_pd_table *pd_table;
struct i40iw_hmc_pd_entry *pd_entry;
struct i40iw_dma_mem mem;
struct i40iw_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
return 0;
rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
if (rsrc_pg) {
pd_entry->rsrc_pg = true;
page = rsrc_pg;
} else {
ret_code = i40iw_allocate_dma_mem(hw, page,
I40IW_HMC_PAGED_BP_SIZE,
I40IW_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
return ret_code;
pd_entry->rsrc_pg = false;
}
memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
I40IW_INC_PD_REFCNT(pd_table);
if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
hmc_info->hmc_fn_id);
}
I40IW_INC_BP_REFCNT(&pd_entry->bp);
return 0;
}
/**
* i40iw_remove_pd_bp - remove a backing page from a page descriptor
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
* @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd table (for paged address mode) or in sd table
* (for direct address mode) invalid.
* 2. Write to register PMPDINV to invalidate the backing page in FV cache
* 3. Decrement the ref count for the pd _entry
* assumptions:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
*/
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info,
u32 idx,
bool is_pf)
{
struct i40iw_hmc_pd_entry *pd_entry;
struct i40iw_hmc_pd_table *pd_table;
struct i40iw_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
struct i40iw_dma_mem *mem;
u64 *pd_addr;
sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt)
return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
return I40IW_ERR_INVALID_SD_TYPE;
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
I40IW_DEC_BP_REFCNT(&pd_entry->bp);
if (pd_entry->bp.ref_cnt)
return 0;
pd_entry->valid = false;
I40IW_DEC_PD_REFCNT(pd_table);
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
if (is_pf)
I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
else
I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
hmc_info->hmc_fn_id);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
if (!mem || !mem->va)
return I40IW_ERR_PARAM;
i40iw_free_dma_mem(hw, mem);
}
if (!pd_table->ref_cnt)
i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
return 0;
}
/**
* i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*/
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
{
struct i40iw_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
if (sd_entry->u.bp.ref_cnt)
return I40IW_ERR_NOT_READY;
I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
sd_entry->valid = false;
return 0;
}
/**
* i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
*/
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
u32 idx)
{
struct i40iw_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.ref_cnt)
return I40IW_ERR_NOT_READY;
sd_entry->valid = false;
I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
return 0;
}
/**
* i40iw_pf_init_vfhmc -
* @vf_cnt_array: array of cnt values of iwarp hmc objects
* @vf_hmc_fn_id: hmc function id for vf driver
* @dev: pointer to i40iw_dev struct
*
* Called by pf driver to initialize hmc_info for vf driver instance.
*/
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
u8 vf_hmc_fn_id,
u32 *vf_cnt_array)
{
struct i40iw_hmc_info *hmc_info;
enum i40iw_status_code ret_code = 0;
u32 i;
if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
(vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
I40IW_MAX_PE_ENABLED_VF_COUNT)) {
i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
__func__, vf_hmc_fn_id);
return I40IW_ERR_INVALID_HMCFN_ID;
}
ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
if (ret_code)
return ret_code;
hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
if (vf_cnt_array)
hmc_info->hmc_obj[i].cnt =
vf_cnt_array[i - I40IW_HMC_IW_QP];
else
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
return 0;
}

@@ -1,241 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_HMC_H
#define I40IW_HMC_H
#include "i40iw_d.h"
struct i40iw_hw;
enum i40iw_status_code;
#define I40IW_HMC_MAX_BP_COUNT 512
#define I40IW_MAX_SD_ENTRIES 11
#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA
#define I40IW_HMC_INFO_SIGNATURE 0x484D5347
#define I40IW_HMC_PD_CNT_IN_SD 512
#define I40IW_HMC_DIRECT_BP_SIZE 0x200000
#define I40IW_HMC_MAX_SD_COUNT 4096
#define I40IW_HMC_PAGED_BP_SIZE 4096
#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40IW_FIRST_VF_FPM_ID 16
#define FPM_MULTIPLIER 1024
#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
*/
#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
i40iw_wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
(0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
* @hmc_fn_id: VF's function id
*/
#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \
((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
(pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
struct i40iw_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
};
enum i40iw_sd_entry_type {
I40IW_SD_TYPE_INVALID = 0,
I40IW_SD_TYPE_PAGED = 1,
I40IW_SD_TYPE_DIRECT = 2
};
struct i40iw_hmc_bp {
enum i40iw_sd_entry_type entry_type;
struct i40iw_dma_mem addr;
u32 sd_pd_index;
u32 ref_cnt;
};
struct i40iw_hmc_pd_entry {
struct i40iw_hmc_bp bp;
u32 sd_index;
bool rsrc_pg;
bool valid;
};
struct i40iw_hmc_pd_table {
struct i40iw_dma_mem pd_page_addr;
struct i40iw_hmc_pd_entry *pd_entry;
struct i40iw_virt_mem pd_entry_virt_mem;
u32 ref_cnt;
u32 sd_index;
};
struct i40iw_hmc_sd_entry {
enum i40iw_sd_entry_type entry_type;
bool valid;
union {
struct i40iw_hmc_pd_table pd_table;
struct i40iw_hmc_bp bp;
} u;
};
struct i40iw_hmc_sd_table {
struct i40iw_virt_mem addr;
u32 sd_cnt;
u32 ref_cnt;
struct i40iw_hmc_sd_entry *sd_entry;
};
struct i40iw_hmc_info {
u32 signature;
u8 hmc_fn_id;
u16 first_sd_index;
struct i40iw_hmc_obj_info *hmc_obj;
struct i40iw_virt_mem hmc_obj_virt_mem;
struct i40iw_hmc_sd_table sd_table;
u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];
};
struct update_sd_entry {
u64 cmd;
u64 data;
};
struct i40iw_update_sds_info {
u32 cnt;
u8 hmc_fn_id;
struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
};
struct i40iw_ccq_cqe_info;
struct i40iw_hmc_fcn_info {
void (*callback_fcn)(struct i40iw_sc_dev *, void *,
struct i40iw_ccq_cqe_info *);
void *cqp_callback_param;
u32 vf_id;
u16 iw_vf_idx;
bool free_fcn;
};
enum i40iw_hmc_rsrc_type {
I40IW_HMC_IW_QP = 0,
I40IW_HMC_IW_CQ = 1,
I40IW_HMC_IW_SRQ = 2,
I40IW_HMC_IW_HTE = 3,
I40IW_HMC_IW_ARP = 4,
I40IW_HMC_IW_APBVT_ENTRY = 5,
I40IW_HMC_IW_MR = 6,
I40IW_HMC_IW_XF = 7,
I40IW_HMC_IW_XFFL = 8,
I40IW_HMC_IW_Q1 = 9,
I40IW_HMC_IW_Q1FL = 10,
I40IW_HMC_IW_TIMER = 11,
I40IW_HMC_IW_FSIMC = 12,
I40IW_HMC_IW_FSIAV = 13,
I40IW_HMC_IW_PBLE = 14,
I40IW_HMC_IW_MAX = 15,
};
struct i40iw_hmc_create_obj_info {
struct i40iw_hmc_info *hmc_info;
struct i40iw_virt_mem add_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 add_sd_cnt;
enum i40iw_sd_entry_type entry_type;
bool is_pf;
};
struct i40iw_hmc_del_obj_info {
struct i40iw_hmc_info *hmc_info;
struct i40iw_virt_mem del_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 del_sd_cnt;
bool is_pf;
};
enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *info);
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_del_obj_info *info,
bool reset);
enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
bool setsd);
enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
struct i40iw_update_sds_info *info);
struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 sd_index,
enum i40iw_sd_entry_type type, u64 direct_mode_sz);
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 pd_index,
struct i40iw_dma_mem *rsrc_pg);
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
#define ENTER_SHARED_FUNCTION()
#define EXIT_SHARED_FUNCTION()
#endif /* I40IW_HMC_H */
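/* Illustrative sketch, not from the original header: how the structures
 * above nest. An i40iw_hmc_info owns an sd_table; each SD entry is either
 * one 2 MB direct backing page (u.bp) or a PD table of up to
 * I40IW_HMC_PD_CNT_IN_SD (512) page descriptors, each backed by a 4 KB
 * page. The walker below is hypothetical and only counts valid paged PDs.
 */
static inline u32 example_count_valid_pds(struct i40iw_hmc_info *hmc_info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd, pd, count = 0;

	for (sd = 0; sd < hmc_info->sd_table.sd_cnt; sd++) {
		sd_entry = &hmc_info->sd_table.sd_entry[sd];
		if (!sd_entry->valid ||
		    sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
			continue;
		for (pd = 0; pd < I40IW_HMC_PD_CNT_IN_SD; pd++)
			if (sd_entry->u.pd_table.pd_entry[pd].valid)
				count++;
	}
	return count;
}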


@@ -1,851 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "i40iw.h"
/**
* i40iw_initialize_hw_resources - initialize hw resource during open
* @iwdev: iwarp device
*/
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
{
unsigned long num_pds;
u32 resources_size;
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 arp_table_size;
u32 mrdrvbits;
void *resource_ptr;
max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
iwdev->max_cqe = 0xFFFFF;
num_pds = I40IW_MAX_PDS;
resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
resources_size += sizeof(struct i40iw_qp **) * max_qp;
iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
if (!iwdev->mem_resources)
return -ENOMEM;
iwdev->max_qp = max_qp;
iwdev->max_mr = max_mr;
iwdev->max_cq = max_cq;
iwdev->max_pd = num_pds;
iwdev->arp_table_size = arp_table_size;
iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
iwdev->allocated_qps = resource_ptr;
iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
set_bit(0, iwdev->allocated_mrs);
set_bit(0, iwdev->allocated_qps);
set_bit(0, iwdev->allocated_cqs);
set_bit(0, iwdev->allocated_pds);
set_bit(0, iwdev->allocated_arps);
/* Following for ILQ/IEQ */
set_bit(1, iwdev->allocated_qps);
set_bit(1, iwdev->allocated_cqs);
set_bit(1, iwdev->allocated_pds);
set_bit(2, iwdev->allocated_cqs);
set_bit(2, iwdev->allocated_pds);
spin_lock_init(&iwdev->resource_lock);
spin_lock_init(&iwdev->qptable_lock);
/* stag index mask has a minimum of 14 bits */
mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
return 0;
}
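/* Worked example, values assumed for illustration only: the stag mask
 * arithmetic above with max_mr = 65536:
 *   get_count_order(65536) = 16
 *   mrdrvbits   = 24 - max(16, 14)              = 8
 *   mr_stagmask = ~(((1 << 8) - 1) << (32 - 8)) = ~0xff000000 = 0x00ffffff
 */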
/**
* i40iw_cqp_ce_handler - handle cqp completions
* @iwdev: iwarp device
* @arm: flag to arm after completions
* @cq: cq for cqp completions
*/
static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
{
struct i40iw_cqp_request *cqp_request;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
u32 cqe_count = 0;
struct i40iw_ccq_cqe_info info;
int ret;
do {
memset(&info, 0, sizeof(info));
ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
if (ret)
break;
cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
if (info.error)
i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
info.op_code, info.maj_err_code, info.min_err_code);
if (cqp_request) {
cqp_request->compl_info.maj_err_code = info.maj_err_code;
cqp_request->compl_info.min_err_code = info.min_err_code;
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
if (cqp_request->waiting) {
cqp_request->request_done = true;
wake_up(&cqp_request->waitq);
i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
} else {
if (cqp_request->callback_fcn)
cqp_request->callback_fcn(cqp_request, 1);
i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}
}
cqe_count++;
} while (1);
if (arm && cqe_count) {
i40iw_process_bh(dev);
dev->ccq_ops->ccq_arm(cq);
}
}
/**
* i40iw_iwarp_ce_handler - handle iwarp completions
* @iwdev: iwarp device
* @iwcq: iwarp cq receiving event
*/
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
struct i40iw_sc_cq *iwcq)
{
struct i40iw_cq *i40iwcq = iwcq->back_cq;
if (i40iwcq->ibcq.comp_handler)
i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
i40iwcq->ibcq.cq_context);
}
/**
* i40iw_puda_ce_handler - handle puda completion events
* @iwdev: iwarp device
* @cq: puda completion q for event
*/
static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
struct i40iw_sc_cq *cq)
{
struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
enum i40iw_status_code status;
u32 compl_error;
do {
status = i40iw_puda_poll_completion(dev, cq, &compl_error);
if (status == I40IW_ERR_QUEUE_EMPTY)
break;
if (status) {
i40iw_pr_err("puda status = %d\n", status);
break;
}
if (compl_error) {
i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
break;
}
} while (1);
dev->ccq_ops->ccq_arm(cq);
}
/**
* i40iw_process_ceq - handle ceq for completions
* @iwdev: iwarp device
* @ceq: ceq having cq for completion
*/
void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
{
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_sc_ceq *sc_ceq;
struct i40iw_sc_cq *cq;
bool arm = true;
sc_ceq = &ceq->sc_ceq;
do {
cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
if (!cq)
break;
if (cq->cq_type == I40IW_CQ_TYPE_CQP)
i40iw_cqp_ce_handler(iwdev, cq, arm);
else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
i40iw_iwarp_ce_handler(iwdev, cq);
else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
(cq->cq_type == I40IW_CQ_TYPE_IEQ))
i40iw_puda_ce_handler(iwdev, cq);
} while (1);
}
/**
* i40iw_next_iw_state - modify qp state
* @iwqp: iwarp qp to modify
* @state: next state for qp
* @del_hash: del hash
* @term: term message
* @termlen: length of term message
*/
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
u8 state,
u8 del_hash,
u8 term,
u8 termlen)
{
struct i40iw_modify_qp_info info;
memset(&info, 0, sizeof(info));
info.next_iwarp_state = state;
info.remove_hash_idx = del_hash;
info.cq_num_valid = true;
info.arp_cache_idx_valid = true;
info.dont_send_term = true;
info.dont_send_fin = true;
info.termlen = termlen;
if (term & I40IWQP_TERM_SEND_TERM_ONLY)
info.dont_send_term = false;
if (term & I40IWQP_TERM_SEND_FIN_ONLY)
info.dont_send_fin = false;
if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
info.reset_tcp_conn = true;
iwqp->hw_iwarp_state = state;
i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
/**
* i40iw_process_aeq - handle aeq events
* @iwdev: iwarp device
*/
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_aeq *aeq = &iwdev->aeq;
struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
struct i40iw_aeqe_info aeinfo;
struct i40iw_aeqe_info *info = &aeinfo;
int ret;
struct i40iw_qp *iwqp = NULL;
struct i40iw_sc_cq *cq = NULL;
struct i40iw_cq *iwcq = NULL;
struct i40iw_sc_qp *qp = NULL;
struct i40iw_qp_host_ctx_info *ctx_info = NULL;
unsigned long flags;
u32 aeqcnt = 0;
if (!sc_aeq->size)
return;
do {
memset(info, 0, sizeof(*info));
ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
if (ret)
break;
aeqcnt++;
i40iw_debug(dev, I40IW_DEBUG_AEQ,
"%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
__func__, info->ae_id, info->qp, info->qp_cq_id);
if (info->qp) {
spin_lock_irqsave(&iwdev->qptable_lock, flags);
iwqp = iwdev->qp_table[info->qp_cq_id];
if (!iwqp) {
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
i40iw_debug(dev, I40IW_DEBUG_AEQ,
"%s qp_id %d is already freed\n",
__func__, info->qp_cq_id);
continue;
}
i40iw_qp_add_ref(&iwqp->ibqp);
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
qp = &iwqp->sc_qp;
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = info->tcp_state;
iwqp->hw_iwarp_state = info->iwarp_state;
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
ctx_info = &iwqp->ctx_info;
ctx_info->err_rq_idx_valid = true;
} else {
if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
continue;
}
switch (info->ae_id) {
case I40IW_AE_LLP_FIN_RECEIVED:
if (qp->term_flags)
break;
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
(iwqp->ibqp_state == IB_QPS_RTS)) {
i40iw_next_iw_state(iwqp,
I40IW_QP_STATE_CLOSING, 0, 0, 0);
i40iw_cm_disconn(iwqp);
}
iwqp->cm_id->add_ref(iwqp->cm_id);
i40iw_schedule_cm_timer(iwqp->cm_node,
(struct i40iw_puda_buf *)iwqp,
I40IW_TIMER_TYPE_CLOSE, 1, 0);
}
break;
case I40IW_AE_LLP_CLOSE_COMPLETE:
if (qp->term_flags)
i40iw_terminate_done(qp, 0);
else
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_BAD_CLOSE:
case I40IW_AE_RESET_SENT:
i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_LLP_CONNECTION_RESET:
if (atomic_read(&iwqp->close_timer_started))
break;
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_QP_SUSPEND_COMPLETE:
i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
break;
case I40IW_AE_TERMINATE_SENT:
i40iw_terminate_send_fin(qp);
break;
case I40IW_AE_LLP_TERMINATE_RECEIVED:
i40iw_terminate_received(qp, info);
break;
case I40IW_AE_CQ_OPERATION_ERROR:
i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
info->ae_id);
cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
iwcq = (struct i40iw_cq *)cq->back_cq;
if (iwcq->ibcq.event_handler) {
struct ib_event ibevent;
ibevent.device = iwcq->ibcq.device;
ibevent.event = IB_EVENT_CQ_ERR;
ibevent.element.cq = &iwcq->ibcq;
iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
}
break;
case I40IW_AE_LLP_DOUBT_REACHABILITY:
break;
case I40IW_AE_PRIV_OPERATION_DENIED:
case I40IW_AE_STAG_ZERO_INVALID:
case I40IW_AE_IB_RREQ_AND_Q1_FULL:
case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
case I40IW_AE_DDP_UBE_INVALID_MO:
case I40IW_AE_DDP_UBE_INVALID_QN:
case I40IW_AE_DDP_NO_L_BIT:
case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
case I40IW_AE_INVALID_ARP_ENTRY:
case I40IW_AE_INVALID_TCP_OPTION_RCVD:
case I40IW_AE_STALE_ARP_ENTRY:
case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
case I40IW_AE_LLP_SYN_RECEIVED:
case I40IW_AE_LLP_TOO_MANY_RETRIES:
case I40IW_AE_LCE_QP_CATASTROPHIC:
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
fallthrough;
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = false;
ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
iwqp->host_ctx.va,
ctx_info);
}
i40iw_terminate_connection(qp, info);
break;
}
if (info->qp)
i40iw_qp_rem_ref(&iwqp->ibqp);
} while (1);
if (aeqcnt)
dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
/**
* i40iw_cqp_manage_abvpt_cmd - send cqp command to manage an apbvt entry
* @iwdev: iwarp device
* @accel_local_port: port for apbvt
* @add_port: add or delete port
*/
static enum i40iw_status_code
i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
u16 accel_local_port,
bool add_port)
{
struct i40iw_apbvt_info *info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
enum i40iw_status_code status;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
if (!cqp_request)
return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
memset(info, 0, sizeof(*info));
info->add = add_port;
info->port = cpu_to_le16(accel_local_port);
cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
cqp_info->post_sq = 1;
cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage APBVT entry fail");
return status;
}
/**
* i40iw_manage_apbvt - add or delete tcp port
* @iwdev: iwarp device
* @accel_local_port: port for apbvt
* @add_port: add or delete port
*/
enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
u16 accel_local_port,
bool add_port)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
enum i40iw_status_code status;
unsigned long flags;
bool in_use;
/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
* protect against race where add APBVT CQP can race ahead of the delete
* APBVT for same port.
*/
if (add_port) {
spin_lock_irqsave(&cm_core->apbvt_lock, flags);
in_use = __test_and_set_bit(accel_local_port,
cm_core->ports_in_use);
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
if (in_use)
return 0;
return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
true);
} else {
spin_lock_irqsave(&cm_core->apbvt_lock, flags);
in_use = i40iw_port_in_use(cm_core, accel_local_port);
if (in_use) {
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
return 0;
}
__clear_bit(accel_local_port, cm_core->ports_in_use);
status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
false);
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
return status;
}
}
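/* Illustrative sketch, not from the driver source: a hypothetical caller of
 * i40iw_manage_apbvt(). The port number is an arbitrary example; an APBVT
 * entry marks a local TCP port as owned by iWARP so the hardware steers its
 * traffic accordingly, and the entry is removed when the last user is gone.
 */
static void example_apbvt_round_trip(struct i40iw_device *iwdev)
{
	u16 port = 4321;	/* example port only */

	if (i40iw_manage_apbvt(iwdev, port, true))
		return;
	/* ... port is now accelerated ... */
	i40iw_manage_apbvt(iwdev, port, false);
}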
/**
* i40iw_manage_arp_cache - manage hw arp cache
* @iwdev: iwarp device
* @mac_addr: mac address ptr
* @ip_addr: ip addr for arp cache
* @ipv4: flag indicating IPv4 when true
* @action: add, delete or modify
*/
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
unsigned char *mac_addr,
u32 *ip_addr,
bool ipv4,
u32 action)
{
struct i40iw_add_arp_cache_entry_info *info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
int arp_index;
arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
return;
cqp_info = &cqp_request->info;
if (action == I40IW_ARP_ADD) {
cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
info = &cqp_info->in.u.add_arp_cache_entry.info;
memset(info, 0, sizeof(*info));
info->arp_index = cpu_to_le16((u16)arp_index);
info->permanent = true;
ether_addr_copy(info->mac_addr, mac_addr);
cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
} else {
cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
}
cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
cqp_info->post_sq = 1;
if (i40iw_handle_cqp_op(iwdev, cqp_request))
i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
}
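/* Illustrative sketch, not from the driver source: a hypothetical caller
 * pushing a neighbour into the hardware ARP cache. The IPv4 address is a
 * placeholder value only.
 */
static void example_arp_add(struct i40iw_device *iwdev, unsigned char *mac)
{
	u32 ip[4] = { 0xc0a80101, 0, 0, 0 };	/* 192.168.1.1, example only */

	i40iw_manage_arp_cache(iwdev, mac, ip, true, I40IW_ARP_ADD);
}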
/**
* i40iw_send_syn_cqp_callback - do syn/ack after qhash
* @cqp_request: qhash cqp completion
* @send_ack: flag send ack
*/
static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
{
i40iw_send_syn(cqp_request->param, send_ack);
}
/**
* i40iw_manage_qhash - add or modify qhash
* @iwdev: iwarp device
* @cminfo: cm info for qhash
* @etype: type (syn or quad)
* @mtype: type of qhash
* @cmnode: cmnode associated with connection
* @wait: wait for completion
*/
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait)
{
struct i40iw_qhash_table_info *info;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_sc_vsi *vsi = &iwdev->vsi;
enum i40iw_status_code status;
struct i40iw_cqp *iwcqp = &iwdev->cqp;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
cqp_request = i40iw_get_cqp_request(iwcqp, wait);
if (!cqp_request)
return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_qhash_table_entry.info;
memset(info, 0, sizeof(*info));
info->vsi = &iwdev->vsi;
info->manage = mtype;
info->entry_type = etype;
if (cminfo->vlan_id != 0xFFFF) {
info->vlan_valid = true;
info->vlan_id = cpu_to_le16(cminfo->vlan_id);
} else {
info->vlan_valid = false;
}
info->ipv4_valid = cminfo->ipv4;
info->user_pri = cminfo->user_pri;
ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
info->dest_port = cpu_to_le16(cminfo->loc_port);
info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
info->src_port = cpu_to_le16(cminfo->rem_port);
info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
}
if (cmnode) {
cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
cqp_request->param = (void *)cmnode;
}
if (info->ipv4_valid)
i40iw_debug(dev, I40IW_DEBUG_CM,
"%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
__func__, (!mtype) ? "DELETE" : "ADD",
info->dest_ip,
info->dest_port, info->mac_addr, cminfo->vlan_id);
else
i40iw_debug(dev, I40IW_DEBUG_CM,
"%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
__func__, (!mtype) ? "DELETE" : "ADD",
info->dest_ip,
info->dest_port, info->mac_addr, cminfo->vlan_id);
cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
cqp_info->post_sq = 1;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
return status;
}
/**
* i40iw_hw_flush_wqes - flush qp's wqe
* @iwdev: iwarp device
* @qp: hardware control qp
* @info: info for flush
* @wait: flag wait for completion
*/
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_qp_flush_info *info,
bool wait)
{
enum i40iw_status_code status;
struct i40iw_qp_flush_info *hw_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
memcpy(hw_info, info, sizeof(*hw_info));
cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_flush_wqes.qp = qp;
cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status) {
i40iw_pr_err("CQP-OP Flush WQE's fail");
complete(&iwqp->sq_drained);
complete(&iwqp->rq_drained);
return status;
}
if (!cqp_request->compl_info.maj_err_code) {
switch (cqp_request->compl_info.min_err_code) {
case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
complete(&iwqp->sq_drained);
break;
case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
complete(&iwqp->rq_drained);
break;
case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
break;
default:
complete(&iwqp->sq_drained);
complete(&iwqp->rq_drained);
break;
}
}
return 0;
}
/**
* i40iw_gen_ae - generate AE
* @iwdev: iwarp device
* @qp: qp associated with AE
* @info: info for ae
* @wait: wait for completion
*/
void i40iw_gen_ae(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_gen_ae_info *info,
bool wait)
{
struct i40iw_gen_ae_info *ae_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
return;
cqp_info = &cqp_request->info;
ae_info = &cqp_request->info.in.u.gen_ae.info;
memcpy(ae_info, info, sizeof(*ae_info));
cqp_info->cqp_cmd = OP_GEN_AE;
cqp_info->post_sq = 1;
cqp_info->in.u.gen_ae.qp = qp;
cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
if (i40iw_handle_cqp_op(iwdev, cqp_request))
i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n",
info->ae_code);
}
/**
* i40iw_hw_manage_vf_pble_bp - manage vf pbles
* @iwdev: iwarp device
* @info: info for managing pble
* @wait: flag wait for completion
*/
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait)
{
enum i40iw_status_code status;
struct i40iw_manage_vf_pble_info *hw_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
if ((iwdev->init_state < CCQ_CREATED) && wait)
wait = false;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
memcpy(hw_info, info, sizeof(*hw_info));
cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
cqp_info->post_sq = 1;
cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
return status;
}
/**
* i40iw_get_ib_wc - convert an iwarp flush code to an IB work completion status
* @opcode: iwarp flush code
*/
static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
{
switch (opcode) {
case FLUSH_PROT_ERR:
return IB_WC_LOC_PROT_ERR;
case FLUSH_REM_ACCESS_ERR:
return IB_WC_REM_ACCESS_ERR;
case FLUSH_LOC_QP_OP_ERR:
return IB_WC_LOC_QP_OP_ERR;
case FLUSH_REM_OP_ERR:
return IB_WC_REM_OP_ERR;
case FLUSH_LOC_LEN_ERR:
return IB_WC_LOC_LEN_ERR;
case FLUSH_GENERAL_ERR:
return IB_WC_GENERAL_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
}
}
/**
* i40iw_set_flush_info - set flush info
* @pinfo: flush info to populate
* @min: minor err
* @maj: major err
* @opcode: flush error code
*/
static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
u16 *min,
u16 *maj,
enum i40iw_flush_opcode opcode)
{
*min = (u16)i40iw_get_ib_wc(opcode);
*maj = CQE_MAJOR_DRV;
pinfo->userflushcode = true;
}
/**
* i40iw_flush_wqes - flush wqe for qp
* @iwdev: iwarp device
* @iwqp: qp to flush wqes
*/
void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
{
struct i40iw_qp_flush_info info;
struct i40iw_qp_flush_info *pinfo = &info;
struct i40iw_sc_qp *qp = &iwqp->sc_qp;
memset(pinfo, 0, sizeof(*pinfo));
info.sq = true;
info.rq = true;
if (qp->term_flags) {
i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
&pinfo->sq_major_code, qp->flush_code);
i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
&pinfo->rq_major_code, qp->flush_code);
}
(void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
}

File diff suppressed because it is too large.


@@ -1,195 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_OSDEP_H
#define I40IW_OSDEP_H
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <net/tcp.h>
#include <crypto/hash.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <linux/io-64-nonatomic-lo-hi.h>
#define STATS_TIMER_DELAY 1000
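/**
 * set_64bit_val - write 64 bit value to wqe
 * @wqe_words: wqe addr
 * @byte_index: index to write to
 * @value: value to write
 **/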
static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
{
wqe_words[byte_index >> 3] = value;
}
/**
* get_64bit_val - read 64 bit value from wqe
* @wqe_words: wqe addr
* @byte_index: index to read from
* @value: read value
**/
static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
{
*value = wqe_words[byte_index >> 3];
}
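/* Illustrative sketch, not from the original header: byte_index in the two
 * helpers above is a byte offset into the WQE (divided by 8 internally), so
 * consecutive 64-bit fields live at offsets 0, 8, 16, ...
 */
static inline void example_wqe_access(u64 *wqe)
{
	u64 hdr;

	set_64bit_val(wqe, 0, 1ULL);	/* writes wqe[0] */
	set_64bit_val(wqe, 8, 2ULL);	/* writes wqe[1] */
	get_64bit_val(wqe, 0, &hdr);	/* hdr = wqe[0]  */
}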
struct i40iw_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
} __packed;
struct i40iw_virt_mem {
void *va;
u32 size;
} __packed;
#define i40iw_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
pr_info("i40iw " s, ##__VA_ARGS__); \
} while (0)
#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31
#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0
#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \
<< I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16
#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20
#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \
<< I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31
#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31
#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \
<< I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31
#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0
#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \
<< I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1
#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \
<< I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \
<< I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \
<< I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
#define I40E_GLPE_FWLDSTATUS 0x0000D200
#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1
#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2
#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3
#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4
#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
struct i40iw_sc_dev;
struct i40iw_sc_qp;
struct i40iw_puda_buf;
struct i40iw_puda_completion_info;
struct i40iw_update_sds_info;
struct i40iw_hmc_fcn_info;
struct i40iw_virtchnl_work_info;
struct i40iw_manage_vf_pble_info;
struct i40iw_device;
struct i40iw_hmc_info;
struct i40iw_hw;
u8 __iomem *i40iw_get_hw_addr(void *dev);
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
u32 length, u32 value);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
void i40iw_free_hash_desc(struct shash_desc *);
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
struct i40iw_update_sds_info *info);
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
struct i40iw_hmc_fcn_info *hmcfcninfo);
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *values_mem,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *values_mem,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *mem);
enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
struct i40iw_manage_vf_pble_info *info);
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait);
struct i40iw_sc_vsi;
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() do { } while (0)
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
#endif /* I40IW_OSDEP_H */


@@ -1,129 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_P_H
#define I40IW_P_H
#define PAUSE_TIMER_VALUE 0xFFFF
#define REFRESH_THRESHOLD 0x7FFF
#define HIGH_THRESHOLD 0x800
#define LOW_THRESHOLD 0x200
#define ALL_TC2PFC 0xFF
#define CQP_COMPL_WAIT_TIME 0x3E8
#define CQP_TIMEOUT_THRESHOLD 5
void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
char *desc, u64 *buf, u32 size);
/* init operations */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
struct i40iw_device_init_info *info);
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
/* HMC/FPM functions */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
u32 *vf_cnt_array);
/* stats functions */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
enum i40iw_hw_stats_index_32b index,
u64 *value);
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
enum i40iw_hw_stats_index_64b index,
u64 *value);
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
/* vsi misc functions */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
struct i40iw_sc_qp *qp, u64 scratch);
enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
struct i40iw_sc_qp *qp, u64 scratch);
enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
u64 scratch, u8 hmc_fn_id,
bool post_sq,
bool poll_registers);
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev);
void free_sd_mem(struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
struct cqp_commands_info *pcmdinfo);
enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
/* prototype for functions used for dynamic memory allocation */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
struct i40iw_dma_mem *mem, u64 size,
u32 alignment);
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
struct i40iw_virt_mem *mem, u32 size);
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
struct i40iw_virt_mem *mem);
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
#endif


@@ -1,611 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"
struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
/**
* i40iw_destroy_pble_pool - destroy pool during module unload
* @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
*/
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
struct list_head *clist;
struct list_head *tlist;
struct i40iw_chunk *chunk;
struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;
if (pinfo->pool) {
list_for_each_safe(clist, tlist, &pinfo->clist) {
chunk = list_entry(clist, struct i40iw_chunk, list);
if (chunk->type == I40IW_VMALLOC)
i40iw_free_vmalloc_mem(dev->hw, chunk);
kfree(chunk);
}
gen_pool_destroy(pinfo->pool);
}
}
/**
* i40iw_hmc_init_pble - Initialize pble resources during module load
* @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
*/
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
struct i40iw_hmc_info *hmc_info;
u32 fpm_idx = 0;
hmc_info = dev->hmc_info;
pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
/* Now start the pbles on a 4k boundary */
if (pble_rsrc->fpm_base_addr & 0xfff)
fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
pble_rsrc->unallocated_pble =
hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
if (!pble_rsrc->pinfo.pool)
goto error;
if (add_pble_pool(dev, pble_rsrc))
goto error;
return 0;
error:
i40iw_destroy_pble_pool(dev, pble_rsrc);
return I40IW_ERR_NO_MEMORY;
}
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
* @pble_rsrc: structure containing fpm address
* @idx: where to return indexes
*/
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
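/* Worked example, next_fpm_addr assumed for illustration only:
 * for next_fpm_addr = 0x00c02000,
 *   sd_idx     = 0x00c02000 / 0x200000 (I40IW_HMC_DIRECT_BP_SIZE) = 6
 *   pd_idx     = 0x00c02000 / 0x1000   (I40IW_HMC_PAGED_BP_SIZE)  = 3074
 *   rel_pd_idx = 3074 % 512            (I40IW_HMC_PD_CNT_IN_SD)   = 2
 */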
/**
* add_sd_direct - add sd direct for pble
* @dev: hardware control device structure
* @pble_rsrc: pble resource ptr
* @info: page info for sd
*/
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_add_page_info *info)
{
enum i40iw_status_code ret_code = 0;
struct sd_pd_idx *idx = &info->idx;
struct i40iw_chunk *chunk = info->chunk;
struct i40iw_hmc_info *hmc_info = info->hmc_info;
struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
u32 offset = 0;
if (!sd_entry->valid) {
if (dev->is_pf) {
ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
info->idx.sd_idx,
I40IW_SD_TYPE_DIRECT,
I40IW_HMC_DIRECT_BP_SIZE);
if (ret_code)
return ret_code;
chunk->type = I40IW_DMA_COHERENT;
}
}
offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
}
/**
* i40iw_free_vmalloc_mem - free vmalloc during close
* @hw: hw struct
* @chunk: chunk information for vmalloc
*/
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
struct pci_dev *pcidev = hw->pcidev;
int i;
if (!chunk->pg_cnt)
goto done;
for (i = 0; i < chunk->pg_cnt; i++)
dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
kfree(chunk->dmaaddrs);
chunk->dmaaddrs = NULL;
vfree(chunk->vaddr);
chunk->vaddr = NULL;
chunk->type = 0;
}
/**
* i40iw_get_vmalloc_mem - get 2M page for sd
* @hw: hardware address
* @chunk: chunk to add
* @pg_cnt: number of 4K pages
*/
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
struct i40iw_chunk *chunk,
int pg_cnt)
{
struct pci_dev *pcidev = hw->pcidev;
struct page *page;
u8 *addr;
u32 size;
int i;
chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
if (!chunk->dmaaddrs)
return I40IW_ERR_NO_MEMORY;
size = PAGE_SIZE * pg_cnt;
chunk->vaddr = vmalloc(size);
if (!chunk->vaddr) {
kfree(chunk->dmaaddrs);
chunk->dmaaddrs = NULL;
return I40IW_ERR_NO_MEMORY;
}
chunk->size = size;
addr = (u8 *)chunk->vaddr;
for (i = 0; i < pg_cnt; i++) {
page = vmalloc_to_page((void *)addr);
if (!page)
break;
chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
break;
addr += PAGE_SIZE;
}
chunk->pg_cnt = i;
chunk->type = I40IW_VMALLOC;
if (i == pg_cnt)
return 0;
i40iw_free_vmalloc_mem(hw, chunk);
return I40IW_ERR_NO_MEMORY;
}
/**
* fpm_to_idx - given fpm address, get pble index
* @pble_rsrc: pble resource management
* @addr: fpm address for index
*/
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}
/**
* add_bp_pages - add backing pages for sd
* @dev: hardware control device structure
* @pble_rsrc: pble resource management
* @info: page info for sd
*/
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_add_page_info *info)
{
u8 *addr;
struct i40iw_dma_mem mem;
struct i40iw_hmc_pd_entry *pd_entry;
struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
struct i40iw_hmc_info *hmc_info = info->hmc_info;
struct i40iw_chunk *chunk = info->chunk;
struct i40iw_manage_vf_pble_info vf_pble_info;
enum i40iw_status_code status = 0;
u32 rel_pd_idx = info->idx.rel_pd_idx;
u32 pd_idx = info->idx.pd_idx;
u32 i;
status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
if (status)
return I40IW_ERR_NO_MEMORY;
status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
I40IW_HMC_DIRECT_BP_SIZE);
if (status)
goto error;
if (!dev->is_pf) {
status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
fpm_to_idx(pble_rsrc,
pble_rsrc->next_fpm_addr),
(info->pages << PBLE_512_SHIFT));
if (status) {
i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
goto error;
}
}
addr = chunk->vaddr;
for (i = 0; i < info->pages; i++) {
mem.pa = chunk->dmaaddrs[i];
mem.size = PAGE_SIZE;
mem.va = (void *)(addr);
pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
if (!pd_entry->valid) {
status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
if (status)
goto error;
addr += PAGE_SIZE;
} else {
i40iw_pr_err("pd entry is valid expecting to be invalid\n");
}
}
if (!dev->is_pf) {
vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
vf_pble_info.inv_pd_ent = false;
vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
vf_pble_info.sd_index = info->idx.sd_idx;
status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
&vf_pble_info, true);
if (status) {
i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
goto error;
}
}
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
return 0;
error:
i40iw_free_vmalloc_mem(dev->hw, chunk);
return status;
}
/**
* add_pble_pool - add an sd entry for the pble resource
* @dev: hardware control device structure
* @pble_rsrc: pble resource management
*/
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
struct i40iw_hmc_sd_entry *sd_entry;
struct i40iw_hmc_info *hmc_info;
struct i40iw_chunk *chunk;
struct i40iw_add_page_info info;
struct sd_pd_idx *idx = &info.idx;
enum i40iw_status_code ret_code = 0;
enum i40iw_sd_entry_type sd_entry_type;
u64 sd_reg_val = 0;
u32 pages;
if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
return I40IW_ERR_NO_MEMORY;
if (pble_rsrc->next_fpm_addr & 0xfff) {
i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
}
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (!chunk)
return I40IW_ERR_NO_MEMORY;
hmc_info = dev->hmc_info;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
get_sd_pd_idx(pble_rsrc, idx);
sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
info.sd_entry = sd_entry;
if (!sd_entry->valid) {
sd_entry_type = (!idx->rel_pd_idx &&
(pages == I40IW_HMC_PD_CNT_IN_SD) &&
dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
} else {
sd_entry_type = sd_entry->entry_type;
}
i40iw_debug(dev, I40IW_DEBUG_PBLE,
"pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
sd_entry_type, sd_entry->valid);
if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
ret_code = add_sd_direct(dev, pble_rsrc, &info);
if (ret_code)
sd_entry_type = I40IW_SD_TYPE_PAGED;
else
pble_rsrc->stats_direct_sds++;
if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
ret_code = add_bp_pages(dev, pble_rsrc, &info);
if (ret_code)
goto error;
else
pble_rsrc->stats_paged_sds++;
}
if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
(phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n");
ret_code = I40IW_ERR_NO_MEMORY;
goto error;
}
pble_rsrc->next_fpm_addr += chunk->size;
i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (chunk->size >> 3);
sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
if (dev->is_pf && !sd_entry->valid) {
ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
if (ret_code) {
i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
goto error;
}
}
sd_entry->valid = true;
list_add(&chunk->list, &pble_rsrc->pinfo.clist);
return 0;
error:
kfree(chunk);
return ret_code;
}
/**
* free_lvl2 - free level 2 pble
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc)
{
u32 i;
struct gen_pool *pool;
struct i40iw_pble_level2 *lvl2 = &palloc->level2;
struct i40iw_pble_info *root = &lvl2->root;
struct i40iw_pble_info *leaf = lvl2->leaf;
pool = pble_rsrc->pinfo.pool;
for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
if (leaf->addr)
gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
else
break;
}
if (root->addr)
gen_pool_free(pool, root->addr, (root->cnt << 3));
kfree(lvl2->leaf);
lvl2->leaf = NULL;
}
/**
* get_lvl2_pble - get level 2 pble resource
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
* @pool: pool pointer
*/
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc,
struct gen_pool *pool)
{
u32 lf4k, lflast, total, i;
u32 pblcnt = PBLE_PER_PAGE;
u64 *addr;
struct i40iw_pble_level2 *lvl2 = &palloc->level2;
struct i40iw_pble_info *root = &lvl2->root;
struct i40iw_pble_info *leaf;
/* number of full 512-pble (4K) leaves */
lf4k = palloc->total_cnt >> 9;
lflast = palloc->total_cnt % PBLE_PER_PAGE;
total = (lflast == 0) ? lf4k : lf4k + 1;
lvl2->leaf_cnt = total;
leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
if (!leaf)
return I40IW_ERR_NO_MEMORY;
lvl2->leaf = leaf;
/* allocate pbles for the root */
root->addr = gen_pool_alloc(pool, (total << 3));
if (!root->addr) {
kfree(lvl2->leaf);
lvl2->leaf = NULL;
return I40IW_ERR_NO_MEMORY;
}
root->idx = fpm_to_idx(pble_rsrc,
(u64)gen_pool_virt_to_phys(pool, root->addr));
root->cnt = total;
addr = (u64 *)root->addr;
for (i = 0; i < total; i++, leaf++) {
pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
if (!leaf->addr)
goto error;
leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
leaf->cnt = pblcnt;
*addr = (u64)leaf->idx;
addr++;
}
palloc->level = I40IW_LEVEL_2;
pble_rsrc->stats_lvl2++;
return 0;
error:
free_lvl2(pble_rsrc, palloc);
return I40IW_ERR_NO_MEMORY;
}
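/*
* Worked example for the leaf split above (hypothetical request size, not
* taken from the driver): a request of total_cnt = 1300 pbles gives
* lf4k = 1300 >> 9 = 2 full 512-entry leaves, lflast = 1300 % PBLE_PER_PAGE
* = 276 pbles in a final partial leaf, and total = lf4k + 1 = 3 leaves, so
* the root page carries 3 index entries (total << 3 = 24 bytes).
*/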
/**
* get_lvl1_pble - get level 1 pble resource
* @dev: hardware control device structure
* @pble_rsrc: pble resource management
* @palloc: level 1 pble allocation
*/
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc)
{
u64 *addr;
struct gen_pool *pool;
struct i40iw_pble_info *lvl1 = &palloc->level1;
pool = pble_rsrc->pinfo.pool;
addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
if (!addr)
return I40IW_ERR_NO_MEMORY;
palloc->level = I40IW_LEVEL_1;
lvl1->addr = (unsigned long)addr;
lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
(unsigned long)addr));
lvl1->cnt = palloc->total_cnt;
pble_rsrc->stats_lvl1++;
return 0;
}
/**
* get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
* @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @pool: pointer to general purpose special memory pool descriptor
*/
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc,
struct gen_pool *pool)
{
enum i40iw_status_code status = 0;
status = get_lvl1_pble(dev, pble_rsrc, palloc);
if (status && (palloc->total_cnt > PBLE_PER_PAGE))
status = get_lvl2_pble(pble_rsrc, palloc, pool);
return status;
}
/**
* i40iw_get_pble - allocate pbles from the pool
* @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @pble_cnt: #of pbles requested
*/
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc,
u32 pble_cnt)
{
struct gen_pool *pool;
enum i40iw_status_code status = 0;
u32 max_sds = 0;
int i;
pool = pble_rsrc->pinfo.pool;
palloc->total_cnt = pble_cnt;
palloc->level = I40IW_LEVEL_0;
/* check first to see if we can get pbles without acquiring additional sds */
status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
if (!status)
goto exit;
max_sds = (palloc->total_cnt >> 18) + 1;
for (i = 0; i < max_sds; i++) {
status = add_pble_pool(dev, pble_rsrc);
if (status)
break;
status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
if (!status)
break;
}
exit:
if (!status)
pble_rsrc->stats_alloc_ok++;
else
pble_rsrc->stats_alloc_fail++;
return status;
}
/**
* i40iw_free_pble - put pbles back into pool
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble resource being freed
*/
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc)
{
struct gen_pool *pool;
pool = pble_rsrc->pinfo.pool;
if (palloc->level == I40IW_LEVEL_2)
free_lvl2(pble_rsrc, palloc);
else
gen_pool_free(pool, palloc->level1.addr,
(palloc->level1.cnt << 3));
pble_rsrc->stats_alloc_freed++;
}
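For illustration, a minimal sketch of the genalloc pattern the PBLE manager builds on (function and variable names here are illustrative, not driver code). The pool's "physical" addresses are really FPM offsets, which is why gen_pool_virt_to_phys() on an allocation yields the FPM address programmed into the hardware:

#include <linux/errno.h>
#include <linux/genalloc.h>

static int pble_pool_sketch(void *chunk_va, u64 fpm_addr, size_t chunk_size)
{
	struct gen_pool *pool;
	unsigned long va;

	pool = gen_pool_create(6, -1);	/* 64-byte minimum allocation order (POOL_SHIFT) */
	if (!pool)
		return -ENOMEM;

	/* back the pool with one chunk; the "phys" argument is the chunk's FPM offset */
	if (gen_pool_add_virt(pool, (unsigned long)chunk_va,
			      (phys_addr_t)fpm_addr, chunk_size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	va = gen_pool_alloc(pool, 256 << 3);	/* room for 256 8-byte pbles */
	if (va) {
		u64 fpm_idx = gen_pool_virt_to_phys(pool, va);	/* FPM address of the allocation */

		(void)fpm_idx;
		gen_pool_free(pool, va, 256 << 3);
	}

	gen_pool_destroy(pool);
	return 0;
}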


@ -1,131 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_PBLE_H
#define I40IW_PBLE_H
#define POOL_SHIFT 6
#define PBLE_PER_PAGE 512
#define I40IW_HMC_PAGED_BP_SHIFT 12
#define PBLE_512_SHIFT 9
enum i40iw_pble_level {
I40IW_LEVEL_0 = 0,
I40IW_LEVEL_1 = 1,
I40IW_LEVEL_2 = 2
};
enum i40iw_alloc_type {
I40IW_NO_ALLOC = 0,
I40IW_DMA_COHERENT = 1,
I40IW_VMALLOC = 2
};
struct i40iw_pble_info {
unsigned long addr;
u32 idx;
u32 cnt;
};
struct i40iw_pble_level2 {
struct i40iw_pble_info root;
struct i40iw_pble_info *leaf;
u32 leaf_cnt;
};
struct i40iw_pble_alloc {
u32 total_cnt;
enum i40iw_pble_level level;
union {
struct i40iw_pble_info level1;
struct i40iw_pble_level2 level2;
};
};
struct sd_pd_idx {
u32 sd_idx;
u32 pd_idx;
u32 rel_pd_idx;
};
struct i40iw_add_page_info {
struct i40iw_chunk *chunk;
struct i40iw_hmc_sd_entry *sd_entry;
struct i40iw_hmc_info *hmc_info;
struct sd_pd_idx idx;
u32 pages;
};
struct i40iw_chunk {
struct list_head list;
u32 size;
void *vaddr;
u64 fpm_addr;
u32 pg_cnt;
dma_addr_t *dmaaddrs;
enum i40iw_alloc_type type;
};
struct i40iw_pble_pool {
struct gen_pool *pool;
struct list_head clist;
u32 total_pble_alloc;
u32 free_pble_cnt;
u32 pool_shift;
};
struct i40iw_hmc_pble_rsrc {
u32 unallocated_pble;
u64 fpm_base_addr;
u64 next_fpm_addr;
struct i40iw_pble_pool pinfo;
u32 stats_direct_sds;
u32 stats_paged_sds;
u64 stats_alloc_ok;
u64 stats_alloc_fail;
u64 stats_alloc_freed;
u64 stats_lvl1;
u64 stats_lvl2;
};
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc);
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct i40iw_pble_alloc *palloc,
u32 pble_cnt);
#endif
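Taken together, the declarations above give the consumer-side flow for backing a memory registration with pbles. A minimal sketch (illustrative caller, assuming one pble per page of the region being registered):

static enum i40iw_status_code pble_alloc_sketch(struct i40iw_sc_dev *dev,
						struct i40iw_hmc_pble_rsrc *pble_rsrc,
						u32 pg_cnt)
{
	struct i40iw_pble_alloc palloc = {};
	enum i40iw_status_code status;

	status = i40iw_get_pble(dev, pble_rsrc, &palloc, pg_cnt);
	if (status)
		return status;

	/*
	 * palloc.level reports the shape of the allocation: I40IW_LEVEL_1
	 * means one contiguous run (palloc.level1), I40IW_LEVEL_2 means a
	 * root page pointing at up to 512-entry leaves (palloc.level2).
	 */

	i40iw_free_pble(pble_rsrc, &palloc);	/* returned when the MR is destroyed */
	return 0;
}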

File diff suppressed because it is too large


@ -1,188 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_PUDA_H
#define I40IW_PUDA_H
#define I40IW_IEQ_MPA_FRAMING 6
struct i40iw_sc_dev;
struct i40iw_sc_qp;
struct i40iw_sc_cq;
enum puda_resource_type {
I40IW_PUDA_RSRC_TYPE_ILQ = 1,
I40IW_PUDA_RSRC_TYPE_IEQ
};
enum puda_rsrc_complete {
PUDA_CQ_CREATED = 1,
PUDA_QP_CREATED,
PUDA_TX_COMPLETE,
PUDA_RX_COMPLETE,
PUDA_HASH_CRC_COMPLETE
};
struct i40iw_puda_completion_info {
struct i40iw_qp_uk *qp;
u8 q_type;
u8 vlan_valid;
u8 l3proto;
u8 l4proto;
u16 payload_len;
u32 compl_error; /* No_err=0, else major and minor err code */
u32 qp_id;
u32 wqe_idx;
};
struct i40iw_puda_send_info {
u64 paddr; /* Physical address */
u32 len;
u8 tcplen;
u8 maclen;
bool ipv4;
bool doloopback;
void *scratch;
};
struct i40iw_puda_buf {
struct list_head list; /* MUST be first entry */
struct i40iw_dma_mem mem; /* DMA memory for the buffer */
struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */
struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */
void *scratch;
u8 *iph;
u8 *tcph;
u8 *data;
u16 datalen;
u16 vlan_id;
u8 tcphlen; /* tcp length in bytes */
u8 maclen; /* mac length in bytes */
u32 totallen; /* maclen+iphlen+tcphlen+datalen */
atomic_t refcount;
u8 hdrlen;
bool ipv4;
u32 seqnum;
};
struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u16 buf_size;
u16 mss;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
};
struct i40iw_puda_rsrc {
struct i40iw_sc_cq cq;
struct i40iw_sc_qp qp;
struct i40iw_sc_pd sc_pd;
struct i40iw_sc_dev *dev;
struct i40iw_sc_vsi *vsi;
struct i40iw_dma_mem cqmem;
struct i40iw_dma_mem qpmem;
struct i40iw_virt_mem ilq_mem;
enum puda_rsrc_complete completion;
enum puda_resource_type type;
u16 buf_size; /* buffer must be max datalen + tcpip hdr + mac */
u16 mss;
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 cq_size;
struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u32 compl_rxwqe_idx;
u32 rx_wqe_idx;
u32 rxq_invalid_cnt;
u32 tx_wqe_avail_cnt;
bool check_crc;
struct shash_desc *hash_desc;
struct list_head txpend;
struct list_head bufpool; /* free buffers pool list for recv and xmit */
u32 alloc_buf_count;
u32 avail_buf_count; /* snapshot of currently available buffers */
spinlock_t bufpool_lock;
struct i40iw_puda_buf *alloclist;
void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
/* puda stats */
u64 stats_buf_alloc_fail;
u64 stats_pkt_rcvd;
u64 stats_pkt_sent;
u64 stats_rcvd_pkt_err;
u64 stats_sent_pkt_q;
u64 stats_bad_qp_id;
};
struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
struct i40iw_puda_buf *buf);
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
struct i40iw_puda_send_info *info);
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_rsrc_info *info);
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
enum puda_resource_type type,
bool reset);
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
struct i40iw_sc_cq *cq, u32 *compl_err);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
void *addr, u32 length, u32 value);
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_free_hash_desc(struct shash_desc *desc);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
u32 seqnum);
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif
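The resource-info structure above is the whole configuration surface for a privileged UDA queue. A minimal sketch of bringing up an IEQ (the IDs, queue sizes, and buffer size below are illustrative placeholders, not values the driver actually uses):

static enum i40iw_status_code ieq_setup_sketch(struct i40iw_sc_vsi *vsi,
					       void (*rx)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *),
					       void (*txdone)(struct i40iw_sc_vsi *, void *))
{
	struct i40iw_puda_rsrc_info info = {};
	enum i40iw_status_code status;

	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;	/* iWARP exception queue (MPA reassembly path) */
	info.cq_id = 1;
	info.qp_id = 1;
	info.sq_size = 512;
	info.rq_size = 512;
	info.tx_buf_cnt = 128;			/* total bufs allocated = rq_size + tx_buf_cnt */
	info.buf_size = 2048;			/* must cover max datalen + TCP/IP + MAC headers */
	info.receive = rx;			/* called per received buffer */
	info.xmit_complete = txdone;		/* called when a posted send completes */

	status = i40iw_puda_create_rsrc(vsi, &info);
	if (status)
		return status;

	/* ...and on teardown: */
	i40iw_puda_dele_resources(vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
	return 0;
}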

File diff suppressed because it is too large


@ -1,101 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_STATUS_H
#define I40IW_STATUS_H
/* Error Codes */
enum i40iw_status_code {
I40IW_SUCCESS = 0,
I40IW_ERR_NVM = -1,
I40IW_ERR_NVM_CHECKSUM = -2,
I40IW_ERR_CONFIG = -4,
I40IW_ERR_PARAM = -5,
I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
I40IW_ERR_RESET_FAILED = -7,
I40IW_ERR_SWFW_SYNC = -8,
I40IW_ERR_NO_MEMORY = -9,
I40IW_ERR_BAD_PTR = -10,
I40IW_ERR_INVALID_PD_ID = -11,
I40IW_ERR_INVALID_QP_ID = -12,
I40IW_ERR_INVALID_CQ_ID = -13,
I40IW_ERR_INVALID_CEQ_ID = -14,
I40IW_ERR_INVALID_AEQ_ID = -15,
I40IW_ERR_INVALID_SIZE = -16,
I40IW_ERR_INVALID_ARP_INDEX = -17,
I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
I40IW_ERR_INVALID_FRAG_COUNT = -21,
I40IW_ERR_QUEUE_EMPTY = -22,
I40IW_ERR_INVALID_ALIGNMENT = -23,
I40IW_ERR_FLUSHED_QUEUE = -24,
I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
I40IW_ERR_TIMEOUT = -27,
I40IW_ERR_OPCODE_MISMATCH = -28,
I40IW_ERR_CQP_COMPL_ERROR = -29,
I40IW_ERR_INVALID_VF_ID = -30,
I40IW_ERR_INVALID_HMCFN_ID = -31,
I40IW_ERR_BACKING_PAGE_ERROR = -32,
I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
I40IW_ERR_INVALID_PBLE_INDEX = -34,
I40IW_ERR_INVALID_SD_INDEX = -35,
I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
I40IW_ERR_INVALID_SD_TYPE = -37,
I40IW_ERR_MEMCPY_FAILED = -38,
I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
I40IW_ERR_SRQ_ENABLED = -42,
I40IW_ERR_BUF_TOO_SHORT = -43,
I40IW_ERR_BAD_IWARP_CQE = -44,
I40IW_ERR_NVM_BLANK_MODE = -45,
I40IW_ERR_NOT_IMPLEMENTED = -46,
I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
I40IW_ERR_NOT_READY = -48,
I40IW_NOT_SUPPORTED = -49,
I40IW_ERR_FIRMWARE_API_VERSION = -50,
I40IW_ERR_RING_FULL = -51,
I40IW_ERR_MPA_CRC = -61,
I40IW_ERR_NO_TXBUFS = -62,
I40IW_ERR_SEQ_NUM = -63,
I40IW_ERR_list_empty = -64,
I40IW_ERR_INVALID_MAC_ADDR = -65,
I40IW_ERR_BAD_STAG = -66,
I40IW_ERR_CQ_COMPL_ERROR = -67,
I40IW_ERR_QUEUE_DESTROYED = -68,
I40IW_ERR_INVALID_FEAT_CNT = -69
};
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,422 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_USER_H
#define I40IW_USER_H
enum i40iw_device_capabilities_const {
I40IW_WQE_SIZE = 4,
I40IW_CQP_WQE_SIZE = 8,
I40IW_CQE_SIZE = 4,
I40IW_EXTENDED_CQE_SIZE = 8,
I40IW_AEQE_SIZE = 2,
I40IW_CEQE_SIZE = 1,
I40IW_CQP_CTX_SIZE = 8,
I40IW_SHADOW_AREA_SIZE = 8,
I40IW_CEQ_MAX_COUNT = 256,
I40IW_QUERY_FPM_BUF_SIZE = 128,
I40IW_COMMIT_FPM_BUF_SIZE = 128,
I40IW_MIN_IW_QP_ID = 1,
I40IW_MAX_IW_QP_ID = 262143,
I40IW_MIN_CEQID = 0,
I40IW_MAX_CEQID = 256,
I40IW_MIN_CQID = 0,
I40IW_MAX_CQID = 131071,
I40IW_MIN_AEQ_ENTRIES = 1,
I40IW_MAX_AEQ_ENTRIES = 524287,
I40IW_MIN_CEQ_ENTRIES = 1,
I40IW_MAX_CEQ_ENTRIES = 131071,
I40IW_MIN_CQ_SIZE = 1,
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_DB_ID_ZERO = 0,
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
I40IW_MAX_VF_FPM_ID = 47,
I40IW_MAX_VF_PER_PF = 127,
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
I40IW_MAX_WQE_SIZE_RQ = 128,
I40IW_QP_CTX_SIZE = 248,
I40IW_MAX_PDS = 32768
};
#define i40iw_handle void *
#define i40iw_adapter_handle i40iw_handle
#define i40iw_qp_handle i40iw_handle
#define i40iw_cq_handle i40iw_handle
#define i40iw_srq_handle i40iw_handle
#define i40iw_pd_id i40iw_handle
#define i40iw_stag_handle i40iw_handle
#define i40iw_stag_index u32
#define i40iw_stag u32
#define i40iw_stag_key u8
#define i40iw_tagged_offset u64
#define i40iw_access_privileges u32
#define i40iw_physical_fragment u64
#define i40iw_address_list u64 *
#define I40IW_MAX_MR_SIZE 0x10000000000L
#define I40IW_MAX_RQ_WQE_SHIFT 2
struct i40iw_qp_uk;
struct i40iw_cq_uk;
struct i40iw_srq_uk;
struct i40iw_qp_uk_init_info;
struct i40iw_cq_uk_init_info;
struct i40iw_srq_uk_init_info;
struct i40iw_sge {
i40iw_tagged_offset tag_off;
u32 len;
i40iw_stag stag;
};
#define i40iw_sgl struct i40iw_sge *
struct i40iw_ring {
u32 head;
u32 tail;
u32 size;
};
struct i40iw_cqe {
u64 buf[I40IW_CQE_SIZE];
};
struct i40iw_extended_cqe {
u64 buf[I40IW_EXTENDED_CQE_SIZE];
};
struct i40iw_wqe {
u64 buf[I40IW_WQE_SIZE];
};
struct i40iw_qp_uk_ops;
enum i40iw_addressing_type {
I40IW_ADDR_TYPE_ZERO_BASED = 0,
I40IW_ADDR_TYPE_VA_BASED = 1,
};
#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01
#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02
#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05
#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10
#define I40IW_ACCESS_FLAGS_ALL 0x1F
#define I40IW_OP_TYPE_RDMA_WRITE 0
#define I40IW_OP_TYPE_RDMA_READ 1
#define I40IW_OP_TYPE_SEND 3
#define I40IW_OP_TYPE_SEND_INV 4
#define I40IW_OP_TYPE_SEND_SOL 5
#define I40IW_OP_TYPE_SEND_SOL_INV 6
#define I40IW_OP_TYPE_REC 7
#define I40IW_OP_TYPE_BIND_MW 8
#define I40IW_OP_TYPE_FAST_REG_NSMR 9
#define I40IW_OP_TYPE_INV_STAG 10
#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11
#define I40IW_OP_TYPE_NOP 12
enum i40iw_completion_status {
I40IW_COMPL_STATUS_SUCCESS = 0,
I40IW_COMPL_STATUS_FLUSHED,
I40IW_COMPL_STATUS_INVALID_WQE,
I40IW_COMPL_STATUS_QP_CATASTROPHIC,
I40IW_COMPL_STATUS_REMOTE_TERMINATION,
I40IW_COMPL_STATUS_INVALID_STAG,
I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
I40IW_COMPL_STATUS_ACCESS_VIOLATION,
I40IW_COMPL_STATUS_INVALID_PD_ID,
I40IW_COMPL_STATUS_WRAP_ERROR,
I40IW_COMPL_STATUS_STAG_INVALID_PDID,
I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
I40IW_COMPL_STATUS_STAG_NOT_INVALID,
I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
I40IW_COMPL_STATUS_INVALID_FBO,
I40IW_COMPL_STATUS_INVALID_LENGTH,
I40IW_COMPL_STATUS_INVALID_ACCESS,
I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
I40IW_COMPL_STATUS_INVALID_REGION,
I40IW_COMPL_STATUS_INVALID_WINDOW,
I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
};
enum i40iw_completion_notify {
IW_CQ_COMPL_EVENT = 0,
IW_CQ_COMPL_SOLICITED = 1
};
struct i40iw_post_send {
i40iw_sgl sg_list;
u32 num_sges;
};
struct i40iw_post_inline_send {
void *data;
u32 len;
};
struct i40iw_rdma_write {
i40iw_sgl lo_sg_list;
u32 num_lo_sges;
struct i40iw_sge rem_addr;
};
struct i40iw_inline_rdma_write {
void *data;
u32 len;
struct i40iw_sge rem_addr;
};
struct i40iw_rdma_read {
struct i40iw_sge lo_addr;
struct i40iw_sge rem_addr;
};
struct i40iw_bind_window {
i40iw_stag mr_stag;
u64 bind_length;
void *va;
enum i40iw_addressing_type addressing_type;
bool enable_reads;
bool enable_writes;
i40iw_stag mw_stag;
};
struct i40iw_inv_local_stag {
i40iw_stag target_stag;
};
struct i40iw_post_sq_info {
u64 wr_id;
u8 op_type;
bool signaled;
bool read_fence;
bool local_fence;
bool inline_data;
bool defer_flag;
union {
struct i40iw_post_send send;
struct i40iw_rdma_write rdma_write;
struct i40iw_rdma_read rdma_read;
struct i40iw_rdma_read rdma_read_inv;
struct i40iw_bind_window bind_window;
struct i40iw_inv_local_stag inv_local_stag;
struct i40iw_inline_rdma_write inline_rdma_write;
struct i40iw_post_inline_send inline_send;
} op;
};
struct i40iw_post_rq_info {
u64 wr_id;
i40iw_sgl sg_list;
u32 num_sges;
};
struct i40iw_cq_poll_info {
u64 wr_id;
i40iw_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num;
u32 qp_id;
i40iw_stag inv_stag;
enum i40iw_completion_status comp_status;
u16 major_err;
u16 minor_err;
u8 op_type;
bool stag_invalid_set;
bool error;
bool is_srq;
bool solicited_event;
};
struct i40iw_qp_uk_ops {
void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool);
enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool, bool);
enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, u32, bool);
enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool);
enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, u32, bool);
enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool);
enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
struct i40iw_post_sq_info *, bool);
enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
struct i40iw_post_rq_info *);
enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
};
struct i40iw_cq_ops {
void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
enum i40iw_completion_notify);
enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
struct i40iw_cq_poll_info *);
enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
};
struct i40iw_dev_uk;
struct i40iw_device_uk_ops {
enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
struct i40iw_cq_uk_init_info *);
enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
struct i40iw_qp_uk_init_info *);
};
struct i40iw_dev_uk {
struct i40iw_device_uk_ops ops_uk;
};
struct i40iw_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
u8 wqe_size;
u8 reserved[3];
};
struct i40iw_qp_quanta {
u64 elem[I40IW_WQE_SIZE];
};
struct i40iw_qp_uk {
struct i40iw_qp_quanta *sq_base;
struct i40iw_qp_quanta *rq_base;
u32 __iomem *wqe_alloc_reg;
struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u64 *shadow_area;
struct i40iw_ring sq_ring;
struct i40iw_ring rq_ring;
struct i40iw_ring initial_ring;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
struct i40iw_qp_uk_ops ops;
bool use_srq;
u8 swqe_polarity;
u8 swqe_polarity_deferred;
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool first_sq_wq;
bool deferred_flag;
};
struct i40iw_cq_uk {
struct i40iw_cqe *cq_base;
u32 __iomem *cqe_alloc_reg;
u64 *shadow_area;
u32 cq_id;
u32 cq_size;
struct i40iw_ring cq_ring;
u8 polarity;
bool avoid_mem_cflct;
struct i40iw_cq_ops ops;
};
struct i40iw_qp_uk_init_info {
struct i40iw_qp_quanta *sq;
struct i40iw_qp_quanta *rq;
u32 __iomem *wqe_alloc_reg;
u64 *shadow_area;
struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
int abi_ver;
};
struct i40iw_cq_uk_init_info {
u32 __iomem *cqe_alloc_reg;
struct i40iw_cqe *cq_base;
u64 *shadow_area;
u32 cq_size;
u32 cq_id;
bool avoid_mem_cflct;
};
void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
u8 wqe_size,
u32 total_size,
u64 wr_id
);
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
struct i40iw_cq_uk_init_info *info);
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
struct i40iw_qp_uk_init_info *info);
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size);
void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
#endif
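The post_sq_info/op union above is the common descriptor for every SQ verb in the user/kernel shared queue code. A minimal sketch of posting a signaled RDMA write through the uk ops table (the caller, sg list, and wr_id cookie are illustrative):

static enum i40iw_status_code rdma_write_sketch(struct i40iw_qp_uk *qp,
						struct i40iw_sge *sgl, u32 nsges,
						struct i40iw_sge *rem_addr)
{
	struct i40iw_post_sq_info info = {};
	enum i40iw_status_code status;

	info.wr_id = 0x1234;			/* opaque cookie handed back in the CQE */
	info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
	info.signaled = true;
	info.op.rdma_write.lo_sg_list = sgl;
	info.op.rdma_write.num_lo_sges = nsges;
	info.op.rdma_write.rem_addr = *rem_addr;

	status = qp->ops.iw_rdma_write(qp, &info, false);	/* build the WQE, defer the doorbell */
	if (!status)
		qp->ops.iw_qp_post_wr(qp);			/* ring the doorbell for pending WQEs */
	return status;
}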

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,179 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VERBS_H
#define I40IW_VERBS_H
struct i40iw_ucontext {
struct ib_ucontext ibucontext;
struct i40iw_device *iwdev;
struct list_head cq_reg_mem_list;
spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
int abi_ver;
};
struct i40iw_pd {
struct ib_pd ibpd;
struct i40iw_sc_pd sc_pd;
atomic_t usecount;
};
struct i40iw_hmc_pble {
union {
u32 idx;
dma_addr_t addr;
};
};
struct i40iw_cq_mr {
struct i40iw_hmc_pble cq_pbl;
dma_addr_t shadow;
};
struct i40iw_qp_mr {
struct i40iw_hmc_pble sq_pbl;
struct i40iw_hmc_pble rq_pbl;
dma_addr_t shadow;
struct page *sq_page;
};
struct i40iw_pbl {
struct list_head list;
union {
struct i40iw_qp_mr qp_mr;
struct i40iw_cq_mr cq_mr;
};
bool pbl_allocated;
bool on_list;
u64 user_base;
struct i40iw_pble_alloc pble_alloc;
struct i40iw_mr *iwmr;
};
#define MAX_SAVE_PAGE_ADDRS 4
struct i40iw_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
};
struct ib_umem *region;
u16 type;
u32 page_cnt;
u64 page_size;
u32 npages;
u32 stag;
u64 length;
u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
struct i40iw_pbl iwpbl;
};
struct i40iw_cq {
struct ib_cq ibcq;
struct i40iw_sc_cq sc_cq;
u16 cq_head;
u16 cq_size;
u16 cq_number;
bool user_mode;
u32 polled_completions;
u32 cq_mem_size;
struct i40iw_dma_mem kmem;
spinlock_t lock; /* for poll cq */
struct i40iw_pbl *iwpbl;
};
struct disconn_work {
struct work_struct work;
struct i40iw_qp *iwqp;
};
struct iw_cm_id;
struct ietf_mpa_frame;
struct i40iw_ud_file;
struct i40iw_qp_kmode {
struct i40iw_dma_mem dma_mem;
u64 *wrid_mem;
};
struct i40iw_qp {
struct ib_qp ibqp;
struct i40iw_sc_qp sc_qp;
struct i40iw_device *iwdev;
struct i40iw_cq *iwscq;
struct i40iw_cq *iwrcq;
struct i40iw_pd *iwpd;
struct i40iw_qp_host_ctx_info ctx_info;
struct i40iwarp_offload_info iwarp_info;
void *allocated_buffer;
refcount_t refcount;
struct iw_cm_id *cm_id;
void *cm_node;
struct ib_mr *lsmm_mr;
struct work_struct work;
enum ib_qp_state ibqp_state;
u32 iwarp_state;
u32 qp_mem_size;
u32 last_aeq;
atomic_t close_timer_started;
spinlock_t lock; /* for post work requests */
struct i40iw_qp_context *iwqp_context;
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
u8 active_conn:1;
u8 user_mode:1;
u8 hte_added:1;
u8 flush_issued:1;
u8 destroyed:1;
u8 sig_all:1;
u8 pau_mode:1;
u8 rsvd:1;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
u8 hw_iwarp_state;
u8 hw_tcp_state;
struct i40iw_qp_kmode kqp;
struct i40iw_dma_mem host_ctx;
struct timer_list terminate_timer;
struct i40iw_pbl iwpbl;
struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
struct completion rq_drained;
struct completion free_qp;
};
#endif
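Each of the structures above embeds its ib_* core object, so verbs callbacks recover the driver-private structure with the usual container_of idiom. A minimal sketch (helper names are illustrative; the driver defines its own accessors):

static inline struct i40iw_qp *sketch_to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct i40iw_qp, ibqp);
}

static inline struct i40iw_cq *sketch_to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct i40iw_cq, ibcq);
}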


@ -1,85 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
/**
* i40iw_manage_vf_pble_bp - manage vf pble
* @cqp: cqp for cqp's sq wqe
* @info: pble info
* @scratch: pointer for completion
* @post_sq: to post and ring
*/
enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
struct i40iw_manage_vf_pble_info *info,
u64 scratch,
bool post_sq)
{
u64 *wqe;
u64 temp, header, pd_pl_pba = 0;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return I40IW_ERR_RING_FULL;
temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
set_64bit_val(wqe, 16, temp);
header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
set_64bit_val(wqe, 24, header);
pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
set_64bit_val(wqe, 32, pd_pl_pba);
i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);
if (post_sq)
i40iw_sc_cqp_post_sq(cqp);
return 0;
}
const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
i40iw_manage_vf_pble_bp
};


@ -1,62 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VF_H
#define I40IW_VF_H
struct i40iw_sc_cqp;
struct i40iw_manage_vf_pble_info {
u32 sd_index;
u16 first_pd_index;
u16 pd_entry_cnt;
u8 inv_pd_ent;
u64 pd_pl_pba;
};
struct i40iw_vf_cqp_ops {
enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
struct i40iw_manage_vf_pble_info *,
u64,
bool);
};
enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
struct i40iw_manage_vf_pble_info *info,
u64 scratch,
bool post_sq);
extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
#endif


@ -1,759 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_virtchnl.h"
/**
* vchnl_vf_send_get_ver_req - Request Channel version
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_add_hmc_objs_req - Add HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
* @rsrc_type: resource type to add
* @start_index: starting index for resource
* @rsrc_count: number of resources to add
*/
static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
if (!dev->vchnl_up)
return ret_code;
add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
add_hmc_obj->obj_type = (u16)rsrc_type;
add_hmc_obj->start_index = start_index;
add_hmc_obj->obj_count = rsrc_count;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_del_hmc_objs_req - del HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
* @rsrc_type: resource type to delete
* @start_index: starting index for resource
* @rsrc_count: number of resources to delete
*/
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
if (!dev->vchnl_up)
return ret_code;
add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
add_hmc_obj->obj_type = (u16)rsrc_type;
add_hmc_obj->start_index = start_index;
add_hmc_obj->obj_count = rsrc_count;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_pf_send_get_ver_resp - Send channel version to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
*/
static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg)
{
enum i40iw_status_code ret_code;
u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
memset(resp_buffer, 0, sizeof(resp_buffer));
vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
*((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @hmc_fcn: HMC function index pointer
*/
static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
u16 hmc_fcn)
{
enum i40iw_status_code ret_code;
u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
memset(resp_buffer, 0, sizeof(resp_buffer));
vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
*((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @hw_stats: HW Stats struct
*/
static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
struct i40iw_dev_hw_stats *hw_stats)
{
enum i40iw_status_code ret_code;
u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
memset(resp_buffer, 0, sizeof(resp_buffer));
vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
*((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;
ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_error_resp - Send an error response to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @op_ret_code: I40IW_ERR_* status code
*/
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
u16 op_ret_code)
{
enum i40iw_status_code ret_code;
u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
memset(resp_buffer, 0, sizeof(resp_buffer));
vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
* @dev: IWARP device pointer
* @callback_param: unused CQP callback parameter
* @cqe_info: CQE information pointer
*/
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
struct i40iw_ccq_cqe_info *cqe_info)
{
struct i40iw_vfdev *vf_dev = callback_param;
struct i40iw_virt_mem vf_dev_mem;
if (cqe_info->error) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
cqe_info->maj_err_code, cqe_info->min_err_code);
dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
(u16)I40IW_ERR_CQP_COMPL_ERROR);
vf_dev_mem.va = vf_dev;
vf_dev_mem.size = sizeof(*vf_dev);
i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
} else {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"CQP Completion Operation Return information = 0x%08x\n",
cqe_info->op_ret_val);
vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
vf_dev->msg_count--;
vchnl_pf_send_get_hmc_fcn_resp(dev,
vf_dev->vf_id,
&vf_dev->vf_msg_buffer.vchnl_msg,
vf_dev->pmf_index);
}
}
/**
* pf_add_hmc_obj_callback - Callback for Add HMC Object
* @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
struct i40iw_hmc_create_obj_info info;
struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
enum i40iw_status_code ret_code;
if (!vf_dev->pf_hmc_initialized) {
ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
if (ret_code)
goto add_out;
vf_dev->pf_hmc_initialized = true;
}
add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
memset(&info, 0, sizeof(info));
info.hmc_info = hmc_info;
info.is_pf = false;
info.rsrc_type = (u32)add_hmc_obj->obj_type;
info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
info.start_idx = add_hmc_obj->start_index;
info.count = add_hmc_obj->obj_count;
i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
"I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
info.count, info.rsrc_type);
ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
if (!ret_code)
vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
add_out:
vf_dev->msg_count--;
vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
/**
* pf_del_hmc_obj_callback - Callback for delete HMC Object
* @work_vf_dev: pointer to the VF Device
*/
static void pf_del_hmc_obj_callback(void *work_vf_dev)
{
struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
struct i40iw_hmc_del_obj_info info;
struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
enum i40iw_status_code ret_code = I40IW_SUCCESS;
if (!vf_dev->pf_hmc_initialized)
goto del_out;
del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
memset(&info, 0, sizeof(info));
info.hmc_info = hmc_info;
info.is_pf = false;
info.rsrc_type = (u32)del_hmc_obj->obj_type;
info.start_idx = del_hmc_obj->start_index;
info.count = del_hmc_obj->obj_count;
i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
"I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
info.count, info.rsrc_type);
ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
del_out:
vf_dev->msg_count--;
vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
/**
* i40iw_vf_init_pestat - Initialize stats for VF
* @dev: pointer to the VF Device
* @stats: Statistics structure pointer
* @index: Stats index
*/
static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
{
stats->hw = dev->hw;
i40iw_hw_stats_init(stats, (u8)index, false);
spin_lock_init(&stats->lock);
}
/**
* i40iw_vchnl_recv_pf - Receive PF virtual channel messages
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @msg: Virtual channel message buffer pointer
* @len: Length of the virtual channel message
*/
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
u32 vf_id,
u8 *msg,
u16 len)
{
struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
struct i40iw_vfdev *vf_dev = NULL;
struct i40iw_hmc_fcn_info hmc_fcn_info;
u16 iw_vf_idx;
u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
struct i40iw_virt_mem vf_dev_mem;
struct i40iw_virtchnl_work_info work_info;
struct i40iw_vsi_pestat *stats;
enum i40iw_status_code ret_code;
if (!dev || !msg || !len)
return I40IW_ERR_PARAM;
if (!dev->vchnl_up)
return I40IW_ERR_NOT_READY;
if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
return I40IW_SUCCESS;
}
for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
if (!dev->vf_dev[iw_vf_idx]) {
if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)
first_avail_iw_vf = iw_vf_idx;
continue;
}
if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
vf_dev = dev->vf_dev[iw_vf_idx];
break;
}
}
if (vf_dev) {
if (!vf_dev->msg_count) {
vf_dev->msg_count++;
} else {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"VF%u already has a channel message in progress.\n",
vf_id);
return I40IW_SUCCESS;
}
}
switch (vchnl_msg->iw_op_code) {
case I40IW_VCHNL_OP_GET_HMC_FCN:
if (!vf_dev &&
(first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
if (!ret_code) {
vf_dev = vf_dev_mem.va;
vf_dev->stats_initialized = false;
vf_dev->pf_dev = dev;
vf_dev->msg_count = 1;
vf_dev->vf_id = vf_id;
vf_dev->iw_vf_idx = first_avail_iw_vf;
vf_dev->pf_hmc_initialized = false;
vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"vf_dev %p, hmc_info %p, hmc_obj %p\n",
vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
dev->vf_dev[first_avail_iw_vf] = vf_dev;
iw_vf_idx = first_avail_iw_vf;
} else {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"VF%u Unable to allocate a VF device structure.\n",
vf_id);
vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
return I40IW_SUCCESS;
}
memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
hmc_fcn_info.vf_id = vf_id;
hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
hmc_fcn_info.cqp_callback_param = vf_dev;
hmc_fcn_info.free_fcn = false;
ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"VF%u error CQP HMC Function operation.\n",
vf_id);
i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
vf_dev->stats_initialized = true;
} else {
if (vf_dev) {
vf_dev->msg_count--;
vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
} else {
vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
(u16)I40IW_ERR_NO_MEMORY);
}
}
break;
case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
if (!vf_dev)
return I40IW_ERR_BAD_PTR;
work_info.worker_vf_dev = vf_dev;
work_info.callback_fcn = pf_add_hmc_obj_callback;
memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
break;
case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
if (!vf_dev)
return I40IW_ERR_BAD_PTR;
work_info.worker_vf_dev = vf_dev;
work_info.callback_fcn = pf_del_hmc_obj_callback;
memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
break;
case I40IW_VCHNL_OP_GET_STATS:
if (!vf_dev)
return I40IW_ERR_BAD_PTR;
stats = &vf_dev->pestat;
i40iw_hw_stats_read_all(stats, &stats->hw_stats);
vf_dev->msg_count--;
vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
break;
default:
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
vchnl_msg->iw_op_code);
vchnl_pf_send_error_resp(dev, vf_id,
vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
}
return I40IW_SUCCESS;
}
/**
* i40iw_vchnl_recv_vf - Receive VF virtual channel messages
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @msg: Virtual channel message buffer pointer
* @len: Length of the virtual channels message
*/
enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
u32 vf_id,
u8 *msg,
u16 len)
{
struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
struct i40iw_virtchnl_req *vchnl_req;
vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
if (vchnl_req->parm_len && vchnl_req->parm)
memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: Got response, data size %u\n", __func__,
vchnl_req->parm_len);
} else {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: error length on response, Got %u, expected %u\n", __func__,
len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
}
return I40IW_SUCCESS;
}
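/* The i40iw_vchnl_vf_* helpers below share one pattern: check that the
* channel is clear to send, build the request in the per-device message
* buffer, send it to the PF, wait for the response, and then return the
* channel return code carried back in the request structure.
*/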
/**
* i40iw_vchnl_vf_get_ver - Request Channel version
* @dev: IWARP device pointer
* @vchnl_ver: Virtual channel message version pointer
*/
enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
u32 *vchnl_ver)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = vchnl_ver;
vchnl_req.parm_len = sizeof(*vchnl_ver);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}
/**
* i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
* @dev: IWARP device pointer
* @hmc_fcn: HMC function index pointer
*/
enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
u16 *hmc_fcn)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = hmc_fcn;
vchnl_req.parm_len = sizeof(*hmc_fcn);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}
/**
* i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
* @dev: IWARP device pointer
* @rsrc_type: HMC Resource type
* @start_index: Starting index of the objects to be added
* @rsrc_count: Number of resources to be added
*/
enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
&vchnl_req,
rsrc_type,
start_index,
rsrc_count);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}
/**
* i40iw_vchnl_vf_del_hmc_obj - del HMC obj
* @dev: IWARP device pointer
* @rsrc_type: HMC Resource type
* @start_index: Starting index of the object to delete
* @rsrc_count: Number of resources to be deleted
*/
enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
&vchnl_req,
rsrc_type,
start_index,
rsrc_count);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}
/**
* i40iw_vchnl_vf_get_pe_stats - Get PE stats
* @dev: IWARP device pointer
* @hw_stats: HW stats struct
*/
enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
struct i40iw_dev_hw_stats *hw_stats)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = hw_stats;
vchnl_req.parm_len = sizeof(*hw_stats);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}

@@ -1,124 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VIRTCHNL_H
#define I40IW_VIRTCHNL_H
#include "i40iw_hmc.h"
#pragma pack(push, 1)
struct i40iw_virtchnl_op_buf {
u16 iw_op_code;
u16 iw_op_ver;
u16 iw_chnl_buf_len;
u16 rsvd;
u64 iw_chnl_op_ctx;
/* Member alignment MUST be maintained above this location */
u8 iw_chnl_buf[1];
};
struct i40iw_virtchnl_resp_buf {
u64 iw_chnl_op_ctx;
u16 iw_chnl_buf_len;
s16 iw_op_ret_code;
/* Member alignment MUST be maintained above this location */
u16 rsvd[2];
u8 iw_chnl_buf[1];
};
enum i40iw_virtchnl_ops {
I40IW_VCHNL_OP_GET_VER = 0,
I40IW_VCHNL_OP_GET_HMC_FCN,
I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
I40IW_VCHNL_OP_GET_STATS
};
#define I40IW_VCHNL_OP_GET_VER_V0 0
#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
#define I40IW_VCHNL_OP_GET_STATS_V0 0
#define I40IW_VCHNL_CHNL_VER_V0 0
struct i40iw_dev_hw_stats;
struct i40iw_virtchnl_hmc_obj_range {
u16 obj_type;
u16 rsvd;
u32 start_index;
u32 obj_count;
};
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
u32 vf_id,
u8 *msg,
u16 len);
enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
u32 vf_id,
u8 *msg,
u16 len);
struct i40iw_virtchnl_req {
struct i40iw_sc_dev *dev;
struct i40iw_virtchnl_op_buf *vchnl_msg;
void *parm;
u32 vf_id;
u16 parm_len;
s16 ret_code;
};
#pragma pack(pop)
enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
u32 *vchnl_ver);
enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
u16 *hmc_fcn);
enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count);
enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
enum i40iw_hmc_rsrc_type rsrc_type,
u32 start_index,
u32 rsrc_count);
enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
struct i40iw_dev_hw_stats *hw_stats);
#endif

@@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_IRDMA
tristate "Intel(R) Ethernet Protocol Driver for RDMA"
depends on INET
depends on IPV6 || !IPV6
depends on PCI
depends on ICE && I40E
select GENERIC_ALLOCATOR
select AUXILIARY_BUS
help
This is an Intel(R) Ethernet Protocol Driver for RDMA that
supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.

@@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
# Copyright (c) 2019, Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver
#
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o
irdma-objs := cm.o \
ctrl.o \
hmc.o \
hw.o \
i40iw_hw.o \
i40iw_if.o \
icrdma_hw.o \
main.o \
pble.o \
puda.o \
trace.o \
uda.o \
uk.o \
utils.o \
verbs.o \
ws.o
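# -I$(src) lets the tracepoint definitions built into trace.o find the
# driver's local trace header.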
CFLAGS_trace.o = -I$(src)

File diff suppressed because it is too large

@@ -0,0 +1,417 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_CM_H
#define IRDMA_CM_H
#define IRDMA_MPA_REQUEST_ACCEPT 1
#define IRDMA_MPA_REQUEST_REJECT 2
/* IETF MPA -- defines */
#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VER 1
#define IETF_MAX_PRIV_DATA_LEN 512
#define IETF_MPA_FRAME_SIZE 20
#define IETF_RTR_MSG_SIZE 4
#define IETF_MPA_V2_FLAG 0x10
#define SNDMARKER_SEQNMASK 0x000001ff
#define IRDMA_MAX_IETF_SIZE 32
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
#define IETF_RDMA0_WRITE 0x8000
#define IETF_RDMA0_READ 0x4000
#define IETF_NO_IRD_ORD 0x3fff
#define MAX_PORTS 65536
#define IRDMA_PASSIVE_STATE_INDICATED 0
#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
#define IRDMA_SEND_RESET_EVENT 2
#define MAX_IRDMA_IFS 4
#define SET_ACK 1
#define SET_SYN 2
#define SET_FIN 4
#define SET_RST 8
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
#define IRDMA_DEFAULT_RETRANS 8
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
#define IRDMA_DEFAULT_REXMIT_THRESH 8
#define IRDMA_RETRY_TIMEOUT HZ
#define IRDMA_SHORT_TIME 10
#define IRDMA_LONG_TIME (2 * HZ)
#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
#define IRDMA_CM_HASHTABLE_SIZE 1024
#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
#define IRDMA_CM_DEFAULT_MTU 1540
#define IRDMA_CM_DEFAULT_FRAME_CNT 10
#define IRDMA_CM_THREAD_STACK_SIZE 256
#define IRDMA_CM_DEFAULT_RCV_WND 64240
#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
#define IRDMA_CM_DEFAULT_FREE_PKTS 10
#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
#define IRDMA_CM_DEFAULT_MSS 536
#define IRDMA_CM_DEFAULT_MPA_VER 2
#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
enum ietf_mpa_flags {
IETF_MPA_FLAGS_REJECT = 0x20,
IETF_MPA_FLAGS_CRC = 0x40,
IETF_MPA_FLAGS_MARKERS = 0x80,
};
enum irdma_timer_type {
IRDMA_TIMER_TYPE_SEND,
IRDMA_TIMER_TYPE_CLOSE,
};
enum option_nums {
OPTION_NUM_EOL,
OPTION_NUM_NONE,
OPTION_NUM_MSS,
OPTION_NUM_WINDOW_SCALE,
OPTION_NUM_SACK_PERM,
OPTION_NUM_SACK,
OPTION_NUM_WRITE0 = 0xbc,
};
/* cm node transition states */
enum irdma_cm_node_state {
IRDMA_CM_STATE_UNKNOWN,
IRDMA_CM_STATE_INITED,
IRDMA_CM_STATE_LISTENING,
IRDMA_CM_STATE_SYN_RCVD,
IRDMA_CM_STATE_SYN_SENT,
IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
IRDMA_CM_STATE_ESTABLISHED,
IRDMA_CM_STATE_ACCEPTING,
IRDMA_CM_STATE_MPAREQ_SENT,
IRDMA_CM_STATE_MPAREQ_RCVD,
IRDMA_CM_STATE_MPAREJ_RCVD,
IRDMA_CM_STATE_OFFLOADED,
IRDMA_CM_STATE_FIN_WAIT1,
IRDMA_CM_STATE_FIN_WAIT2,
IRDMA_CM_STATE_CLOSE_WAIT,
IRDMA_CM_STATE_TIME_WAIT,
IRDMA_CM_STATE_LAST_ACK,
IRDMA_CM_STATE_CLOSING,
IRDMA_CM_STATE_LISTENER_DESTROYED,
IRDMA_CM_STATE_CLOSED,
};
enum mpa_frame_ver {
IETF_MPA_V1 = 1,
IETF_MPA_V2 = 2,
};
enum mpa_frame_key {
MPA_KEY_REQUEST,
MPA_KEY_REPLY,
};
enum send_rdma0 {
SEND_RDMA_READ_ZERO = 1,
SEND_RDMA_WRITE_ZERO = 2,
};
enum irdma_tcpip_pkt_type {
IRDMA_PKT_TYPE_UNKNOWN,
IRDMA_PKT_TYPE_SYN,
IRDMA_PKT_TYPE_SYNACK,
IRDMA_PKT_TYPE_ACK,
IRDMA_PKT_TYPE_FIN,
IRDMA_PKT_TYPE_RST,
};
enum irdma_cm_listener_state {
IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
IRDMA_CM_LISTENER_EITHER_STATE = 3,
};
/* CM event codes */
enum irdma_cm_event_type {
IRDMA_CM_EVENT_UNKNOWN,
IRDMA_CM_EVENT_ESTABLISHED,
IRDMA_CM_EVENT_MPA_REQ,
IRDMA_CM_EVENT_MPA_CONNECT,
IRDMA_CM_EVENT_MPA_ACCEPT,
IRDMA_CM_EVENT_MPA_REJECT,
IRDMA_CM_EVENT_MPA_ESTABLISHED,
IRDMA_CM_EVENT_CONNECTED,
IRDMA_CM_EVENT_RESET,
IRDMA_CM_EVENT_ABORTED,
};
struct irdma_bth { /* Base Transport Header */
u8 opcode;
u8 flags;
__be16 pkey;
__be32 qpn;
__be32 apsn;
};
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
u8 priv_data[];
};
struct ietf_rtr_msg {
__be16 ctrl_ird;
__be16 ctrl_ord;
};
struct ietf_mpa_v2 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
u8 priv_data[];
};
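/* An MPA v2 frame is the 20-byte v1 header (IETF_MPA_FRAME_SIZE) followed by
* the 4-byte RTR message (IETF_RTR_MSG_SIZE) and then up to
* IETF_MAX_PRIV_DATA_LEN bytes of private data.
*/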
struct option_base {
u8 optionnum;
u8 len;
};
struct option_mss {
u8 optionnum;
u8 len;
__be16 mss;
};
struct option_windowscale {
u8 optionnum;
u8 len;
u8 shiftcount;
};
union all_known_options {
char eol;
struct option_base base;
struct option_mss mss;
struct option_windowscale windowscale;
};
struct irdma_timer_entry {
struct list_head list;
unsigned long timetosend; /* jiffies */
struct irdma_puda_buf *sqbuf;
u32 type;
u32 retrycount;
u32 retranscount;
u32 context;
u32 send_retrans;
int close_when_complete;
};
/* CM context params */
struct irdma_cm_tcp_context {
u8 client;
u32 loc_seq_num;
u32 loc_ack_num;
u32 rem_ack_num;
u32 rcv_nxt;
u32 loc_id;
u32 rem_id;
u32 snd_wnd;
u32 max_snd_wnd;
u32 rcv_wnd;
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
};
struct irdma_apbvt_entry {
struct hlist_node hlist;
u32 use_cnt;
u16 port;
};
struct irdma_cm_listener {
struct list_head list;
struct iw_cm_id *cm_id;
struct irdma_cm_core *cm_core;
struct irdma_device *iwdev;
struct list_head child_listen_list;
struct irdma_apbvt_entry *apbvt_entry;
enum irdma_cm_listener_state listener_state;
refcount_t refcnt;
atomic_t pend_accepts_cnt;
u32 loc_addr[4];
u32 reused_node;
int backlog;
u16 loc_port;
u16 vlan_id;
u8 loc_mac[ETH_ALEN];
u8 user_pri;
u8 tos;
bool qhash_set:1;
bool ipv4:1;
};
struct irdma_kmem_info {
void *addr;
u32 size;
};
struct irdma_mpa_priv_info {
const void *addr;
u32 size;
};
struct irdma_cm_node {
struct irdma_qp *iwqp;
struct irdma_device *iwdev;
struct irdma_sc_dev *dev;
struct irdma_cm_tcp_context tcp_cntxt;
struct irdma_cm_core *cm_core;
struct irdma_timer_entry *send_entry;
struct irdma_timer_entry *close_entry;
struct irdma_cm_listener *listener;
struct list_head timer_entry;
struct list_head reset_entry;
struct list_head teardown_entry;
struct irdma_apbvt_entry *apbvt_entry;
struct rcu_head rcu_head;
struct irdma_mpa_priv_info pdata;
struct irdma_sc_ah *ah;
union {
struct ietf_mpa_v1 mpa_frame;
struct ietf_mpa_v2 mpa_v2_frame;
};
struct irdma_kmem_info mpa_hdr;
struct iw_cm_id *cm_id;
struct hlist_node list;
struct completion establish_comp;
spinlock_t retrans_list_lock; /* protect CM node rexmit updates*/
atomic_t passive_state;
refcount_t refcnt;
enum irdma_cm_node_state state;
enum send_rdma0 send_rdma0_op;
enum mpa_frame_ver mpa_frame_rev;
u32 loc_addr[4], rem_addr[4];
u16 loc_port, rem_port;
int apbvt_set;
int accept_pend;
u16 vlan_id;
u16 ird_size;
u16 ord_size;
u16 mpav2_ird_ord;
u16 lsmm_size;
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
u8 user_pri;
u8 tos;
bool ack_rcvd:1;
bool qhash_set:1;
bool ipv4:1;
bool snd_mark_en:1;
bool rcv_mark_en:1;
bool do_lpb:1;
bool accelerated:1;
};
/* Used by internal CM APIs to pass CM information */
struct irdma_cm_info {
struct iw_cm_id *cm_id;
u16 loc_port;
u16 rem_port;
u32 loc_addr[4];
u32 rem_addr[4];
u32 qh_qpid;
u16 vlan_id;
int backlog;
u8 user_pri;
u8 tos;
bool ipv4;
};
struct irdma_cm_event {
enum irdma_cm_event_type type;
struct irdma_cm_info cm_info;
struct work_struct event_work;
struct irdma_cm_node *cm_node;
};
struct irdma_cm_core {
struct irdma_device *iwdev;
struct irdma_sc_dev *dev;
struct list_head listen_list;
DECLARE_HASHTABLE(cm_hash_tbl, 8);
DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
struct timer_list tcp_timer;
struct workqueue_struct *event_wq;
spinlock_t ht_lock; /* protect CM node (active side) list */
spinlock_t listen_list_lock; /* protect listener list */
spinlock_t apbvt_lock; /*serialize apbvt add/del entries*/
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
u64 stats_listen_destroyed;
u64 stats_listen_nodes_created;
u64 stats_listen_nodes_destroyed;
u64 stats_lpbs;
u64 stats_accepts;
u64 stats_rejects;
u64 stats_connect_errs;
u64 stats_passive_errs;
u64 stats_pkt_retrans;
u64 stats_backlog_drops;
struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
struct irdma_kmem_info *options,
struct irdma_kmem_info *hdr,
struct irdma_mpa_priv_info *pdata,
u8 flags);
int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
void (*cm_free_ah)(struct irdma_cm_node *cm_node);
};
int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *sqbuf,
enum irdma_timer_type type, int send_retrans,
int close_when_complete);
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
int irdma_cm_start(struct irdma_device *dev);
int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
u8 *mac_addr, u32 action);
void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
void irdma_send_ack(struct irdma_cm_node *cm_node);
void irdma_lpb_nop(struct irdma_sc_qp *qp);
void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
#endif /* IRDMA_CM_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,710 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
/**
* irdma_find_sd_index_limit - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by irdma_hmc_rsrc_type.
*/
static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
u32 idx, u32 cnt, u32 *sd_idx,
u32 *sd_limit)
{
u64 fpm_addr, fpm_limit;
fpm_addr = hmc_info->hmc_obj[type].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
*sd_limit += 1;
}
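/* For example, with a 256-byte object, base 0, idx 0 and cnt 32768, the range
* spans 8 MB of FPM space: sd_idx = 0 and sd_limit = 4, i.e. four 2 MB
* (IRDMA_HMC_DIRECT_BP_SIZE) segment descriptors cover it.
*/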
/**
* irdma_find_pd_index_limit - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_idx: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by irdma_hmc_rsrc_type.
*/
static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
u32 idx, u32 cnt, u32 *pd_idx,
u32 *pd_limit)
{
u64 fpm_addr, fpm_limit;
fpm_addr = hmc_info->hmc_obj[type].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
*pd_limit += 1;
}
/**
* irdma_set_sd_entry - setup entry for sd programming
* @pa: physical addr
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = pa |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}
/**
* irdma_clr_sd_entry - setup entry for sd clear
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}
/**
* irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
* @dev: pointer to our device struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
*/
static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
u32 pd_idx)
{
u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}
/**
* irdma_hmc_sd_one - setup 1 sd entry for cqp
* @dev: pointer to the device structure
* @hmc_fn_id: hmc's function id
* @pa: physical addr
* @sd_idx: sd index
* @type: paged or direct sd
* @setsd: flag to set or clear sd
*/
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type,
bool setsd)
{
struct irdma_update_sds_info sdinfo;
sdinfo.cnt = 1;
sdinfo.hmc_fn_id = hmc_fn_id;
if (setsd)
irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
else
irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
/**
* irdma_hmc_sd_grp - setup group of sd entries for cqp
* @dev: pointer to the device structure
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: sd index
* @sd_cnt: number of sd entries
* @setsd: flag to set or clear sd
*/
static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info,
u32 sd_index, u32 sd_cnt,
bool setsd)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_update_sds_info sdinfo = {};
u64 pa;
u32 i;
enum irdma_status_code ret_code = 0;
sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
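/* Batch the SD updates: flush to the CQP every IRDMA_MAX_SD_ENTRIES entries
* and once more below for any remainder.
*/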
for (i = sd_index; i < sd_index + sd_cnt; i++) {
sd_entry = &hmc_info->sd_table.sd_entry[i];
if (!sd_entry || (!sd_entry->valid && setsd) ||
(sd_entry->valid && !setsd))
continue;
if (setsd) {
pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
irdma_set_sd_entry(pa, i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
} else {
irdma_clr_sd_entry(i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
}
sdinfo.cnt++;
if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
if (ret_code) {
ibdev_dbg(to_ibdev(dev),
"HMC: sd_programming failed err=%d\n",
ret_code);
return ret_code;
}
sdinfo.cnt = 0;
}
}
if (sdinfo.cnt)
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
return ret_code;
}
/**
* irdma_hmc_finish_add_sd_reg - program sd entries for objects
* @dev: pointer to the device structure
* @info: create obj info
*/
static enum irdma_status_code
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info)
{
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
if (!info->add_sd_cnt)
return 0;
return irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0], info->add_sd_cnt,
true);
}
/**
* irdma_sc_create_hmc_obj - allocate backing store for hmc objects
* @dev: pointer to the device structure
* @info: pointer to irdma_hmc_create_obj_info struct
*
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
*/
enum irdma_status_code
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info)
{
struct irdma_hmc_sd_entry *sd_entry;
u32 sd_idx, sd_lmt;
u32 pd_idx = 0, pd_lmt = 0;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 i, j;
bool pd_error = false;
enum irdma_status_code ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
info->rsrc_type, info->start_idx, info->count,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
}
irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &sd_idx,
&sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
return IRDMA_ERR_INVALID_SD_INDEX;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx,
&pd_lmt);
for (j = sd_idx; j < sd_lmt; j++) {
ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
info->entry_type,
IRDMA_HMC_DIRECT_BP_SIZE);
if (ret_code)
goto exit_sd_error;
sd_entry = &info->hmc_info->sd_table.sd_entry[j];
if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
(dev->hmc_info == info->hmc_info &&
info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
for (i = pd_idx1; i < pd_lmt1; i++) {
/* update the pd table entry */
ret_code = irdma_add_pd_table_entry(dev,
info->hmc_info,
i, NULL);
if (ret_code) {
pd_error = true;
break;
}
}
if (pd_error) {
while (i && (i > pd_idx1)) {
irdma_remove_pd_bp(dev, info->hmc_info,
i - 1);
i--;
}
}
}
if (sd_entry->valid)
continue;
info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
info->add_sd_cnt++;
sd_entry->valid = true;
}
return irdma_hmc_finish_add_sd_reg(dev, info);
exit_sd_error:
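/* Unwind the PD/SD state set up so far, walking back from entry j - 1 down to sd_idx. */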
while (j && (j > sd_idx)) {
sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
switch (sd_entry->entry_type) {
case IRDMA_SD_TYPE_PAGED:
pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++)
irdma_prep_remove_pd_page(info->hmc_info, i);
break;
case IRDMA_SD_TYPE_DIRECT:
irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
break;
default:
ret_code = IRDMA_ERR_INVALID_SD_TYPE;
break;
}
j--;
}
return ret_code;
}
/**
* irdma_finish_del_sd_reg - delete sd entries for objects
* @dev: pointer to the device structure
* @info: delete obj info
* @reset: true if called before reset
*/
static enum irdma_status_code
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info, bool reset)
{
struct irdma_hmc_sd_entry *sd_entry;
enum irdma_status_code ret_code = 0;
u32 i, sd_idx;
struct irdma_dma_mem *mem;
if (!reset)
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
if (ret_code)
ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
for (i = 0; i < info->del_sd_cnt; i++) {
sd_idx = info->hmc_info->sd_indexes[i];
sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
&sd_entry->u.pd_table.pd_page_addr :
&sd_entry->u.bp.addr;
if (!mem || !mem->va) {
ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
} else {
dma_free_coherent(dev->hw->device, mem->size, mem->va,
mem->pa);
mem->va = NULL;
}
}
return ret_code;
}
/**
* irdma_sc_del_hmc_obj - remove pe hmc objects
* @dev: pointer to the device structure
* @info: pointer to irdma_hmc_del_obj_info struct
* @reset: true if called before reset
*
* This will de-populate the SDs and PDs. It frees
* the memory for PDs and backing storage. After this function returns,
* the caller should deallocate the memory previously allocated for
* book-keeping information about PDs and backing storage.
*/
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info,
bool reset)
{
struct irdma_hmc_pd_table *pd_table;
u32 sd_idx, sd_lmt;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 i, j;
enum irdma_status_code ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
info->start_idx, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
}
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
info->start_idx, info->count, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx,
&pd_lmt);
for (j = pd_idx; j < pd_lmt; j++) {
sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;
if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
continue;
if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
IRDMA_SD_TYPE_PAGED)
continue;
rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry &&
pd_table->pd_entry[rel_pd_idx].valid) {
ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
if (ret_code) {
ibdev_dbg(to_ibdev(dev),
"HMC: remove_pd_bp error\n");
return ret_code;
}
}
}
irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &sd_idx,
&sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
return IRDMA_ERR_INVALID_SD_INDEX;
}
for (i = sd_idx; i < sd_lmt; i++) {
pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
if (!info->hmc_info->sd_table.sd_entry[i].valid)
continue;
switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
case IRDMA_SD_TYPE_DIRECT:
ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
if (!ret_code) {
info->hmc_info->sd_indexes[info->del_sd_cnt] =
(u16)i;
info->del_sd_cnt++;
}
break;
case IRDMA_SD_TYPE_PAGED:
ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
if (ret_code)
break;
if (dev->hmc_info != info->hmc_info &&
info->rsrc_type == IRDMA_HMC_IW_PBLE &&
pd_table->pd_entry) {
kfree(pd_table->pd_entry_virt_mem.va);
pd_table->pd_entry = NULL;
}
info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
info->del_sd_cnt++;
break;
default:
break;
}
}
return irdma_finish_del_sd_reg(dev, info, reset);
}
/**
* irdma_add_sd_table_entry - Adds a segment descriptor to the table
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: segment descriptor index to manipulate
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
*/
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info,
u32 sd_index,
enum irdma_sd_entry_type type,
u64 direct_mode_sz)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_dma_mem dma_mem;
u64 alloc_len;
sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
if (!sd_entry->valid) {
if (type == IRDMA_SD_TYPE_PAGED)
alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
else
alloc_len = direct_mode_sz;
/* allocate a 4K pd page or 2M backing page */
dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
&dma_mem.pa, GFP_KERNEL);
if (!dma_mem.va)
return IRDMA_ERR_NO_MEMORY;
if (type == IRDMA_SD_TYPE_PAGED) {
struct irdma_virt_mem *vmem =
&sd_entry->u.pd_table.pd_entry_virt_mem;
vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va) {
dma_free_coherent(hw->device, dma_mem.size,
dma_mem.va, dma_mem.pa);
dma_mem.va = NULL;
return IRDMA_ERR_NO_MEMORY;
}
sd_entry->u.pd_table.pd_entry = vmem->va;
memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
sizeof(sd_entry->u.pd_table.pd_page_addr));
} else {
memcpy(&sd_entry->u.bp.addr, &dma_mem,
sizeof(sd_entry->u.bp.addr));
sd_entry->u.bp.sd_pd_index = sd_index;
}
hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
hmc_info->sd_table.use_cnt++;
}
if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
sd_entry->u.bp.use_cnt++;
return 0;
}
/**
* irdma_add_pd_table_entry - Adds page descriptor to the specified table
* @dev: pointer to our device structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
* @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
*
* This function:
* 1. Initializes the pd entry
* 2. Adds pd_entry in the pd_table
* 3. Marks the entry valid in the irdma_hmc_pd_entry structure
* 4. Initializes the pd_entry's ref count to 1
* assumptions:
* 1. The memory for the pd should be pinned down, physically contiguous,
* aligned on a 4K boundary, and zeroed.
* 2. It should be 4K in size.
*/
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info,
u32 pd_index,
struct irdma_dma_mem *rsrc_pg)
{
struct irdma_hmc_pd_table *pd_table;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_dma_mem mem;
struct irdma_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
IRDMA_SD_TYPE_PAGED)
return 0;
rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
if (rsrc_pg) {
pd_entry->rsrc_pg = true;
page = rsrc_pg;
} else {
page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
page->va = dma_alloc_coherent(dev->hw->device,
page->size, &page->pa,
GFP_KERNEL);
if (!page->va)
return IRDMA_ERR_NO_MEMORY;
pd_entry->rsrc_pg = false;
}
memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
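/* The low bit marks the page descriptor entry valid. */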
page_desc = page->pa | 0x1;
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
pd_table->use_cnt++;
irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
}
pd_entry->bp.use_cnt++;
return 0;
}
/**
* irdma_remove_pd_bp - remove a backing page from a page descriptor
* @dev: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*
* This function:
* 1. Marks the entry in pd table (for paged address mode) or in sd table
* (for direct address mode) invalid.
* 2. Writes to register PMPDINV to invalidate the backing page in FV cache.
* 3. Decrements the ref count for the pd_entry.
* assumptions:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
*/
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info,
u32 idx)
{
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_hmc_pd_table *pd_table;
struct irdma_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
struct irdma_dma_mem *mem;
u64 *pd_addr;
sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt)
return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
return IRDMA_ERR_INVALID_SD_TYPE;
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (--pd_entry->bp.use_cnt)
return 0;
pd_entry->valid = false;
pd_table->use_cnt--;
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
if (!mem || !mem->va)
return IRDMA_ERR_PARAM;
dma_free_coherent(dev->hw->device, mem->size, mem->va,
mem->pa);
mem->va = NULL;
}
if (!pd_table->use_cnt)
kfree(pd_table->pd_entry_virt_mem.va);
return 0;
}
/**
* irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*/
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (--sd_entry->u.bp.use_cnt)
return IRDMA_ERR_NOT_READY;
hmc_info->sd_table.use_cnt--;
sd_entry->valid = false;
return 0;
}
/**
* irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
*/
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.use_cnt)
return IRDMA_ERR_NOT_READY;
sd_entry->valid = false;
hmc_info->sd_table.use_cnt--;
return 0;
}

@@ -0,0 +1,180 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_HMC_H
#define IRDMA_HMC_H
#include "defs.h"
#define IRDMA_HMC_MAX_BP_COUNT 512
#define IRDMA_MAX_SD_ENTRIES 11
#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK 0xca
#define IRDMA_HMC_INFO_SIGNATURE 0x484d5347
#define IRDMA_HMC_PD_CNT_IN_SD 512
#define IRDMA_HMC_DIRECT_BP_SIZE 0x200000
#define IRDMA_HMC_MAX_SD_COUNT 8192
#define IRDMA_HMC_PAGED_BP_SIZE 4096
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
#define IRDMA_FIRST_VF_FPM_ID 8
#define FPM_MULTIPLIER 1024
enum irdma_hmc_rsrc_type {
IRDMA_HMC_IW_QP = 0,
IRDMA_HMC_IW_CQ = 1,
IRDMA_HMC_IW_RESERVED = 2,
IRDMA_HMC_IW_HTE = 3,
IRDMA_HMC_IW_ARP = 4,
IRDMA_HMC_IW_APBVT_ENTRY = 5,
IRDMA_HMC_IW_MR = 6,
IRDMA_HMC_IW_XF = 7,
IRDMA_HMC_IW_XFFL = 8,
IRDMA_HMC_IW_Q1 = 9,
IRDMA_HMC_IW_Q1FL = 10,
IRDMA_HMC_IW_TIMER = 11,
IRDMA_HMC_IW_FSIMC = 12,
IRDMA_HMC_IW_FSIAV = 13,
IRDMA_HMC_IW_PBLE = 14,
IRDMA_HMC_IW_RRF = 15,
IRDMA_HMC_IW_RRFFL = 16,
IRDMA_HMC_IW_HDR = 17,
IRDMA_HMC_IW_MD = 18,
IRDMA_HMC_IW_OOISC = 19,
IRDMA_HMC_IW_OOISCFFL = 20,
IRDMA_HMC_IW_MAX, /* Must be last entry */
};
enum irdma_sd_entry_type {
IRDMA_SD_TYPE_INVALID = 0,
IRDMA_SD_TYPE_PAGED = 1,
IRDMA_SD_TYPE_DIRECT = 2,
};
struct irdma_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
};
struct irdma_hmc_bp {
enum irdma_sd_entry_type entry_type;
struct irdma_dma_mem addr;
u32 sd_pd_index;
u32 use_cnt;
};
struct irdma_hmc_pd_entry {
struct irdma_hmc_bp bp;
u32 sd_index;
bool rsrc_pg:1;
bool valid:1;
};
struct irdma_hmc_pd_table {
struct irdma_dma_mem pd_page_addr;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_virt_mem pd_entry_virt_mem;
u32 use_cnt;
u32 sd_index;
};
struct irdma_hmc_sd_entry {
enum irdma_sd_entry_type entry_type;
bool valid;
union {
struct irdma_hmc_pd_table pd_table;
struct irdma_hmc_bp bp;
} u;
};
struct irdma_hmc_sd_table {
struct irdma_virt_mem addr;
u32 sd_cnt;
u32 use_cnt;
struct irdma_hmc_sd_entry *sd_entry;
};
struct irdma_hmc_info {
u32 signature;
u8 hmc_fn_id;
u16 first_sd_index;
struct irdma_hmc_obj_info *hmc_obj;
struct irdma_virt_mem hmc_obj_virt_mem;
struct irdma_hmc_sd_table sd_table;
u16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];
};
struct irdma_update_sd_entry {
u64 cmd;
u64 data;
};
struct irdma_update_sds_info {
u32 cnt;
u8 hmc_fn_id;
struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
};
struct irdma_ccq_cqe_info;
struct irdma_hmc_fcn_info {
u32 vf_id;
u8 free_fcn;
};
struct irdma_hmc_create_obj_info {
struct irdma_hmc_info *hmc_info;
struct irdma_virt_mem add_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 add_sd_cnt;
enum irdma_sd_entry_type entry_type;
bool privileged;
};
struct irdma_hmc_del_obj_info {
struct irdma_hmc_info *hmc_info;
struct irdma_virt_mem del_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 del_sd_cnt;
bool privileged;
};
enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
struct irdma_dma_mem *src_mem,
u64 src_offset, u64 size);
enum irdma_status_code
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info);
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info,
bool reset);
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type,
bool setsd);
enum irdma_status_code
irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info,
u32 sd_index,
enum irdma_sd_entry_type type,
u64 direct_mode_sz);
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info,
u32 pd_index,
struct irdma_dma_mem *rsrc_pg);
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info,
u32 idx);
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
u32 idx);
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
#endif /* IRDMA_HMC_H */

File diff suppressed because it is too large

@@ -0,0 +1,216 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "i40iw_hw.h"
#include "status.h"
#include "protos.h"
static u32 i40iw_regs[IRDMA_MAX_REGS] = {
I40E_PFPE_CQPTAIL,
I40E_PFPE_CQPDB,
I40E_PFPE_CCQPSTATUS,
I40E_PFPE_CCQPHIGH,
I40E_PFPE_CCQPLOW,
I40E_PFPE_CQARM,
I40E_PFPE_CQACK,
I40E_PFPE_AEQALLOC,
I40E_PFPE_CQPERRCODES,
I40E_PFPE_WQEALLOC,
I40E_PFINT_DYN_CTLN(0),
I40IW_DB_ADDR_OFFSET,
I40E_GLPCI_LBARCTRL,
I40E_GLPE_CPUSTATUS0,
I40E_GLPE_CPUSTATUS1,
I40E_GLPE_CPUSTATUS2,
I40E_PFINT_AEQCTL,
I40E_PFINT_CEQCTL(0),
I40E_VSIQF_CTL(0),
I40E_PFHMC_PDINV,
I40E_GLHMC_VFPDINV(0),
I40E_GLPE_CRITERR,
0xffffffff /* PFINT_RATEN not used in FPK */
};
static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
I40E_GLPES_PFIP4RXDISCARD(0),
I40E_GLPES_PFIP4RXTRUNC(0),
I40E_GLPES_PFIP4TXNOROUTE(0),
I40E_GLPES_PFIP6RXDISCARD(0),
I40E_GLPES_PFIP6RXTRUNC(0),
I40E_GLPES_PFIP6TXNOROUTE(0),
I40E_GLPES_PFTCPRTXSEG(0),
I40E_GLPES_PFTCPRXOPTERR(0),
I40E_GLPES_PFTCPRXPROTOERR(0),
I40E_GLPES_PFRXVLANERR(0)
};
static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = {
I40E_GLPES_PFIP4RXOCTSLO(0),
I40E_GLPES_PFIP4RXPKTSLO(0),
I40E_GLPES_PFIP4RXFRAGSLO(0),
I40E_GLPES_PFIP4RXMCPKTSLO(0),
I40E_GLPES_PFIP4TXOCTSLO(0),
I40E_GLPES_PFIP4TXPKTSLO(0),
I40E_GLPES_PFIP4TXFRAGSLO(0),
I40E_GLPES_PFIP4TXMCPKTSLO(0),
I40E_GLPES_PFIP6RXOCTSLO(0),
I40E_GLPES_PFIP6RXPKTSLO(0),
I40E_GLPES_PFIP6RXFRAGSLO(0),
I40E_GLPES_PFIP6RXMCPKTSLO(0),
I40E_GLPES_PFIP6TXOCTSLO(0),
I40E_GLPES_PFIP6TXPKTSLO(0),
I40E_GLPES_PFIP6TXFRAGSLO(0),
I40E_GLPES_PFIP6TXMCPKTSLO(0),
I40E_GLPES_PFTCPRXSEGSLO(0),
I40E_GLPES_PFTCPTXSEGLO(0),
I40E_GLPES_PFRDMARXRDSLO(0),
I40E_GLPES_PFRDMARXSNDSLO(0),
I40E_GLPES_PFRDMARXWRSLO(0),
I40E_GLPES_PFRDMATXRDSLO(0),
I40E_GLPES_PFRDMATXSNDSLO(0),
I40E_GLPES_PFRDMATXWRSLO(0),
I40E_GLPES_PFRDMAVBNDLO(0),
I40E_GLPES_PFRDMAVINVLO(0),
I40E_GLPES_PFIP4RXMCOCTSLO(0),
I40E_GLPES_PFIP4TXMCOCTSLO(0),
I40E_GLPES_PFIP6RXMCOCTSLO(0),
I40E_GLPES_PFIP6TXMCOCTSLO(0),
I40E_GLPES_PFUDPRXPKTSLO(0),
I40E_GLPES_PFUDPTXPKTSLO(0)
};
static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
I40E_PFPE_CCQPSTATUS_CCQP_DONE,
I40E_PFPE_CCQPSTATUS_CCQP_ERR,
I40E_CQPSQ_STAG_PDID,
I40E_CQPSQ_CQ_CEQID,
I40E_CQPSQ_CQ_CQID,
I40E_COMMIT_FPM_CQCNT,
};
static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
I40E_CQPSQ_STAG_PDID_S,
I40E_CQPSQ_CQ_CEQID_S,
I40E_CQPSQ_CQ_CQID_S,
I40E_COMMIT_FPM_CQCNT_S,
};
/**
* i40iw_config_ceq - Configure CEQ interrupt
* @dev: pointer to the device structure
* @ceq_id: Completion Event Queue ID
* @idx: vector index
* @enable: Enable CEQ interrupt when true
*/
static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
bool enable)
{
u32 reg_val;
reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);
reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);
reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);
wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);
}
/**
* i40iw_ena_irq - Enable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
u32 val;
val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);
}
/**
* i40iw_disable_irq - Disable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);
}
static const struct irdma_irq_ops i40iw_irq_ops = {
.irdma_cfg_aeq = irdma_cfg_aeq,
.irdma_cfg_ceq = i40iw_config_ceq,
.irdma_dis_irq = i40iw_disable_irq,
.irdma_en_irq = i40iw_ena_irq,
};
void i40iw_init_hw(struct irdma_sc_dev *dev)
{
int i;
u8 __iomem *hw_addr;
for (i = 0; i < IRDMA_MAX_REGS; ++i) {
hw_addr = dev->hw->hw_addr;
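/* The doorbell entry is kept as a raw offset (NULL base) rather than a mapped register address. */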
if (i == IRDMA_DB_ADDR_OFFSET)
hw_addr = NULL;
dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
}
for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)
dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];
for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)
dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i];
dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
dev->hw_shifts[i] = i40iw_shifts[i];
for (i = 0; i < IRDMA_MAX_MASKS; ++i)
dev->hw_masks[i] = i40iw_masks[i];
dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->ceq_itr_mask_db = NULL;
dev->aeq_itr_mask_db = NULL;
dev->irq_ops = &i40iw_irq_ops;
/* Setup the hardware limits, hmc may limit further */
dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
}

@@ -0,0 +1,160 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef I40IW_HW_H
#define I40IW_HW_H
#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
#define I40E_GLPE_CRITERR 0x000B4000 /* Reset: PE_CORER */
#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX GENMASK(10, 0)
#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE GENMASK(12, 11)
#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
#define I40E_PFINT_CEQCTL_MAX_INDEX 511
/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
#define I40E_PFINT_CEQCTL_MSIX_INDX_S 0
#define I40E_PFINT_CEQCTL_MSIX_INDX GENMASK(7, 0)
#define I40E_PFINT_CEQCTL_ITR_INDX_S 11
#define I40E_PFINT_CEQCTL_ITR_INDX GENMASK(12, 11)
#define I40E_PFINT_CEQCTL_MSIX0_INDX_S 13
#define I40E_PFINT_CEQCTL_MSIX0_INDX GENMASK(15, 13)
#define I40E_PFINT_CEQCTL_NEXTQ_INDX_S 16
#define I40E_PFINT_CEQCTL_NEXTQ_INDX GENMASK(26, 16)
#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_S 27
#define I40E_PFINT_CEQCTL_NEXTQ_TYPE GENMASK(28, 27)
#define I40E_PFINT_CEQCTL_CAUSE_ENA_S 30
#define I40E_PFINT_CEQCTL_CAUSE_ENA BIT(30)
#define I40E_PFINT_CEQCTL_INTEVENT_S 31
#define I40E_PFINT_CEQCTL_INTEVENT BIT(31)
#define I40E_CQPSQ_STAG_PDID_S 48
#define I40E_CQPSQ_STAG_PDID GENMASK_ULL(62, 48)
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_S 0
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_S 31
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
#define I40E_PFINT_DYN_CTLN_ITR_INDX_S 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX GENMASK(4, 3)
#define I40E_PFINT_DYN_CTLN_INTENA_S 0
#define I40E_PFINT_DYN_CTLN_INTENA BIT(0)
#define I40E_CQPSQ_CQ_CEQID_S 24
#define I40E_CQPSQ_CQ_CEQID GENMASK(30, 24)
#define I40E_CQPSQ_CQ_CQID_S 0
#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
#define I40E_COMMIT_FPM_CQCNT_S 0
#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
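/* X722 (Gen1) device capability limits */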
enum i40iw_device_caps_const {
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_PUSH_PAGE_COUNT = 0,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 63,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_MAX_WQE_SIZE_RQ = 128,
I40IW_MAX_PDS = 32768,
I40IW_MAX_STATS_COUNT = 16,
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2
#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTA - IRDMA_SQ_RSVD) / I40IW_MAX_QUANTA_PER_WR)
#define I40IW_FIRST_VF_FPM_ID 16
#define QUEUE_TYPE_CEQ 2
#define NULL_QUEUE_INDEX 0x7FF
void i40iw_init_hw(struct irdma_sc_dev *dev);
#endif /* I40IW_HW_H */

View File

@ -0,0 +1,216 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
#include "i40iw_hw.h"
#include <linux/net/intel/i40e_client.h>
static struct i40e_client i40iw_client;
/**
 * i40iw_l2param_change - handle MTU change notification from i40e
* @cdev_info: parent lan device information structure with data/ops
* @client: client for parameter change
* @params: new parameters from L2
*/
static void i40iw_l2param_change(struct i40e_info *cdev_info,
struct i40e_client *client,
struct i40e_params *params)
{
struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct ib_device *ibdev;
ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return;
iwdev = to_iwdev(ibdev);
if (iwdev->vsi.mtu != params->mtu) {
l2params.mtu_changed = true;
l2params.mtu = params->mtu;
}
irdma_change_l2params(&iwdev->vsi, &l2params);
ib_device_put(ibdev);
}
/**
* i40iw_close - client interface operation close for iwarp/uda device
* @cdev_info: parent lan device information structure with data/ops
* @client: client to close
* @reset: flag to indicate close on reset
*
* Called by the lan driver during the processing of client unregister
* Destroy and clean up the driver resources
*/
static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
bool reset)
{
struct irdma_device *iwdev;
struct ib_device *ibdev;
ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
if (WARN_ON(!ibdev))
return;
iwdev = to_iwdev(ibdev);
if (reset)
iwdev->reset = true;
iwdev->iw_status = 0;
irdma_port_ibevent(iwdev);
ib_unregister_device_and_put(ibdev);
pr_debug("INIT: Gen1 PF[%d] close complete\n", PCI_FUNC(cdev_info->pcidev->devfn));
}
static void i40iw_request_reset(struct irdma_pci_f *rf)
{
struct i40e_info *cdev_info = rf->cdev;
cdev_info->ops->request_reset(cdev_info, &i40iw_client, 1);
}
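/**
 * i40iw_fill_device_info - Populate Gen1 (X722) device info from the i40e client data
 * @iwdev: irdma device
 * @cdev_info: parent lan device information structure with data/ops
 */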
static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info *cdev_info)
{
struct irdma_pci_f *rf = iwdev->rf;
rf->rdma_ver = IRDMA_GEN_1;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
rf->hw.hw_addr = cdev_info->hw_addr;
rf->cdev = cdev_info;
rf->msix_count = cdev_info->msix_count;
rf->msix_entries = cdev_info->msix_entries;
rf->limits_sel = 5;
rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
rf->iwdev = iwdev;
iwdev->init_state = INITIAL_STATE;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
iwdev->netdev = cdev_info->netdev;
iwdev->vsi_num = 0;
}
/**
* i40iw_open - client interface operation open for iwarp/uda device
* @cdev_info: parent lan device information structure with data/ops
* @client: iwarp client information, provided during registration
*
* Called by the lan driver during the processing of client register
* Create device resources, set up queues, pble and hmc objects and
* register the device with the ib verbs interface
* Return 0 if successful, otherwise return error
*/
static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client)
{
struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
int err = -EIO;
int i;
u16 qset;
u16 last_qset = IRDMA_NO_QSET;
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!iwdev->rf) {
ib_dealloc_device(&iwdev->ibdev);
return -ENOMEM;
}
i40iw_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;
if (irdma_ctrl_init_hw(rf)) {
err = -EIO;
goto err_ctrl_init;
}
l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU;
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
qset = cdev_info->params.qos.prio_qos[i].qs_handle;
l2params.up2tc[i] = cdev_info->params.qos.prio_qos[i].tc;
l2params.qs_handle_list[i] = qset;
if (last_qset == IRDMA_NO_QSET)
last_qset = qset;
else if ((qset != last_qset) && (qset != IRDMA_NO_QSET))
iwdev->dcb = true;
}
if (irdma_rt_init_hw(iwdev, &l2params)) {
err = -EIO;
goto err_rt_init;
}
err = irdma_ib_register_device(iwdev);
if (err)
goto err_ibreg;
ibdev_dbg(&iwdev->ibdev, "INIT: Gen1 PF[%d] open success\n",
PCI_FUNC(rf->pcidev->devfn));
return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
return err;
}
/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
.open = i40iw_open,
.close = i40iw_close,
.l2_param_change = i40iw_l2param_change
};
static struct i40e_client i40iw_client = {
.ops = &i40e_ops,
.type = I40E_CLIENT_IWARP,
};
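/**
 * i40iw_probe - Bind a Gen1 auxiliary device to the legacy i40e client interface
 * @aux_dev: auxiliary device
 * @id: matched auxiliary device id
 *
 * Registering with the i40e client interface leads to i40iw_open() being
 * called by the lan driver once it is ready.
 */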
static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
struct i40e_auxiliary_device,
aux_dev);
struct i40e_info *cdev_info = i40e_adev->ldev;
strncpy(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH);
i40e_client_device_register(cdev_info, &i40iw_client);
return 0;
}
static void i40iw_remove(struct auxiliary_device *aux_dev)
{
struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
struct i40e_auxiliary_device,
aux_dev);
struct i40e_info *cdev_info = i40e_adev->ldev;
i40e_client_device_unregister(cdev_info);
}
static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
{.name = "i40e.iwarp", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, i40iw_auxiliary_id_table);
struct auxiliary_driver i40iw_auxiliary_drv = {
.name = "gen_1",
.id_table = i40iw_auxiliary_id_table,
.probe = i40iw_probe,
.remove = i40iw_remove,
};

View File

@ -0,0 +1,149 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2017 - 2021 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "icrdma_hw.h"
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
PFPE_CQPTAIL,
PFPE_CQPDB,
PFPE_CCQPSTATUS,
PFPE_CCQPHIGH,
PFPE_CCQPLOW,
PFPE_CQARM,
PFPE_CQACK,
PFPE_AEQALLOC,
PFPE_CQPERRCODES,
PFPE_WQEALLOC,
GLINT_DYN_CTL(0),
ICRDMA_DB_ADDR_OFFSET,
GLPCI_LBARCTRL,
GLPE_CPUSTATUS0,
GLPE_CPUSTATUS1,
GLPE_CPUSTATUS2,
PFINT_AEQCTL,
GLINT_CEQCTL(0),
VSIQF_PE_CTL1(0),
PFHMC_PDINV,
GLHMC_VFPDINV(0),
GLPE_CRITERR,
GLINT_RATE(0),
};
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE,
ICRDMA_CCQPSTATUS_CCQP_ERR,
ICRDMA_CQPSQ_STAG_PDID,
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_S,
ICRDMA_CCQPSTATUS_CCQP_ERR_S,
ICRDMA_CQPSQ_STAG_PDID_S,
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
};
/**
* icrdma_ena_irq - Enable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
u32 val;
u32 interval = 0;
if (dev->ceq_itr && dev->aeq->msix_idx != idx)
interval = dev->ceq_itr >> 1; /* 2 usec units */
val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
else
writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
}
/**
* icrdma_disable_irq - Disable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
else
writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
}
/**
 * icrdma_cfg_ceq - Configure CEQ interrupt
* @dev: pointer to the device structure
* @ceq_id: Completion Event Queue ID
* @idx: vector index
 * @enable: true to enable, false to disable
*/
static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
bool enable)
{
u32 reg_val;
reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}
static const struct irdma_irq_ops icrdma_irq_ops = {
.irdma_cfg_aeq = irdma_cfg_aeq,
.irdma_cfg_ceq = icrdma_cfg_ceq,
.irdma_dis_irq = icrdma_disable_irq,
.irdma_en_irq = icrdma_ena_irq,
};
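/**
 * icrdma_init_hw - Set up Gen2 (E810) register tables and device attributes
 * @dev: irdma device
 */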
void icrdma_init_hw(struct irdma_sc_dev *dev)
{
int i;
u8 __iomem *hw_addr;
for (i = 0; i < IRDMA_MAX_REGS; ++i) {
hw_addr = dev->hw->hw_addr;
if (i == IRDMA_DB_ADDR_OFFSET)
hw_addr = NULL;
dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
}
dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
dev->hw_shifts[i] = icrdma_shifts[i];
for (i = 0; i < IRDMA_MAX_MASKS; ++i)
dev->hw_masks[i] = icrdma_masks[i];
dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
IRDMA_FEATURE_CQ_RESIZE;
}

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2017 - 2021 Intel Corporation */
#ifndef ICRDMA_HW_H
#define ICRDMA_HW_H
#include "irdma.h"
#define VFPE_CQPTAIL1 0x0000a000
#define VFPE_CQPDB1 0x0000bc00
#define VFPE_CCQPSTATUS1 0x0000b800
#define VFPE_CCQPHIGH1 0x00009800
#define VFPE_CCQPLOW1 0x0000ac00
#define VFPE_CQARM1 0x0000b400
#define VFPE_CQACK1 0x0000b000
#define VFPE_AEQALLOC1 0x0000a400
#define VFPE_CQPERRCODES1 0x00009c00
#define VFPE_WQEALLOC1 0x0000c000
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
#define PFPE_CQPTAIL 0x00500880
#define PFPE_CQPDB 0x00500800
#define PFPE_CCQPSTATUS 0x0050a000
#define PFPE_CCQPHIGH 0x0050a100
#define PFPE_CCQPLOW 0x0050a080
#define PFPE_CQARM 0x00502c00
#define PFPE_CQACK 0x00502c80
#define PFPE_AEQALLOC 0x00502d00
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */
#define GLPCI_LBARCTRL 0x0009de74
#define GLPE_CPUSTATUS0 0x0050ba5c
#define GLPE_CPUSTATUS1 0x0050ba60
#define GLPE_CPUSTATUS2 0x0050ba64
#define PFINT_AEQCTL 0x0016cb00
#define PFPE_CQPERRCODES 0x0050a200
#define PFPE_WQEALLOC 0x00504400
#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */
#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */
#define PFHMC_PDINV 0x00520300
#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
#define GLPE_CRITERR 0x00534000
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
#define ICRDMA_CQPSQ_STAG_PDID_S 46
#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
#define ICRDMA_CQPSQ_CQ_CEQID_S 22
#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
#define ICRDMA_CQPSQ_CQ_CQID_S 0
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
#endif /* ICRDMA_HW_H*/

View File

@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2017 - 2021 Intel Corporation */
#ifndef IRDMA_H
#define IRDMA_H
#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
#define IRDMA_GLINT_RATE_INTERVAL GENMASK(5, 0)
#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
#define IRDMA_INVALID_CQ_IDX 0xffffffff
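/*
 * Generation-agnostic register, shift and mask indices. The per-generation
 * init_hw routines fill dev->hw_regs, dev->hw_shifts and dev->hw_masks
 * using these indices.
 */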
enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
IRDMA_CCQPSTATUS,
IRDMA_CCQPHIGH,
IRDMA_CCQPLOW,
IRDMA_CQARM,
IRDMA_CQACK,
IRDMA_AEQALLOC,
IRDMA_CQPERRCODES,
IRDMA_WQEALLOC,
IRDMA_GLINT_DYN_CTL,
IRDMA_DB_ADDR_OFFSET,
IRDMA_GLPCI_LBARCTRL,
IRDMA_GLPE_CPUSTATUS0,
IRDMA_GLPE_CPUSTATUS1,
IRDMA_GLPE_CPUSTATUS2,
IRDMA_PFINT_AEQCTL,
IRDMA_GLINT_CEQCTL,
IRDMA_VSIQF_PE_CTL1,
IRDMA_PFHMC_PDINV,
IRDMA_GLHMC_VFPDINV,
IRDMA_GLPE_CRITERR,
IRDMA_GLINT_RATE,
IRDMA_MAX_REGS, /* Must be last entry */
};
enum irdma_shifts {
IRDMA_CCQPSTATUS_CCQP_DONE_S,
IRDMA_CCQPSTATUS_CCQP_ERR_S,
IRDMA_CQPSQ_STAG_PDID_S,
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
IRDMA_MAX_SHIFTS,
};
enum irdma_masks {
IRDMA_CCQPSTATUS_CCQP_DONE_M,
IRDMA_CCQPSTATUS_CCQP_ERR_M,
IRDMA_CQPSQ_STAG_PDID_M,
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
#define IRDMA_MAX_MGS_PER_CTX 8
struct irdma_mcast_grp_ctx_entry_info {
u32 qp_id;
bool valid_entry;
u16 dest_port;
u32 use_cnt;
};
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
u8 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
u32 no_of_mgs;
u32 dest_ip_addr[4];
u16 qs_handle;
struct irdma_dma_mem dma_mem_mc;
struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
};
enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
};
struct irdma_uk_attrs {
u64 feature_flags;
u32 max_hw_wq_frags;
u32 max_hw_read_sges;
u32 max_hw_inline;
u32 max_hw_rq_quanta;
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
u8 hw_rev;
};
struct irdma_hw_attrs {
struct irdma_uk_attrs uk_attrs;
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
u32 min_hw_ceq_size;
u32 max_hw_ceq_size;
u32 max_hw_device_pages;
u32 max_hw_vf_fpm_id;
u32 first_hw_vf_fpm_id;
u32 max_hw_ird;
u32 max_hw_ord;
u32 max_hw_wqes;
u32 max_hw_pds;
u32 max_hw_ena_vf_count;
u32 max_qp_wr;
u32 max_pe_ready_count;
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
u16 max_stat_inst;
};
void i40iw_init_hw(struct irdma_sc_dev *dev);
void icrdma_init_hw(struct irdma_sc_dev *dev);
#endif /* IRDMA_H*/

View File

@ -0,0 +1,358 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
#include "../../../net/ethernet/intel/ice/ice.h"
MODULE_ALIAS("i40iw");
MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
MODULE_LICENSE("Dual BSD/GPL");
static struct notifier_block irdma_inetaddr_notifier = {
.notifier_call = irdma_inetaddr_event
};
static struct notifier_block irdma_inetaddr6_notifier = {
.notifier_call = irdma_inet6addr_event
};
static struct notifier_block irdma_net_notifier = {
.notifier_call = irdma_net_event
};
static struct notifier_block irdma_netdevice_notifier = {
.notifier_call = irdma_netdevice_event
};
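/* Notifiers for IPv4/IPv6 address, neighbor and netdevice events */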
static void irdma_register_notifiers(void)
{
register_inetaddr_notifier(&irdma_inetaddr_notifier);
register_inet6addr_notifier(&irdma_inetaddr6_notifier);
register_netevent_notifier(&irdma_net_notifier);
register_netdevice_notifier(&irdma_netdevice_notifier);
}
static void irdma_unregister_notifiers(void)
{
unregister_netevent_notifier(&irdma_net_notifier);
unregister_inetaddr_notifier(&irdma_inetaddr_notifier);
unregister_inet6addr_notifier(&irdma_inetaddr6_notifier);
unregister_netdevice_notifier(&irdma_netdevice_notifier);
}
static void irdma_prep_tc_change(struct irdma_device *iwdev)
{
iwdev->vsi.tc_change_pending = true;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
/* Wait for all QPs to suspend */
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&iwdev->vsi.qp_suspend_reqs),
IRDMA_EVENT_TIMEOUT);
irdma_ws_reset(&iwdev->vsi);
}
static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
else if (mtu < IRDMA_MIN_MTU_IPV6)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
}
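/**
 * irdma_fill_qos_info - Copy QoS/TC parameters reported by ice into l2params
 * @l2params: l2 parameters to fill
 * @qos_info: QoS parameters from the ice driver
 */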
static void irdma_fill_qos_info(struct irdma_l2params *l2params,
struct iidc_qos_params *qos_info)
{
int i;
l2params->num_tc = qos_info->num_tc;
l2params->vsi_prio_type = qos_info->vport_priority_type;
l2params->vsi_rel_bw = qos_info->vport_relative_bw;
for (i = 0; i < l2params->num_tc; i++) {
l2params->tc_info[i].egress_virt_up =
qos_info->tc_info[i].egress_virt_up;
l2params->tc_info[i].ingress_virt_up =
qos_info->tc_info[i].ingress_virt_up;
l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
}
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
l2params->up2tc[i] = qos_info->up2tc[i];
}
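/**
 * irdma_iidc_event_handler - Handle IIDC events from the ice driver
 * @pf: ice PF
 * @event: event descriptor (MTU change, TC change or critical error)
 */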
static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
struct irdma_l2params l2params = {};
if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
l2params.mtu = iwdev->netdev->mtu;
l2params.mtu_changed = true;
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
} else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
if (iwdev->vsi.tc_change_pending)
return;
irdma_prep_tc_change(iwdev);
} else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
struct iidc_qos_params qos_info = {};
if (!iwdev->vsi.tc_change_pending)
return;
l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
ice_get_qos_params(pf, &qos_info);
iwdev->dcb = qos_info.num_tc > 1;
irdma_fill_qos_info(&l2params, &qos_info);
irdma_change_l2params(&iwdev->vsi, &l2params);
} else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
event->reg);
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
u32 pe_criterr;
pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
pe_criterr);
iwdev->rf->reset = true;
} else {
ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
}
}
if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
ibdev_err(&iwdev->ibdev, "HMC Error\n");
iwdev->rf->reset = true;
}
if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
ibdev_err(&iwdev->ibdev, "PE Push Error\n");
iwdev->rf->reset = true;
}
if (iwdev->rf->reset)
iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}
/**
* irdma_request_reset - Request a reset
* @rf: RDMA PCI function
*/
static void irdma_request_reset(struct irdma_pci_f *rf)
{
struct ice_pf *pf = rf->cdev;
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
ice_rdma_request_reset(pf, IIDC_PFR);
}
/**
* irdma_lan_register_qset - Register qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct ice_pf *pf = iwdev->rf->cdev;
struct iidc_rdma_qset_params qset = {};
int ret;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
ret = ice_add_rdma_qset(pf, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return IRDMA_ERR_REG_QSET;
}
tc_node->l2_sched_node_id = qset.teid;
vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
return 0;
}
/**
* irdma_lan_unregister_qset - Unregister qset with LAN driver
* @vsi: vsi structure
* @tc_node: Traffic class node
*/
static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct ice_pf *pf = iwdev->rf->cdev;
struct iidc_rdma_qset_params qset = {};
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;
if (ice_del_rdma_qset(pf, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
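/**
 * irdma_remove - Detach the Gen2 RDMA device from the auxiliary bus
 * @aux_dev: auxiliary device
 *
 * Unregisters the ib device and clears the RDMA VSI filter in ice.
 */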
static void irdma_remove(struct auxiliary_device *aux_dev)
{
struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev);
irdma_ib_unregister_device(iwdev);
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
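/**
 * irdma_fill_device_info - Populate Gen2 (E810) device info from the ice PF
 * @iwdev: irdma device
 * @pf: ice PF
 */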
static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
{
struct irdma_pci_f *rf = iwdev->rf;
struct ice_vsi *vsi = ice_get_main_vsi(pf);
rf->cdev = pf;
rf->gen_ops.register_qset = irdma_lan_register_qset;
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
rf->hw.hw_addr = pf->hw.hw_addr;
rf->pcidev = pf->pdev;
rf->msix_count = pf->num_rdma_msix;
rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;
iwdev->netdev = vsi->netdev;
iwdev->vsi_num = vsi->vsi_num;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
iwdev->roce_mode = true;
}
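/**
 * irdma_probe - Bring up a Gen2 RDMA device on the auxiliary bus
 * @aux_dev: auxiliary device exported by ice
 * @id: matched auxiliary device id
 *
 * Initializes control and runtime HW resources, registers the ib device
 * and enables the RDMA VSI filter in ice.
 */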
static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
struct iidc_qos_params qos_info = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_l2params l2params = {};
int err;
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!iwdev->rf) {
ib_dealloc_device(&iwdev->ibdev);
return -ENOMEM;
}
irdma_fill_device_info(iwdev, pf);
rf = iwdev->rf;
if (irdma_ctrl_init_hw(rf)) {
err = -EIO;
goto err_ctrl_init;
}
l2params.mtu = iwdev->netdev->mtu;
ice_get_qos_params(pf, &qos_info);
irdma_fill_qos_info(&l2params, &qos_info);
if (irdma_rt_init_hw(iwdev, &l2params)) {
err = -EIO;
goto err_rt_init;
}
err = irdma_ib_register_device(iwdev);
if (err)
goto err_ibreg;
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
dev_set_drvdata(&aux_dev->dev, iwdev);
return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
return err;
}
static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
{.name = "ice.iwarp", },
{.name = "ice.roce", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
.adrv = {
.id_table = irdma_auxiliary_id_table,
.probe = irdma_probe,
.remove = irdma_remove,
},
.event_handler = irdma_iidc_event_handler,
};
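/*
 * Register both auxiliary drivers: the Gen1 driver binds to "i40e.iwarp"
 * devices from i40e, the Gen2 driver binds to "ice.iwarp" and "ice.roce"
 * devices from ice.
 */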
static int __init irdma_init_module(void)
{
int ret;
ret = auxiliary_driver_register(&i40iw_auxiliary_drv);
if (ret) {
pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n",
ret);
return ret;
}
ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
if (ret) {
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
ret);
return ret;
}
irdma_register_notifiers();
return 0;
}
static void __exit irdma_exit_module(void)
{
irdma_unregister_notifiers();
auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
}
module_init(irdma_init_module);
module_exit(irdma_exit_module);

View File

@ -0,0 +1,555 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_MAIN_H
#define IRDMA_MAIN_H
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include <net/secure_seq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/crc32c.h>
#include <linux/kthread.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc.h>
#include <crypto/hash.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include "status.h"
#include "osdep.h"
#include "defs.h"
#include "hmc.h"
#include "type.h"
#include "ws.h"
#include "protos.h"
#include "pble.h"
#include "cm.h"
#include <rdma/irdma-abi.h>
#include "verbs.h"
#include "user.h"
#include "puda.h"
extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
#define IRDMA_ARP_ADD 1
#define IRDMA_ARP_DELETE 2
#define IRDMA_ARP_RESOLVE 3
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
#define IW_TX_TIMEOUT (6 * HZ)
#define IW_FIRST_QPN 1
#define IW_SW_CONTEXT_ALIGN 1024
#define MAX_DPC_ITERATIONS 128
#define IRDMA_EVENT_TIMEOUT 50000
#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
#define IRDMA_RST_TIMEOUT_HZ 4
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
#define IRDMA_MAX_PAGES_PER_FMR 512
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
#define IRDMA_Q_TYPE_PE_AEQ 0x80
#define IRDMA_Q_INVALID_IDX 0xffff
#define IRDMA_REM_ENDPOINT_TRK_QPID 3
#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IRDMA_ROCE_CWND_DEFAULT 0x400
#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
#define IRDMA_FLUSH_SQ BIT(0)
#define IRDMA_FLUSH_RQ BIT(1)
#define IRDMA_REFLUSH BIT(2)
#define IRDMA_FLUSH_WAIT BIT(3)
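/* Initialization progress, in order; recorded in init_state */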
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
CQP_CREATED,
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
CEQ0_CREATED, /* Last state of probe */
ILQ_CREATED,
IEQ_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
IP_ADDR_REGISTERED, /* Last state of open */
};
struct irdma_rsrc_limits {
u32 qplimit;
u32 mrlimit;
u32 cqlimit;
};
struct irdma_cqp_err_info {
u16 maj;
u16 min;
const char *desc;
};
struct irdma_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
u16 min_err_code;
bool error;
u8 op_code;
};
struct irdma_cqp_request {
struct cqp_cmds_info info;
wait_queue_head_t waitq;
struct list_head list;
refcount_t refcnt;
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
bool waiting:1;
bool request_done:1;
bool dynamic:1;
};
struct irdma_cqp {
struct irdma_sc_cqp sc_cqp;
spinlock_t req_lock; /* protect CQP request list */
spinlock_t compl_lock; /* protect CQP completion processing */
wait_queue_head_t waitq;
wait_queue_head_t remove_wq;
struct irdma_dma_mem sq;
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
struct irdma_ccq {
struct irdma_sc_cq sc_cq;
struct irdma_dma_mem mem_cq;
struct irdma_dma_mem shadow_area;
};
struct irdma_ceq {
struct irdma_sc_ceq sc_ceq;
struct irdma_dma_mem mem;
u32 irq;
u32 msix_idx;
struct irdma_pci_f *rf;
struct tasklet_struct dpc_tasklet;
spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
};
struct irdma_aeq {
struct irdma_sc_aeq sc_aeq;
struct irdma_dma_mem mem;
struct irdma_pble_alloc palloc;
bool virtual_map;
};
struct irdma_arp_entry {
u32 ip_addr[4];
u8 mac_addr[ETH_ALEN];
};
struct irdma_msix_vector {
u32 idx;
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
cpumask_t mask;
};
struct irdma_mc_table_info {
u32 mgn;
u32 dest_ip[4];
bool lan_fwd:1;
bool ipv4_valid:1;
};
struct mc_table_list {
struct list_head list;
struct irdma_mc_table_info mc_info;
struct irdma_mcast_grp_info mc_grp_ctx;
};
struct irdma_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
struct irdma_qvlist_info {
u32 num_vectors;
struct irdma_qv_info qv_info[1];
};
struct irdma_gen_ops {
void (*request_reset)(struct irdma_pci_f *rf);
enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
};
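/* Per-PCI-function RDMA state: HW resources, CQP, queues and resource bitmaps */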
struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
u8 rdma_ver;
u8 rst_to;
enum irdma_protocol_used protocol_used;
u32 sd_type;
u32 msix_count;
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
u32 next_mcg;
u32 max_pd;
u32 next_qp;
u32 next_cq;
u32 next_pd;
u32 max_mr_size;
u32 max_cqe;
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
u32 next_arp_index;
u32 ceqs_count;
u32 next_ws_node_id;
u32 max_ws_node_id;
u32 limits_sel;
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
unsigned long *allocated_ahs;
unsigned long *allocated_arps;
enum init_completion_state init_state;
struct irdma_sc_dev sc_dev;
struct pci_dev *pcidev;
void *cdev;
struct irdma_hw hw;
struct irdma_cqp cqp;
struct irdma_ccq ccq;
struct irdma_aeq aeq;
struct irdma_ceq *ceqlist;
struct irdma_hmc_pble_rsrc *pble_rsrc;
struct irdma_arp_entry *arp_table;
spinlock_t arp_lock; /*protect ARP table access*/
spinlock_t rsrc_lock; /* protect HW resource array access */
spinlock_t qptable_lock; /*protect QP table access*/
struct irdma_qp **qp_table;
spinlock_t qh_list_lock; /* protect mc_qht_list */
struct mc_table_list mc_qht_list;
struct irdma_msix_vector *iw_msixtbl;
struct irdma_qvlist_info *iw_qvlist;
struct tasklet_struct dpc_tasklet;
struct msix_entry *msix_entries;
struct irdma_dma_mem obj_mem;
struct irdma_dma_mem obj_next;
atomic_t vchnl_msgs;
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
struct irdma_device *iwdev;
};
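/* Per-ib_device state for the VSI/netdev exposed through the verbs interface */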
struct irdma_device {
struct ib_device ibdev;
struct irdma_pci_f *rf;
struct net_device *netdev;
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
u32 vendor_part_id;
u32 device_cap_flags;
u32 push_mode;
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
u8 rcv_wscale;
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb:1;
bool reset:1;
bool iw_ooo:1;
enum init_completion_state init_state;
wait_queue_head_t suspend_wq;
};
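/* Helpers to convert core ib objects to their irdma containers */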
static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct irdma_device, ibdev);
}
static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}
static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
return container_of(rdma_entry, struct irdma_user_mmap_entry,
rdma_entry);
}
static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct irdma_pd, ibpd);
}
static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
{
return container_of(ibah, struct irdma_ah, ibah);
}
static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct irdma_mr, ibmr);
}
static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct irdma_mr, ibmw);
}
static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct irdma_cq, ibcq);
}
static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct irdma_qp, ibqp);
}
static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
{
return container_of(dev, struct irdma_pci_f, sc_dev);
}
/**
 * irdma_alloc_rsrc - allocate a resource from a bitmap
 * @rf: RDMA PCI function
 * @rsrc_array: resource bit array
 * @max_rsrc: maximum resource number
 * @req_rsrc_num: allocated resource number
 * @next: next free id
 */
static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
unsigned long *rsrc_array, u32 max_rsrc,
u32 *req_rsrc_num, u32 *next)
{
u32 rsrc_num;
unsigned long flags;
spin_lock_irqsave(&rf->rsrc_lock, flags);
rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
if (rsrc_num >= max_rsrc) {
rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
if (rsrc_num >= max_rsrc) {
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
ibdev_dbg(&rf->iwdev->ibdev,
"ERR: resource [%d] allocation failed\n",
rsrc_num);
return -EOVERFLOW;
}
}
__set_bit(rsrc_num, rsrc_array);
*next = rsrc_num + 1;
if (*next == max_rsrc)
*next = 0;
*req_rsrc_num = rsrc_num;
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
return 0;
}
/**
 * irdma_free_rsrc - free a resource back to its bitmap
 * @rf: RDMA PCI function
 * @rsrc_array: resource bit array
 * @rsrc_num: resource number to free
 */
static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
unsigned long *rsrc_array, u32 rsrc_num)
{
unsigned long flags;
spin_lock_irqsave(&rf->rsrc_lock, flags);
__clear_bit(rsrc_num, rsrc_array);
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
}
enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev,
struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev);
void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
u32 *ip_addr, bool ipv4, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
struct irdma_apbvt_entry *entry);
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
bool wait);
void irdma_free_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
void irdma_put_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
void irdma_port_ibevent(struct irdma_device *iwdev);
void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
u16 maj_err_code, u16 min_err_code);
enum irdma_status_code
irdma_handle_cqp_op(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request);
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info,
bool wait);
enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp,
bool suspend);
enum irdma_status_code
irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
enum irdma_quad_entry_type etype,
enum irdma_quad_hash_manage_type mtype, void *cmnode,
bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
u8 term_len);
int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
u16 rem_port, u32 *rem_addr, u16 loc_port,
u32 *loc_addr, u16 vlan_id);
enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,
struct irdma_sc_qp *qp,
struct irdma_qp_flush_info *info,
bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_gen_ae_info *info, bool wait);
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
bool wait,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_net_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
#endif /* IRDMA_MAIN_H */

View File

@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_OSDEP_H
#define IRDMA_OSDEP_H
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <crypto/hash.h>
#include <rdma/ib_verbs.h>
#define STATS_TIMER_DELAY 60000
struct irdma_dma_info {
dma_addr_t *dmaaddrs;
};
struct irdma_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
} __packed;
struct irdma_virt_mem {
void *va;
u32 size;
} __packed;
struct irdma_sc_vsi;
struct irdma_sc_dev;
struct irdma_sc_qp;
struct irdma_puda_buf;
struct irdma_puda_cmpl_info;
struct irdma_update_sds_info;
struct irdma_hmc_fcn_info;
struct irdma_manage_vf_pble_info;
struct irdma_hw;
struct irdma_pci_f;
struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
u8 __iomem *irdma_get_hw_addr(void *dev);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
void *addr, u32 len, u32 val);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
u32 seqnum);
void irdma_free_hash_desc(struct shash_desc *hash_desc);
enum irdma_status_code irdma_init_hash_desc(struct shash_desc **hash_desc);
enum irdma_status_code
irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
struct irdma_puda_buf *buf);
enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
enum irdma_status_code
irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
enum irdma_status_code
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
u8 term_len);
void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred);
void irdma_terminate_start_timer(struct irdma_sc_qp *qp);
void irdma_terminate_del_timer(struct irdma_sc_qp *qp);
void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi);
void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
void wr32(struct irdma_hw *hw, u32 reg, u32 val);
u32 rd32(struct irdma_hw *hw, u32 reg);
u64 rd64(struct irdma_hw *hw, u32 reg);
enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
dma_addr_t *pg_dma, u32 pg_cnt);
void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
#endif /* IRDMA_OSDEP_H */

View File

@ -0,0 +1,521 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"
static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
/**
* irdma_destroy_pble_prm - destroy prm during module unload
* @pble_rsrc: pble resources
*/
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_chunk *chunk;
struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;
while (!list_empty(&pinfo->clist)) {
chunk = (struct irdma_chunk *) pinfo->clist.next;
list_del(&chunk->list);
if (chunk->type == PBLE_SD_PAGED)
irdma_pble_free_paged_mem(chunk);
if (chunk->bitmapbuf)
kfree(chunk->bitmapmem.va);
kfree(chunk->chunkmem.va);
}
}
/**
* irdma_hmc_init_pble - Initialize pble resources during module load
* @dev: irdma_sc_dev struct
* @pble_rsrc: pble resources
*/
enum irdma_status_code
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_hmc_info *hmc_info;
u32 fpm_idx = 0;
enum irdma_status_code status = 0;
hmc_info = dev->hmc_info;
pble_rsrc->dev = dev;
pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
/* Start pbles on a 4k boundary */
if (pble_rsrc->fpm_base_addr & 0xfff)
fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
pble_rsrc->unallocated_pble =
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;
mutex_init(&pble_rsrc->pble_mutex_lock);
spin_lock_init(&pble_rsrc->pinfo.prm_lock);
INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
if (add_pble_prm(pble_rsrc)) {
irdma_destroy_pble_prm(pble_rsrc);
status = IRDMA_ERR_NO_MEMORY;
}
return status;
}
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
* @pble_rsrc: structure containing fpm address
* @idx: where to return indexes
*/
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
/**
* add_sd_direct - add sd direct for pble
* @pble_rsrc: pble resource ptr
* @info: page info for sd
*/
static enum irdma_status_code
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
enum irdma_status_code ret_code = 0;
struct sd_pd_idx *idx = &info->idx;
struct irdma_chunk *chunk = info->chunk;
struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
u32 offset = 0;
if (!sd_entry->valid) {
ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
info->idx.sd_idx,
IRDMA_SD_TYPE_DIRECT,
IRDMA_HMC_DIRECT_BP_SIZE);
if (ret_code)
return ret_code;
chunk->type = PBLE_SD_CONTIGOUS;
}
offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
chunk->vaddr = (uintptr_t)sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
"PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%llx fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
}
/**
* fpm_to_idx - given fpm address, get pble index
* @pble_rsrc: pble resource management
* @addr: fpm address for index
*/
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
u64 idx;
idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;
return (u32)idx;
}
/**
* add_bp_pages - add backing pages for sd
* @pble_rsrc: pble resource management
* @info: page info for sd
*/
static enum irdma_status_code
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
u8 *addr;
struct irdma_dma_mem mem;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_chunk *chunk = info->chunk;
enum irdma_status_code status = 0;
u32 rel_pd_idx = info->idx.rel_pd_idx;
u32 pd_idx = info->idx.pd_idx;
u32 i;
if (irdma_pble_get_paged_mem(chunk, info->pages))
return IRDMA_ERR_NO_MEMORY;
status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
IRDMA_SD_TYPE_PAGED,
IRDMA_HMC_DIRECT_BP_SIZE);
if (status)
goto error;
addr = (u8 *)(uintptr_t)chunk->vaddr;
for (i = 0; i < info->pages; i++) {
mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
mem.size = 4096;
mem.va = addr;
pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
if (!pd_entry->valid) {
status = irdma_add_pd_table_entry(dev, hmc_info,
pd_idx++, &mem);
if (status)
goto error;
addr += 4096;
}
}
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
return 0;
error:
irdma_pble_free_paged_mem(chunk);
return status;
}
/**
 * irdma_get_type - return the sd entry type (direct or paged) for an sd
* @dev: irdma_sc_dev struct
* @idx: index of sd
* @pages: pages in the sd
*/
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
struct sd_pd_idx *idx, u32 pages)
{
enum irdma_sd_entry_type sd_entry_type;
sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
/**
 * add_pble_prm - add an sd entry for the pble resource
* @pble_rsrc: pble resource management
*/
static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_hmc_info *hmc_info;
struct irdma_chunk *chunk;
struct irdma_add_page_info info;
struct sd_pd_idx *idx = &info.idx;
enum irdma_status_code ret_code = 0;
enum irdma_sd_entry_type sd_entry_type;
u64 sd_reg_val = 0;
struct irdma_virt_mem chunkmem;
u32 pages;
if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
return IRDMA_ERR_NO_MEMORY;
if (pble_rsrc->next_fpm_addr & 0xfff)
return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
chunkmem.size = sizeof(*chunk);
chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
if (!chunkmem.va)
return IRDMA_ERR_NO_MEMORY;
chunk = chunkmem.va;
chunk->chunkmem = chunkmem;
hmc_info = dev->hmc_info;
chunk->dev = dev;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
get_sd_pd_idx(pble_rsrc, idx);
sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
IRDMA_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
info.sd_entry = sd_entry;
if (!sd_entry->valid)
sd_entry_type = irdma_get_type(dev, idx, pages);
else
sd_entry_type = sd_entry->entry_type;
ibdev_dbg(to_ibdev(dev),
"PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
pages, pble_rsrc->unallocated_pble,
pble_rsrc->next_fpm_addr);
ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
ret_code = add_sd_direct(pble_rsrc, &info);
if (ret_code)
sd_entry_type = IRDMA_SD_TYPE_PAGED;
else
pble_rsrc->stats_direct_sds++;
if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
ret_code = add_bp_pages(pble_rsrc, &info);
if (ret_code)
goto error;
else
pble_rsrc->stats_paged_sds++;
}
ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
if (ret_code)
goto error;
pble_rsrc->next_fpm_addr += chunk->size;
ibdev_dbg(to_ibdev(dev),
"PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
if (!sd_entry->valid) {
ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
idx->sd_idx, sd_entry->entry_type, true);
if (ret_code)
goto error;
}
sd_entry->valid = true;
return 0;
error:
if (chunk->bitmapbuf)
kfree(chunk->bitmapmem.va);
kfree(chunk->chunkmem.va);
return ret_code;
}
/**
 * free_lvl2 - free a level 2 pble allocation
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
u32 i;
struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf = lvl2->leaf;
for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
if (leaf->addr)
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&leaf->chunkinfo);
else
break;
}
if (root->addr)
irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);
kfree(lvl2->leafmem.va);
lvl2->leaf = NULL;
}
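/*
 * A level 2 allocation uses a root pble block holding one index per leaf
 * and leaf blocks of up to PBLE_PER_PAGE pbles each.
 */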
/**
* get_lvl2_pble - get level 2 pble resource
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
static enum irdma_status_code
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
u32 lf4k, lflast, total, i;
u32 pblcnt = PBLE_PER_PAGE;
u64 *addr;
struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf;
enum irdma_status_code ret_code;
u64 fpm_addr;
/* number of full 512-pble (4K) leaves */
lf4k = palloc->total_cnt >> 9;
lflast = palloc->total_cnt % PBLE_PER_PAGE;
total = (lflast == 0) ? lf4k : lf4k + 1;
lvl2->leaf_cnt = total;
lvl2->leafmem.size = (sizeof(*leaf) * total);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
if (!lvl2->leafmem.va)
return IRDMA_ERR_NO_MEMORY;
lvl2->leaf = lvl2->leafmem.va;
leaf = lvl2->leaf;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
total << 3, &root->addr, &fpm_addr);
if (ret_code) {
kfree(lvl2->leafmem.va);
lvl2->leaf = NULL;
return IRDMA_ERR_NO_MEMORY;
}
root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
root->cnt = total;
addr = (u64 *)(uintptr_t)root->addr;
for (i = 0; i < total; i++, leaf++) {
pblcnt = (lflast && ((i + 1) == total)) ?
lflast : PBLE_PER_PAGE;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
&leaf->chunkinfo, pblcnt << 3,
&leaf->addr, &fpm_addr);
if (ret_code)
goto error;
leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
leaf->cnt = pblcnt;
*addr = (u64)leaf->idx;
addr++;
}
palloc->level = PBLE_LEVEL_2;
pble_rsrc->stats_lvl2++;
return 0;
error:
free_lvl2(pble_rsrc, palloc);
return IRDMA_ERR_NO_MEMORY;
}
/**
* get_lvl1_pble - get level 1 pble resource
* @pble_rsrc: pble resource management
* @palloc: level 1 pble allocation
*/
static enum irdma_status_code
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
enum irdma_status_code ret_code;
u64 fpm_addr, vaddr;
struct irdma_pble_info *lvl1 = &palloc->level1;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
palloc->total_cnt << 3, &vaddr,
&fpm_addr);
if (ret_code)
return IRDMA_ERR_NO_MEMORY;
lvl1->addr = vaddr;
palloc->level = PBLE_LEVEL_1;
lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
lvl1->cnt = palloc->total_cnt;
pble_rsrc->stats_lvl1++;
return 0;
}
/**
 * get_lvl1_lvl2_pble - call the level 1 and, if needed, level 2 pble routines
* @pble_rsrc: pble resources
 * @palloc: contains all information regarding the pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE
*/
static enum irdma_status_code
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, bool level1_only)
{
enum irdma_status_code status = 0;
status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
return status;
status = get_lvl2_pble(pble_rsrc, palloc);
return status;
}
/**
* irdma_get_pble - allocate pbles from the prm
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire
*/
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc,
u32 pble_cnt, bool level1_only)
{
enum irdma_status_code status = 0;
int max_sds = 0;
int i;
palloc->total_cnt = pble_cnt;
palloc->level = PBLE_LEVEL_0;
mutex_lock(&pble_rsrc->pble_mutex_lock);
/* check first to see if we can get PBLEs without acquiring
 * additional SDs
 */
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
if (!status)
goto exit;
max_sds = (palloc->total_cnt >> 18) + 1;
for (i = 0; i < max_sds; i++) {
status = add_pble_prm(pble_rsrc);
if (status)
break;
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
/* if level1_only, only go through it once */
if (!status || level1_only)
break;
}
exit:
if (!status) {
pble_rsrc->allocdpbles += pble_cnt;
pble_rsrc->stats_alloc_ok++;
} else {
pble_rsrc->stats_alloc_fail++;
}
mutex_unlock(&pble_rsrc->pble_mutex_lock);
return status;
}
/**
* irdma_free_pble - put pbles back into prm
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble resource being freed
*/
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
pble_rsrc->freedpbles += palloc->total_cnt;
if (palloc->level == PBLE_LEVEL_2)
free_lvl2(pble_rsrc, palloc);
else
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&palloc->level1.chunkinfo);
pble_rsrc->stats_alloc_freed++;
}
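The two entry points above, irdma_get_pble() and irdma_free_pble(), bracket every PBLE consumer in the driver. A minimal caller sketch, assuming a hypothetical pble_rsrc handle and page count and omitting the MR programming step in between:

/* Illustrative sketch only; the demo_ name and parameters are hypothetical. */
static enum irdma_status_code demo_map_mr_pbles(struct irdma_hmc_pble_rsrc *pble_rsrc,
						struct irdma_pble_alloc *palloc,
						u32 pg_cnt)
{
	enum irdma_status_code status;

	/* one PBLE per page; allow level-2 fallback (level1_only = false) */
	status = irdma_get_pble(pble_rsrc, palloc, pg_cnt, false);
	if (status)
		return status;

	/* ... program palloc->level1 or palloc->level2 into the MR context ... */

	/* return the PBLEs to the pool when the region is destroyed */
	irdma_free_pble(pble_rsrc, palloc);

	return 0;
}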

View File

@ -0,0 +1,136 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2019 Intel Corporation */
#ifndef IRDMA_PBLE_H
#define IRDMA_PBLE_H
#define PBLE_SHIFT 6
#define PBLE_PER_PAGE 512
#define HMC_PAGED_BP_SHIFT 12
#define PBLE_512_SHIFT 9
#define PBLE_INVALID_IDX 0xffffffff
enum irdma_pble_level {
PBLE_LEVEL_0 = 0,
PBLE_LEVEL_1 = 1,
PBLE_LEVEL_2 = 2,
};
enum irdma_alloc_type {
PBLE_NO_ALLOC = 0,
PBLE_SD_CONTIGOUS = 1,
PBLE_SD_PAGED = 2,
};
struct irdma_chunk;
struct irdma_pble_chunkinfo {
struct irdma_chunk *pchunk;
u64 bit_idx;
u64 bits_used;
};
struct irdma_pble_info {
u64 addr;
u32 idx;
u32 cnt;
struct irdma_pble_chunkinfo chunkinfo;
};
struct irdma_pble_level2 {
struct irdma_pble_info root;
struct irdma_pble_info *leaf;
struct irdma_virt_mem leafmem;
u32 leaf_cnt;
};
struct irdma_pble_alloc {
u32 total_cnt;
enum irdma_pble_level level;
union {
struct irdma_pble_info level1;
struct irdma_pble_level2 level2;
};
};
struct sd_pd_idx {
u32 sd_idx;
u32 pd_idx;
u32 rel_pd_idx;
};
struct irdma_add_page_info {
struct irdma_chunk *chunk;
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_hmc_info *hmc_info;
struct sd_pd_idx idx;
u32 pages;
};
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
void *bitmapbuf;
u32 sizeofbitmap;
u64 size;
u64 vaddr;
u64 fpm_addr;
u32 pg_cnt;
enum irdma_alloc_type type;
struct irdma_sc_dev *dev;
struct irdma_virt_mem bitmapmem;
struct irdma_virt_mem chunkmem;
};
struct irdma_pble_prm {
struct list_head clist;
spinlock_t prm_lock; /* protect prm bitmap */
u64 total_pble_alloc;
u64 free_pble_cnt;
u8 pble_shift;
};
struct irdma_hmc_pble_rsrc {
u32 unallocated_pble;
struct mutex pble_mutex_lock; /* protect PBLE resource */
struct irdma_sc_dev *dev;
u64 fpm_base_addr;
u64 next_fpm_addr;
struct irdma_pble_prm pinfo;
u64 allocdpbles;
u64 freedpbles;
u32 stats_direct_sds;
u32 stats_paged_sds;
u64 stats_alloc_ok;
u64 stats_alloc_fail;
u64 stats_alloc_freed;
u64 stats_lvl1;
u64 stats_lvl2;
};
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
enum irdma_status_code
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
struct irdma_hmc_pble_rsrc *pble_rsrc);
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc);
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc,
u32 pble_cnt, bool level1_only);
enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk);
enum irdma_status_code
irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo, u32 mem_size,
u64 *vaddr, u64 *fpm_addr);
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo);
void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
u32 pg_cnt);
void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
#endif /* IRDMA_PBLE_H */
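As a worked example of the level-2 split computed by get_lvl2_pble() above: with PBLE_PER_PAGE = 512, a request for 1300 PBLEs needs two full leaves plus one partial leaf of 276 entries, i.e. three leaves chained under a single root page. A hedged restatement of that arithmetic (the demo_ helper below is hypothetical, not part of the driver):

static inline u32 demo_lvl2_leaf_cnt(u32 total_cnt)
{
	u32 full = total_cnt / PBLE_PER_PAGE;	/* full 512-PBLE leaves */
	u32 rem = total_cnt % PBLE_PER_PAGE;	/* entries in the last, partial leaf */

	return rem ? full + 1 : full;		/* e.g. 1300 -> 3 leaves */
}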

View File

@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2016 - 2021 Intel Corporation */
#ifndef IRDMA_PROTOS_H
#define IRDMA_PROTOS_H
#define PAUSE_TIMER_VAL 0xffff
#define REFRESH_THRESHOLD 0x7fff
#define HIGH_THRESHOLD 0x800
#define LOW_THRESHOLD 0x200
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
/* init operations */
enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
struct irdma_sc_dev *dev,
struct irdma_device_init_info *info);
void irdma_sc_rt_init(struct irdma_sc_dev *dev);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
enum irdma_status_code
irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
struct irdma_fast_reg_stag_info *info, bool post_sq);
/* HMC/FPM functions */
enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
/* stats misc */
enum irdma_status_code
irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat, bool wait);
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat);
void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
struct irdma_dev_hw_stats *stats_values,
u64 *hw_stats_regs_32, u64 *hw_stats_regs_64,
u8 hw_rev);
enum irdma_status_code
irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_up_info *map_info);
enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_ceq *sc_ceq, u8 op);
enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_aeq *sc_aeq, u8 op);
enum irdma_status_code
irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
struct irdma_stats_inst_info *stats_info);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats);
/* vsi functions */
enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_stats_info *info);
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info);
enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
struct irdma_sc_cq *cq);
void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
/* misc L2 param change functions */
void irdma_change_l2params(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params);
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
u8 cmd);
void irdma_qp_add_qos(struct irdma_sc_qp *qp);
void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
struct irdma_sc_qp *qp);
void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
/* terminate functions*/
void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
void irdma_terminate_connection(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info);
void irdma_terminate_received(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info);
/* dynamic memory allocation */
/* misc */
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
enum irdma_status_code
irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev,
u32 qp_count);
enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev);
void free_sd_mem(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
struct cqp_cmds_info *pcmdinfo);
enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
enum irdma_status_code
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
enum irdma_status_code
irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
#endif /* IRDMA_PROTOS_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,194 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_PUDA_H
#define IRDMA_PUDA_H
#define IRDMA_IEQ_MPA_FRAMING 6
#define IRDMA_TCP_OFFSET 40
#define IRDMA_IPV4_PAD 20
#define IRDMA_MRK_BLK_SZ 512
enum puda_rsrc_type {
IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
IRDMA_PUDA_RSRC_TYPE_IEQ,
IRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */
};
enum puda_rsrc_complete {
PUDA_CQ_CREATED = 1,
PUDA_QP_CREATED,
PUDA_TX_COMPLETE,
PUDA_RX_COMPLETE,
PUDA_HASH_CRC_COMPLETE,
};
struct irdma_sc_dev;
struct irdma_sc_qp;
struct irdma_sc_cq;
struct irdma_puda_cmpl_info {
struct irdma_qp_uk *qp;
u8 q_type;
u8 l3proto;
u8 l4proto;
u16 vlan;
u32 payload_len;
u32 compl_error; /* No_err=0, else major and minor err code */
u32 qp_id;
u32 wqe_idx;
bool ipv4:1;
bool smac_valid:1;
bool vlan_valid:1;
u8 smac[ETH_ALEN];
};
struct irdma_puda_send_info {
u64 paddr; /* Physical address */
u32 len;
u32 ah_id;
u8 tcplen;
u8 maclen;
bool ipv4:1;
bool do_lpb:1;
void *scratch;
};
struct irdma_puda_buf {
struct list_head list; /* MUST be first entry */
struct irdma_dma_mem mem; /* DMA memory for the buffer */
struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
void *scratch;
u8 *iph;
u8 *tcph;
u8 *data;
u16 datalen;
u16 vlan_id;
u8 tcphlen; /* tcp length in bytes */
u8 maclen; /* mac length in bytes */
u32 totallen; /* maclen + iphlen + tcphlen + datalen */
refcount_t refcount;
u8 hdrlen;
bool ipv4:1;
bool vlan_valid:1;
bool do_lpb:1; /* Loopback buffer */
bool smac_valid:1;
u32 seqnum;
u32 ah_id;
u8 smac[ETH_ALEN];
struct irdma_sc_vsi *vsi;
};
struct irdma_puda_rsrc_info {
void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
enum puda_rsrc_type type; /* ILQ or IEQ */
u32 count;
u32 pd_id;
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
u8 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
struct irdma_puda_rsrc {
struct irdma_sc_cq cq;
struct irdma_sc_qp qp;
struct irdma_sc_pd sc_pd;
struct irdma_sc_dev *dev;
struct irdma_sc_vsi *vsi;
struct irdma_dma_mem cqmem;
struct irdma_dma_mem qpmem;
struct irdma_virt_mem ilq_mem;
enum puda_rsrc_complete cmpl;
enum puda_rsrc_type type;
u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 cq_size;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u32 compl_rxwqe_idx;
u32 rx_wqe_idx;
u32 rxq_invalid_cnt;
u32 tx_wqe_avail_cnt;
struct shash_desc *hash_desc;
struct list_head txpend;
struct list_head bufpool; /* free buffers pool list for recv and xmit */
u32 alloc_buf_count;
u32 avail_buf_count; /* snapshot of currently available buffers */
spinlock_t bufpool_lock;
struct irdma_puda_buf *alloclist;
void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
/* puda stats */
u64 stats_buf_alloc_fail;
u64 stats_pkt_rcvd;
u64 stats_pkt_sent;
u64 stats_rcvd_pkt_err;
u64 stats_sent_pkt_q;
u64 stats_bad_qp_id;
/* IEQ stats */
u64 fpdu_processed;
u64 bad_seq_num;
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
u8 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
struct irdma_puda_send_info *info);
enum irdma_status_code
irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
struct irdma_puda_rsrc_info *info);
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
bool reset);
enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
struct irdma_sc_cq *cq,
u32 *compl_err);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
enum irdma_status_code
irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
struct irdma_puda_buf *buf);
enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
void *addr, u32 len, u32 val);
enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_free_hash_desc(struct shash_desc *desc);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
u32 seqnum);
enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_qp *qp);
enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_cq *cq);
enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
struct irdma_ah_info *ah_info);
enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
struct irdma_ah_info *ah_info,
bool wait, enum puda_rsrc_type type,
void *cb_param,
struct irdma_sc_ah **ah);
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
struct irdma_puda_rsrc *ieq);
void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
#endif /* IRDMA_PUDA_H */
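For context, a sketch of how a caller might fill irdma_puda_rsrc_info and create an IEQ with irdma_puda_create_rsrc(); the queue sizes, IDs and buffer size below are hypothetical placeholders, not values taken from the driver:

static enum irdma_status_code demo_create_ieq(struct irdma_sc_vsi *vsi,
					      void (*rx_cb)(struct irdma_sc_vsi *,
							    struct irdma_puda_buf *))
{
	struct irdma_puda_rsrc_info info = {};

	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
	info.count = 1;
	info.pd_id = 2;			/* hypothetical PD id */
	info.cq_id = 3;			/* hypothetical CQ id */
	info.qp_id = 3;			/* hypothetical QP id */
	info.sq_size = 512;
	info.rq_size = 512;
	info.tx_buf_cnt = 512;		/* total bufs = rq_size + tx_buf_cnt */
	info.buf_size = 2048;		/* must cover max datalen + TCP/IP + MAC hdrs */
	info.receive = rx_cb;

	return irdma_puda_create_rsrc(vsi, &info);
}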

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_STATUS_H
#define IRDMA_STATUS_H
/* Error Codes */
enum irdma_status_code {
IRDMA_SUCCESS = 0,
IRDMA_ERR_NVM = -1,
IRDMA_ERR_NVM_CHECKSUM = -2,
IRDMA_ERR_CFG = -4,
IRDMA_ERR_PARAM = -5,
IRDMA_ERR_DEVICE_NOT_SUPPORTED = -6,
IRDMA_ERR_RESET_FAILED = -7,
IRDMA_ERR_SWFW_SYNC = -8,
IRDMA_ERR_NO_MEMORY = -9,
IRDMA_ERR_BAD_PTR = -10,
IRDMA_ERR_INVALID_PD_ID = -11,
IRDMA_ERR_INVALID_QP_ID = -12,
IRDMA_ERR_INVALID_CQ_ID = -13,
IRDMA_ERR_INVALID_CEQ_ID = -14,
IRDMA_ERR_INVALID_AEQ_ID = -15,
IRDMA_ERR_INVALID_SIZE = -16,
IRDMA_ERR_INVALID_ARP_INDEX = -17,
IRDMA_ERR_INVALID_FPM_FUNC_ID = -18,
IRDMA_ERR_QP_INVALID_MSG_SIZE = -19,
IRDMA_ERR_QP_TOOMANY_WRS_POSTED = -20,
IRDMA_ERR_INVALID_FRAG_COUNT = -21,
IRDMA_ERR_Q_EMPTY = -22,
IRDMA_ERR_INVALID_ALIGNMENT = -23,
IRDMA_ERR_FLUSHED_Q = -24,
IRDMA_ERR_INVALID_PUSH_PAGE_INDEX = -25,
IRDMA_ERR_INVALID_INLINE_DATA_SIZE = -26,
IRDMA_ERR_TIMEOUT = -27,
IRDMA_ERR_OPCODE_MISMATCH = -28,
IRDMA_ERR_CQP_COMPL_ERROR = -29,
IRDMA_ERR_INVALID_VF_ID = -30,
IRDMA_ERR_INVALID_HMCFN_ID = -31,
IRDMA_ERR_BACKING_PAGE_ERROR = -32,
IRDMA_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
IRDMA_ERR_INVALID_PBLE_INDEX = -34,
IRDMA_ERR_INVALID_SD_INDEX = -35,
IRDMA_ERR_INVALID_PAGE_DESC_INDEX = -36,
IRDMA_ERR_INVALID_SD_TYPE = -37,
IRDMA_ERR_MEMCPY_FAILED = -38,
IRDMA_ERR_INVALID_HMC_OBJ_INDEX = -39,
IRDMA_ERR_INVALID_HMC_OBJ_COUNT = -40,
IRDMA_ERR_BUF_TOO_SHORT = -43,
IRDMA_ERR_BAD_IWARP_CQE = -44,
IRDMA_ERR_NVM_BLANK_MODE = -45,
IRDMA_ERR_NOT_IMPL = -46,
IRDMA_ERR_PE_DOORBELL_NOT_ENA = -47,
IRDMA_ERR_NOT_READY = -48,
IRDMA_NOT_SUPPORTED = -49,
IRDMA_ERR_FIRMWARE_API_VER = -50,
IRDMA_ERR_RING_FULL = -51,
IRDMA_ERR_MPA_CRC = -61,
IRDMA_ERR_NO_TXBUFS = -62,
IRDMA_ERR_SEQ_NUM = -63,
IRDMA_ERR_list_empty = -64,
IRDMA_ERR_INVALID_MAC_ADDR = -65,
IRDMA_ERR_BAD_STAG = -66,
IRDMA_ERR_CQ_COMPL_ERROR = -67,
IRDMA_ERR_Q_DESTROYED = -68,
IRDMA_ERR_INVALID_FEAT_CNT = -69,
IRDMA_ERR_REG_CQ_FULL = -70,
IRDMA_ERR_VF_MSG_ERROR = -71,
IRDMA_ERR_NO_INTR = -72,
IRDMA_ERR_REG_QSET = -73,
};
#endif /* IRDMA_STATUS_H */

View File

@ -0,0 +1,112 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2019 Intel Corporation */
#define CREATE_TRACE_POINTS
#include "trace.h"
const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4)
{
const char *ret = trace_seq_buffer_ptr(p);
if (ipv4) {
__be32 myaddr = htonl(*addr);
trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port));
} else {
trace_seq_printf(p, "%pI6:%d", addr, htons(port));
}
trace_seq_putc(p, 0);
return ret;
}
const char *parse_iw_event_type(enum iw_cm_event_type iw_type)
{
switch (iw_type) {
case IW_CM_EVENT_CONNECT_REQUEST:
return "IwRequest";
case IW_CM_EVENT_CONNECT_REPLY:
return "IwReply";
case IW_CM_EVENT_ESTABLISHED:
return "IwEstablished";
case IW_CM_EVENT_DISCONNECT:
return "IwDisconnect";
case IW_CM_EVENT_CLOSE:
return "IwClose";
}
return "Unknown";
}
const char *parse_cm_event_type(enum irdma_cm_event_type cm_type)
{
switch (cm_type) {
case IRDMA_CM_EVENT_ESTABLISHED:
return "CmEstablished";
case IRDMA_CM_EVENT_MPA_REQ:
return "CmMPA_REQ";
case IRDMA_CM_EVENT_MPA_CONNECT:
return "CmMPA_CONNECT";
case IRDMA_CM_EVENT_MPA_ACCEPT:
return "CmMPA_ACCEPT";
case IRDMA_CM_EVENT_MPA_REJECT:
return "CmMPA_REJECT";
case IRDMA_CM_EVENT_MPA_ESTABLISHED:
return "CmMPA_ESTABLISHED";
case IRDMA_CM_EVENT_CONNECTED:
return "CmConnected";
case IRDMA_CM_EVENT_RESET:
return "CmReset";
case IRDMA_CM_EVENT_ABORTED:
return "CmAborted";
case IRDMA_CM_EVENT_UNKNOWN:
return "none";
}
return "Unknown";
}
const char *parse_cm_state(enum irdma_cm_node_state state)
{
switch (state) {
case IRDMA_CM_STATE_UNKNOWN:
return "UNKNOWN";
case IRDMA_CM_STATE_INITED:
return "INITED";
case IRDMA_CM_STATE_LISTENING:
return "LISTENING";
case IRDMA_CM_STATE_SYN_RCVD:
return "SYN_RCVD";
case IRDMA_CM_STATE_SYN_SENT:
return "SYN_SENT";
case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
return "ONE_SIDE_ESTABLISHED";
case IRDMA_CM_STATE_ESTABLISHED:
return "ESTABLISHED";
case IRDMA_CM_STATE_ACCEPTING:
return "ACCEPTING";
case IRDMA_CM_STATE_MPAREQ_SENT:
return "MPAREQ_SENT";
case IRDMA_CM_STATE_MPAREQ_RCVD:
return "MPAREQ_RCVD";
case IRDMA_CM_STATE_MPAREJ_RCVD:
return "MPAREJ_RECVD";
case IRDMA_CM_STATE_OFFLOADED:
return "OFFLOADED";
case IRDMA_CM_STATE_FIN_WAIT1:
return "FIN_WAIT1";
case IRDMA_CM_STATE_FIN_WAIT2:
return "FIN_WAIT2";
case IRDMA_CM_STATE_CLOSE_WAIT:
return "CLOSE_WAIT";
case IRDMA_CM_STATE_TIME_WAIT:
return "TIME_WAIT";
case IRDMA_CM_STATE_LAST_ACK:
return "LAST_ACK";
case IRDMA_CM_STATE_CLOSING:
return "CLOSING";
case IRDMA_CM_STATE_LISTENER_DESTROYED:
return "LISTENER_DESTROYED";
case IRDMA_CM_STATE_CLOSED:
return "CLOSED";
}
return ("Bad state");
}

View File

@ -0,0 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2019 Intel Corporation */
#include "trace_cm.h"

View File

@ -0,0 +1,458 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2019 - 2021 Intel Corporation */
#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_CM_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "main.h"
const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4);
const char *parse_iw_event_type(enum iw_cm_event_type iw_type);
const char *parse_cm_event_type(enum irdma_cm_event_type cm_type);
const char *parse_cm_state(enum irdma_cm_node_state);
#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irdma_cm
TRACE_EVENT(irdma_create_listen,
TP_PROTO(struct irdma_device *iwdev, struct irdma_cm_info *cm_info),
TP_ARGS(iwdev, cm_info),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__dynamic_array(u32, laddr, 4)
__field(u16, lport)
__field(bool, ipv4)
),
TP_fast_assign(__entry->iwdev = iwdev;
__entry->lport = cm_info->loc_port;
__entry->ipv4 = cm_info->ipv4;
memcpy(__get_dynamic_array(laddr),
cm_info->loc_addr, 4);
),
TP_printk("iwdev=%p loc: %s",
__entry->iwdev,
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4)
)
);
TRACE_EVENT(irdma_dec_refcnt_listen,
TP_PROTO(struct irdma_cm_listener *listener, void *caller),
TP_ARGS(listener, caller),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u32, refcnt)
__dynamic_array(u32, laddr, 4)
__field(u16, lport)
__field(bool, ipv4)
__field(void *, caller)
),
TP_fast_assign(__entry->iwdev = listener->iwdev;
__entry->lport = listener->loc_port;
__entry->ipv4 = listener->ipv4;
memcpy(__get_dynamic_array(laddr),
listener->loc_addr, 4);
),
TP_printk("iwdev=%p caller=%pS loc: %s",
__entry->iwdev,
__entry->caller,
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4)
)
);
DECLARE_EVENT_CLASS(listener_template,
TP_PROTO(struct irdma_cm_listener *listener),
TP_ARGS(listener),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u16, lport)
__field(u16, vlan_id)
__field(bool, ipv4)
__field(enum irdma_cm_listener_state,
state)
__dynamic_array(u32, laddr, 4)
),
TP_fast_assign(__entry->iwdev = listener->iwdev;
__entry->lport = listener->loc_port;
__entry->vlan_id = listener->vlan_id;
__entry->ipv4 = listener->ipv4;
__entry->state = listener->listener_state;
memcpy(__get_dynamic_array(laddr),
listener->loc_addr, 4);
),
TP_printk("iwdev=%p vlan=%d loc: %s",
__entry->iwdev,
__entry->vlan_id,
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4)
)
);
DEFINE_EVENT(listener_template, irdma_find_listener,
TP_PROTO(struct irdma_cm_listener *listener),
TP_ARGS(listener));
DEFINE_EVENT(listener_template, irdma_del_multiple_qhash,
TP_PROTO(struct irdma_cm_listener *listener),
TP_ARGS(listener));
TRACE_EVENT(irdma_negotiate_mpa_v2,
TP_PROTO(struct irdma_cm_node *cm_node),
TP_ARGS(cm_node),
TP_STRUCT__entry(__field(struct irdma_cm_node *, cm_node)
__field(u16, ord_size)
__field(u16, ird_size)
),
TP_fast_assign(__entry->cm_node = cm_node;
__entry->ord_size = cm_node->ord_size;
__entry->ird_size = cm_node->ird_size;
),
TP_printk("MPVA2 Negotiated cm_node=%p ORD:[%d], IRD:[%d]",
__entry->cm_node,
__entry->ord_size,
__entry->ird_size
)
);
DECLARE_EVENT_CLASS(tos_template,
TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
TP_ARGS(iwdev, tos, user_pri),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u8, tos)
__field(u8, user_pri)
),
TP_fast_assign(__entry->iwdev = iwdev;
__entry->tos = tos;
__entry->user_pri = user_pri;
),
TP_printk("iwdev=%p TOS:[%d] UP:[%d]",
__entry->iwdev,
__entry->tos,
__entry->user_pri
)
);
DEFINE_EVENT(tos_template, irdma_listener_tos,
TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
TP_ARGS(iwdev, tos, user_pri));
DEFINE_EVENT(tos_template, irdma_dcb_tos,
TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
TP_ARGS(iwdev, tos, user_pri));
DECLARE_EVENT_CLASS(qhash_template,
TP_PROTO(struct irdma_device *iwdev,
struct irdma_cm_listener *listener,
char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u16, lport)
__field(u16, vlan_id)
__field(bool, ipv4)
__dynamic_array(u32, laddr, 4)
__dynamic_array(u32, mac, ETH_ALEN)
),
TP_fast_assign(__entry->iwdev = iwdev;
__entry->lport = listener->loc_port;
__entry->vlan_id = listener->vlan_id;
__entry->ipv4 = listener->ipv4;
memcpy(__get_dynamic_array(laddr),
listener->loc_addr, 4);
ether_addr_copy(__get_dynamic_array(mac),
dev_addr);
),
TP_printk("iwdev=%p vlan=%d MAC=%6phC loc: %s",
__entry->iwdev,
__entry->vlan_id,
__get_dynamic_array(mac),
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4)
)
);
DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
TP_PROTO(struct irdma_device *iwdev,
struct irdma_cm_listener *listener, char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
TP_PROTO(struct irdma_device *iwdev,
struct irdma_cm_listener *listener, char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
TRACE_EVENT(irdma_addr_resolve,
TP_PROTO(struct irdma_device *iwdev, char *dev_addr),
TP_ARGS(iwdev, dev_addr),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__dynamic_array(u8, mac, ETH_ALEN)
),
TP_fast_assign(__entry->iwdev = iwdev;
ether_addr_copy(__get_dynamic_array(mac), dev_addr);
),
TP_printk("iwdev=%p MAC=%6phC", __entry->iwdev,
__get_dynamic_array(mac)
)
);
TRACE_EVENT(irdma_send_cm_event,
TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
enum iw_cm_event_type type, int status, void *caller),
TP_ARGS(cm_node, cm_id, type, status, caller),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(struct irdma_cm_node *, cm_node)
__field(struct iw_cm_id *, cm_id)
__field(u32, refcount)
__field(u16, lport)
__field(u16, rport)
__field(enum irdma_cm_node_state, state)
__field(bool, ipv4)
__field(u16, vlan_id)
__field(int, accel)
__field(enum iw_cm_event_type, type)
__field(int, status)
__field(void *, caller)
__dynamic_array(u32, laddr, 4)
__dynamic_array(u32, raddr, 4)
),
TP_fast_assign(__entry->iwdev = cm_node->iwdev;
__entry->cm_node = cm_node;
__entry->cm_id = cm_id;
__entry->refcount = refcount_read(&cm_node->refcnt);
__entry->state = cm_node->state;
__entry->lport = cm_node->loc_port;
__entry->rport = cm_node->rem_port;
__entry->ipv4 = cm_node->ipv4;
__entry->vlan_id = cm_node->vlan_id;
__entry->accel = cm_node->accelerated;
__entry->type = type;
__entry->status = status;
__entry->caller = caller;
memcpy(__get_dynamic_array(laddr),
cm_node->loc_addr, 4);
memcpy(__get_dynamic_array(raddr),
cm_node->rem_addr, 4);
),
TP_printk("iwdev=%p caller=%pS cm_id=%p node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s status=%d loc: %s rem: %s",
__entry->iwdev,
__entry->caller,
__entry->cm_id,
__entry->cm_node,
__entry->refcount,
__entry->vlan_id,
__entry->accel,
parse_cm_state(__entry->state),
parse_iw_event_type(__entry->type),
__entry->status,
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4),
__print_ip_addr(__get_dynamic_array(raddr),
__entry->rport, __entry->ipv4)
)
);
TRACE_EVENT(irdma_send_cm_event_no_node,
TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
int status, void *caller),
TP_ARGS(cm_id, type, status, caller),
TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
__field(enum iw_cm_event_type, type)
__field(int, status)
__field(void *, caller)
),
TP_fast_assign(__entry->cm_id = cm_id;
__entry->type = type;
__entry->status = status;
__entry->caller = caller;
),
TP_printk("cm_id=%p caller=%pS event_type=%s status=%d",
__entry->cm_id,
__entry->caller,
parse_iw_event_type(__entry->type),
__entry->status
)
);
DECLARE_EVENT_CLASS(cm_node_template,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(struct irdma_cm_node *, cm_node)
__field(u32, refcount)
__field(u16, lport)
__field(u16, rport)
__field(enum irdma_cm_node_state, state)
__field(bool, ipv4)
__field(u16, vlan_id)
__field(int, accel)
__field(enum irdma_cm_event_type, type)
__field(void *, caller)
__dynamic_array(u32, laddr, 4)
__dynamic_array(u32, raddr, 4)
),
TP_fast_assign(__entry->iwdev = cm_node->iwdev;
__entry->cm_node = cm_node;
__entry->refcount = refcount_read(&cm_node->refcnt);
__entry->state = cm_node->state;
__entry->lport = cm_node->loc_port;
__entry->rport = cm_node->rem_port;
__entry->ipv4 = cm_node->ipv4;
__entry->vlan_id = cm_node->vlan_id;
__entry->accel = cm_node->accelerated;
__entry->type = type;
__entry->caller = caller;
memcpy(__get_dynamic_array(laddr),
cm_node->loc_addr, 4);
memcpy(__get_dynamic_array(raddr),
cm_node->rem_addr, 4);
),
TP_printk("iwdev=%p caller=%pS node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s loc: %s rem: %s",
__entry->iwdev,
__entry->caller,
__entry->cm_node,
__entry->refcount,
__entry->vlan_id,
__entry->accel,
parse_cm_state(__entry->state),
parse_cm_event_type(__entry->type),
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4),
__print_ip_addr(__get_dynamic_array(raddr),
__entry->rport, __entry->ipv4)
)
);
DEFINE_EVENT(cm_node_template, irdma_create_event,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_accept,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_connect,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_reject,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_find_node,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_send_reset,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_rem_ref_cm_node,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DEFINE_EVENT(cm_node_template, irdma_cm_event_handler,
TP_PROTO(struct irdma_cm_node *cm_node,
enum irdma_cm_event_type type, void *caller),
TP_ARGS(cm_node, type, caller));
DECLARE_EVENT_CLASS(open_err_template,
TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
TP_ARGS(cm_node, reset, caller),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(struct irdma_cm_node *, cm_node)
__field(enum irdma_cm_node_state, state)
__field(bool, reset)
__field(void *, caller)
),
TP_fast_assign(__entry->iwdev = cm_node->iwdev;
__entry->cm_node = cm_node;
__entry->state = cm_node->state;
__entry->reset = reset;
__entry->caller = caller;
),
TP_printk("iwdev=%p caller=%pS node%p reset=%d state=%s",
__entry->iwdev,
__entry->caller,
__entry->cm_node,
__entry->reset,
parse_cm_state(__entry->state)
)
);
DEFINE_EVENT(open_err_template, irdma_active_open_err,
TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
TP_ARGS(cm_node, reset, caller));
DEFINE_EVENT(open_err_template, irdma_passive_open_err,
TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
TP_ARGS(cm_node, reset, caller));
DECLARE_EVENT_CLASS(cm_node_ah_template,
TP_PROTO(struct irdma_cm_node *cm_node),
TP_ARGS(cm_node),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(struct irdma_cm_node *, cm_node)
__field(struct irdma_sc_ah *, ah)
__field(u32, refcount)
__field(u16, lport)
__field(u16, rport)
__field(enum irdma_cm_node_state, state)
__field(bool, ipv4)
__field(u16, vlan_id)
__field(int, accel)
__dynamic_array(u32, laddr, 4)
__dynamic_array(u32, raddr, 4)
),
TP_fast_assign(__entry->iwdev = cm_node->iwdev;
__entry->cm_node = cm_node;
__entry->ah = cm_node->ah;
__entry->refcount = refcount_read(&cm_node->refcnt);
__entry->lport = cm_node->loc_port;
__entry->rport = cm_node->rem_port;
__entry->state = cm_node->state;
__entry->ipv4 = cm_node->ipv4;
__entry->vlan_id = cm_node->vlan_id;
__entry->accel = cm_node->accelerated;
memcpy(__get_dynamic_array(laddr),
cm_node->loc_addr, 4);
memcpy(__get_dynamic_array(raddr),
cm_node->rem_addr, 4);
),
TP_printk("iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s",
__entry->iwdev,
__entry->cm_node,
__entry->ah,
__entry->refcount,
__entry->vlan_id,
__entry->accel,
parse_cm_state(__entry->state),
__print_ip_addr(__get_dynamic_array(laddr),
__entry->lport, __entry->ipv4),
__print_ip_addr(__get_dynamic_array(raddr),
__entry->rport, __entry->ipv4)
)
);
DEFINE_EVENT(cm_node_ah_template, irdma_cm_free_ah,
TP_PROTO(struct irdma_cm_node *cm_node),
TP_ARGS(cm_node));
DEFINE_EVENT(cm_node_ah_template, irdma_create_ah,
TP_PROTO(struct irdma_cm_node *cm_node),
TP_ARGS(cm_node));
#endif /* __TRACE_CM_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_cm
#include <trace/define_trace.h>
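Each TRACE_EVENT()/DEFINE_EVENT() above expands into a trace_<name>() helper that the connection manager calls at the matching point, and the events are grouped under the irdma_cm trace system. A hedged sketch of such a call site (not a quote from cm.c; the demo_ function is hypothetical):

#include "trace.h"

static void demo_trace_listen(struct irdma_device *iwdev,
			      struct irdma_cm_info *cm_info)
{
	/* records iwdev plus the local address/port and the ipv4 flag */
	trace_irdma_create_listen(iwdev, cm_info);
}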

File diff suppressed because it is too large

View File

@ -0,0 +1,271 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"
/**
* irdma_sc_access_ah() - Create, modify or delete AH
* @cqp: struct for cqp hw
* @info: ah information
* @op: Operation
* @scratch: u64 saved to be used during cqp completion
*/
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info,
u32 op, u64 scratch)
{
__le64 *wqe;
u64 qw1, qw2;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return IRDMA_ERR_RING_FULL;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
if (!info->ipv4_valid) {
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
} else {
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
}
set_64bit_val(wqe, 8, qw1);
set_64bit_val(wqe, 16, qw2);
dma_wmb(); /* need write block before writing WQE header */
set_64bit_val(
wqe, 24,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
wqe, IRDMA_CQP_WQE_SIZE * 8, false);
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
* irdma_create_mg_ctx() - create a mcg context
* @info: multicast group context info
*/
static enum irdma_status_code
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
u8 idx = 0; /* index in the array */
u8 ctx_idx = 0; /* index in the MG context */
memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
entry_info = &info->mg_ctx_info[idx];
if (entry_info->valid_entry) {
set_64bit_val((__le64 *)info->dma_mem_mc.va,
ctx_idx * sizeof(u64),
FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
ctx_idx++;
}
}
return 0;
}
/**
* irdma_access_mcast_grp() - Access mcast group based on op
* @cqp: Control QP
* @info: multicast group context info
* @op: operation to perform
* @scratch: u64 saved to be used during cqp completion
*/
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u32 op, u64 scratch)
{
__le64 *wqe;
enum irdma_status_code ret_code = 0;
if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
return IRDMA_ERR_PARAM;
}
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
return IRDMA_ERR_RING_FULL;
}
ret_code = irdma_create_mg_ctx(info);
if (ret_code)
return ret_code;
set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
set_64bit_val(wqe, 8,
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
if (!info->ipv4_valid) {
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
} else {
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
}
dma_wmb(); /* need write memory block before writing the WQE header. */
set_64bit_val(wqe, 24,
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
wqe, IRDMA_CQP_WQE_SIZE * 8, false);
print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
8, info->dma_mem_mc.va,
IRDMA_MAX_MGS_PER_CTX * 8, false);
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
* irdma_compare_mgs - Compares two multicast group structures
* @entry1: Multicast group info
* @entry2: Multicast group info in context
*/
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
struct irdma_mcast_grp_ctx_entry_info *entry2)
{
if (entry1->dest_port == entry2->dest_port &&
entry1->qp_id == entry2->qp_id)
return true;
return false;
}
/**
* irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
* @ctx: Multicast group context
* @mg: Multicast group info
*/
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
bool free_entry_found = false;
u32 free_entry_idx = 0;
/* find either an identical or a free entry for a multicast group */
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
if (ctx->mg_ctx_info[idx].valid_entry) {
if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
ctx->mg_ctx_info[idx].use_cnt++;
return 0;
}
continue;
}
if (!free_entry_found) {
free_entry_found = true;
free_entry_idx = idx;
}
}
if (free_entry_found) {
ctx->mg_ctx_info[free_entry_idx] = *mg;
ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
ctx->no_of_mgs++;
return 0;
}
return IRDMA_ERR_NO_MEMORY;
}
/**
* irdma_sc_del_mcast_grp - Delete mcast group
* @ctx: Multicast group context
* @mg: Multicast group info
*
* Finds and removes a specific multicast group from the context; all
* parameters must match to remove a multicast group.
*/
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
/* find an entry in multicast group context */
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
if (!ctx->mg_ctx_info[idx].valid_entry)
continue;
if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
ctx->mg_ctx_info[idx].use_cnt--;
if (!ctx->mg_ctx_info[idx].use_cnt) {
ctx->mg_ctx_info[idx].valid_entry = false;
ctx->no_of_mgs--;
/* Remove gap if element was not the last */
if (idx != ctx->no_of_mgs &&
ctx->no_of_mgs > 0) {
memcpy(&ctx->mg_ctx_info[idx],
&ctx->mg_ctx_info[ctx->no_of_mgs - 1],
sizeof(ctx->mg_ctx_info[idx]));
ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
}
}
return 0;
}
}
return IRDMA_ERR_PARAM;
}
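Taken together, the two helpers above implement a small per-entry reference count on the multicast context. The sketch below (hypothetical demo_ function, return values ignored for brevity) spells out the lifetime they imply:

static void demo_mcast_refcount(struct irdma_mcast_grp_info *ctx,
				struct irdma_mcast_grp_ctx_entry_info *mg)
{
	irdma_sc_add_mcast_grp(ctx, mg);	/* new entry, use_cnt = 1 */
	irdma_sc_add_mcast_grp(ctx, mg);	/* same port/QP pair, use_cnt = 2 */
	irdma_sc_del_mcast_grp(ctx, mg);	/* use_cnt = 1, entry kept */
	irdma_sc_del_mcast_grp(ctx, mg);	/* use_cnt = 0, entry removed */
}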

View File

@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2016 - 2021 Intel Corporation */
#ifndef IRDMA_UDA_H
#define IRDMA_UDA_H
#define IRDMA_UDA_MAX_FSI_MGS 4096
#define IRDMA_UDA_MAX_PFS 16
#define IRDMA_UDA_MAX_VFS 128
struct irdma_sc_cqp;
struct irdma_ah_info {
struct irdma_sc_vsi *vsi;
u32 pd_idx;
u32 dst_arpindex;
u32 dest_ip_addr[4];
u32 src_ip_addr[4];
u32 flow_label;
u32 ah_idx;
u16 vlan_tag;
u8 insert_vlan_tag;
u8 tc_tos;
u8 hop_ttl;
u8 mac_addr[ETH_ALEN];
bool ah_valid:1;
bool ipv4_valid:1;
bool do_lpbk:1;
};
struct irdma_sc_ah {
struct irdma_sc_dev *dev;
struct irdma_ah_info ah_info;
};
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg);
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg);
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
u32 op, u64 scratch);
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u32 op, u64 scratch);
static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
{
ah->dev = dev;
}
static inline enum irdma_status_code irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info,
u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
scratch);
}
static inline enum irdma_status_code irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info,
u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
scratch);
}
static inline enum irdma_status_code irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
scratch);
}
static inline enum irdma_status_code irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
scratch);
}
static inline enum irdma_status_code irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
scratch);
}
#endif /* IRDMA_UDA_H */
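The inline wrappers above all funnel into irdma_sc_access_ah()/irdma_access_mcast_grp(), each of which posts a single CQP WQE. A minimal AH-create sketch, assuming a hypothetical caller that already owns the CQP and a scratch cookie:

static enum irdma_status_code demo_post_create_ah(struct irdma_sc_cqp *cqp,
						  struct irdma_sc_ah *ah,
						  struct irdma_ah_info *info,
						  u64 scratch)
{
	irdma_sc_init_ah(cqp->dev, ah);		/* bind the AH object to the device */

	/* posts a MANAGE_AH WQE with IRDMA_CQP_OP_CREATE_ADDR_HANDLE */
	return irdma_sc_create_ah(cqp, info, scratch);
}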

View File

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2016 - 2021 Intel Corporation */
#ifndef IRDMA_UDA_D_H
#define IRDMA_UDA_D_H
/* L4 packet type */
#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
#define IRDMA_E_UDA_SQ_L4T_TCP 1
#define IRDMA_E_UDA_SQ_L4T_SCTP 2
#define IRDMA_E_UDA_SQ_L4T_UDP 3
/* Inner IP header type */
#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
#define IRDMA_UDA_QPSQ_L4T_LINE 2
#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
#define IRDMA_UDA_QPSQ_IIPT_LINE 2
#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
/* Byte Offset 0 */
#define IRDMA_UDAQPC_IPV4_M BIT_ULL(3)
#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID GENMASK_ULL(60, 60)
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD GENMASK_ULL(59, 59)
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
#endif /* IRDMA_UDA_D_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,437 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
#define IRDMA_ACCESS_FLAGS_ALL 0x3f
#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
#define IRDMA_OP_TYPE_RDMA_READ 0x01
#define IRDMA_OP_TYPE_SEND 0x03
#define IRDMA_OP_TYPE_SEND_INV 0x04
#define IRDMA_OP_TYPE_SEND_SOL 0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
#define IRDMA_OP_TYPE_BIND_MW 0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
IRDMA_CQP_WQE_SIZE = 8,
IRDMA_CQE_SIZE = 4,
IRDMA_EXTENDED_CQE_SIZE = 8,
IRDMA_AEQE_SIZE = 2,
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
IRDMA_QUERY_FPM_BUF_SIZE = 176,
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_MAX_IW_QP_ID = 262143,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
IRDMA_MIN_CQID = 0,
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
IRDMA_MAX_CQ_SIZE = 1048575,
IRDMA_DB_ID_ZERO = 0,
IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
IRDMA_MAX_SGE_RD = 13,
IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
IRDMA_MAX_PE_ENA_VF_COUNT = 32,
IRDMA_MAX_VF_FPM_ID = 47,
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
IRDMA_MAX_INLINE_DATA_SIZE = 101,
IRDMA_MAX_WQ_ENTRIES = 32768,
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
};
enum irdma_addressing_type {
IRDMA_ADDR_TYPE_ZERO_BASED = 0,
IRDMA_ADDR_TYPE_VA_BASED = 1,
};
enum irdma_flush_opcode {
FLUSH_INVALID = 0,
FLUSH_GENERAL_ERR,
FLUSH_PROT_ERR,
FLUSH_REM_ACCESS_ERR,
FLUSH_LOC_QP_OP_ERR,
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
};
enum irdma_cmpl_status {
IRDMA_COMPL_STATUS_SUCCESS = 0,
IRDMA_COMPL_STATUS_FLUSHED,
IRDMA_COMPL_STATUS_INVALID_WQE,
IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
IRDMA_COMPL_STATUS_INVALID_STAG,
IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
IRDMA_COMPL_STATUS_INVALID_PD_ID,
IRDMA_COMPL_STATUS_WRAP_ERROR,
IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
IRDMA_COMPL_STATUS_INVALID_FBO,
IRDMA_COMPL_STATUS_INVALID_LEN,
IRDMA_COMPL_STATUS_INVALID_ACCESS,
IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
IRDMA_COMPL_STATUS_INVALID_REGION,
IRDMA_COMPL_STATUS_INVALID_WINDOW,
IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
IRDMA_COMPL_STATUS_UNKNOWN,
};
enum irdma_cmpl_notify {
IRDMA_CQ_COMPL_EVENT = 0,
IRDMA_CQ_COMPL_SOLICITED = 1,
};
enum irdma_qp_caps {
IRDMA_WRITE_WITH_IMM = 1,
IRDMA_SEND_WITH_IMM = 2,
IRDMA_ROCE = 4,
IRDMA_PUSH_MODE = 8,
};
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
struct irdma_sge {
irdma_tagged_offset tag_off;
u32 len;
irdma_stag stag;
};
struct irdma_ring {
u32 head;
u32 tail;
u32 size;
};
struct irdma_cqe {
__le64 buf[IRDMA_CQE_SIZE];
};
struct irdma_extended_cqe {
__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
struct irdma_post_send {
irdma_sgl sg_list;
u32 num_sges;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_inline_send {
void *data;
u32 len;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
u32 num_sges;
};
struct irdma_rdma_write {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
struct irdma_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_bind_window {
irdma_stag mr_stag;
u64 bind_len;
void *va;
enum irdma_addressing_type addressing_type;
bool ena_reads:1;
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
};
struct irdma_inv_local_stag {
irdma_stag target_stag;
};
struct irdma_post_sq_info {
u64 wr_id;
u8 op_type;
u8 l4len;
bool signaled:1;
bool read_fence:1;
bool local_fence:1;
bool inline_data:1;
bool imm_data_valid:1;
bool push_wqe:1;
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
u32 imm_data;
u32 stag_to_inv;
union {
struct irdma_post_send send;
struct irdma_rdma_write rdma_write;
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
struct irdma_inline_rdma_write inline_rdma_write;
struct irdma_post_inline_send inline_send;
} op;
};
struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
irdma_stag inv_stag; /* or L_R_Key */
enum irdma_cmpl_status comp_status;
u16 major_err;
u16 minor_err;
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
bool solicited_event:1;
bool ipv4:1;
bool ud_vlan_valid:1;
bool ud_smac_valid:1;
bool imm_valid:1;
};
enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool inv_stag, bool post_sq);
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
};
enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
u16 quanta;
u8 reserved[2];
};
struct irdma_qp_quanta {
__le64 elem[IRDMA_WQE_SIZE];
};
struct irdma_qp_uk {
struct irdma_qp_quanta *sq_base;
struct irdma_qp_quanta *rq_base;
struct irdma_uk_attrs *uk_attrs;
u32 __iomem *wqe_alloc_db;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
__le64 *shadow_area;
__le32 *push_db;
__le64 *push_wqe;
struct irdma_ring sq_ring;
struct irdma_ring rq_ring;
struct irdma_ring initial_ring;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
struct irdma_wqe_uk_ops wqe_ops;
u16 conn_wqes;
u8 qp_type;
u8 swqe_polarity;
u8 swqe_polarity_deferred;
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool deferred_flag:1;
bool push_mode:1; /* whether the last post wqe was pushed */
bool push_dropped:1;
bool first_sq_wq:1;
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
spinlock_t *lock;
u8 dbg_rq_flushed;
u8 sq_flush_seen;
u8 rq_flush_seen;
};
struct irdma_cq_uk {
struct irdma_cqe *cq_base;
u32 __iomem *cqe_alloc_db;
u32 __iomem *cq_ack_db;
__le64 *shadow_area;
u32 cq_id;
u32 cq_size;
struct irdma_ring cq_ring;
u8 polarity;
bool avoid_mem_cflct:1;
};
struct irdma_qp_uk_init_info {
struct irdma_qp_quanta *sq;
struct irdma_qp_quanta *rq;
struct irdma_uk_attrs *uk_attrs;
u32 __iomem *wqe_alloc_db;
__le64 *shadow_area;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u8 first_sq_wq;
u8 type;
int abi_ver;
bool legacy_mode;
};
struct irdma_cq_uk_init_info {
u32 __iomem *cqe_alloc_db;
u32 __iomem *cq_ack_db;
struct irdma_cqe *cq_base;
__le64 *shadow_area;
u32 cq_size;
u32 cq_id;
bool avoid_mem_cflct;
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
u32 sq_size, u8 shift, u32 *wqdepth);
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
u32 rq_size, u8 shift, u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */
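The post/poll API declared above is shared between the kernel verbs layer and the userspace provider. Below is a minimal sketch of posting an inline RDMA WRITE through it; the IRDMA_OP_TYPE_RDMA_WRITE opcode name and the surrounding QP setup are assumptions, since those definitions live earlier in this header and in the suppressed diffs above.

/* Minimal sketch, assuming an initialized struct irdma_qp_uk and the
 * opcode constant IRDMA_OP_TYPE_RDMA_WRITE defined earlier in this
 * header. Not the driver's own code path, only an illustration of the
 * struct irdma_post_sq_info plumbing.
 */
static enum irdma_status_code post_small_inline_write(struct irdma_qp_uk *qp,
						      u64 wr_id, void *buf,
						      u32 len, u64 remote_va,
						      u32 rkey)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = wr_id;
	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;	/* assumed name */
	info.signaled = true;
	info.inline_data = true;			/* len must fit qp->max_inline_data */
	info.op.inline_rdma_write.data = buf;
	info.op.inline_rdma_write.len = len;
	info.op.inline_rdma_write.rem_addr.tag_off = remote_va;
	info.op.inline_rdma_write.rem_addr.stag = rkey;

	/* post_sq == true rings the doorbell via irdma_uk_qp_post_wr() */
	return irdma_uk_inline_rdma_write(qp, &info, true);
}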

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,225 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
struct irdma_ucontext {
struct ib_ucontext ibucontext;
struct irdma_device *iwdev;
struct rdma_user_mmap_entry *db_mmap_entry;
struct list_head cq_reg_mem_list;
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
int abi_ver;
bool legacy_mode;
};
struct irdma_pd {
struct ib_pd ibpd;
struct irdma_sc_pd sc_pd;
};
struct irdma_av {
u8 macaddr[16];
struct rdma_ah_attr attrs;
union {
struct sockaddr saddr;
struct sockaddr_in saddr_in;
struct sockaddr_in6 saddr_in6;
} sgid_addr, dgid_addr;
u8 net_type;
};
struct irdma_ah {
struct ib_ah ibah;
struct irdma_sc_ah sc_ah;
struct irdma_pd *pd;
struct irdma_av av;
u8 sgid_index;
union ib_gid dgid;
};
struct irdma_hmc_pble {
union {
u32 idx;
dma_addr_t addr;
};
};
struct irdma_cq_mr {
struct irdma_hmc_pble cq_pbl;
dma_addr_t shadow;
bool split;
};
struct irdma_qp_mr {
struct irdma_hmc_pble sq_pbl;
struct irdma_hmc_pble rq_pbl;
dma_addr_t shadow;
struct page *sq_page;
};
struct irdma_cq_buf {
struct irdma_dma_mem kmem_buf;
struct irdma_cq_uk cq_uk;
struct irdma_hw *hw;
struct list_head list;
struct work_struct work;
};
struct irdma_pbl {
struct list_head list;
union {
struct irdma_qp_mr qp_mr;
struct irdma_cq_mr cq_mr;
};
bool pbl_allocated:1;
bool on_list:1;
u64 user_base;
struct irdma_pble_alloc pble_alloc;
struct irdma_mr *iwmr;
};
struct irdma_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
};
struct ib_umem *region;
u16 type;
u32 page_cnt;
u64 page_size;
u32 npages;
u32 stag;
u64 len;
u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
struct irdma_pbl iwpbl;
};
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
u16 cq_head;
u16 cq_size;
u16 cq_num;
bool user_mode;
u32 polled_cmpls;
u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
spinlock_t lock; /* for poll cq */
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
};
struct disconn_work {
struct work_struct work;
struct irdma_qp *iwqp;
};
struct iw_cm_id;
struct irdma_qp_kmode {
struct irdma_dma_mem dma_mem;
struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
u64 *rq_wrid_mem;
};
struct irdma_qp {
struct ib_qp ibqp;
struct irdma_sc_qp sc_qp;
struct irdma_device *iwdev;
struct irdma_cq *iwscq;
struct irdma_cq *iwrcq;
struct irdma_pd *iwpd;
struct rdma_user_mmap_entry *push_wqe_mmap_entry;
struct rdma_user_mmap_entry *push_db_mmap_entry;
struct irdma_qp_host_ctx_info ctx_info;
union {
struct irdma_iwarp_offload_info iwarp_info;
struct irdma_roce_offload_info roce_info;
};
union {
struct irdma_tcp_offload_info tcp_info;
struct irdma_udp_offload_info udp_info;
};
struct irdma_ah roce_ah;
struct list_head teardown_entry;
refcount_t refcnt;
struct iw_cm_id *cm_id;
struct irdma_cm_node *cm_node;
struct ib_mr *lsmm_mr;
atomic_t hw_mod_qp_pend;
enum ib_qp_state ibqp_state;
u32 qp_mem_size;
u32 last_aeq;
int max_send_wr;
int max_recv_wr;
atomic_t close_timer_started;
spinlock_t lock; /* serialize posting WRs to SQ/RQ */
struct irdma_qp_context *iwqp_context;
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
u8 active_conn : 1;
u8 user_mode : 1;
u8 hte_added : 1;
u8 flush_issued : 1;
u8 sig_all : 1;
u8 pau_mode : 1;
u8 rsvd : 1;
u8 iwarp_state;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
u8 hw_iwarp_state;
u8 hw_tcp_state;
struct irdma_qp_kmode kqp;
struct irdma_dma_mem host_ctx;
struct timer_list terminate_timer;
struct irdma_pbl *iwpbl;
struct irdma_dma_mem q2_ctx_mem;
struct irdma_dma_mem ietf_mem;
struct completion free_qp;
wait_queue_head_t waitq;
wait_queue_head_t mod_qp_waitq;
u8 rts_ae_rcvd;
};
enum irdma_mmap_flag {
IRDMA_MMAP_IO_NC,
IRDMA_MMAP_IO_WC,
};
struct irdma_user_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
u64 bar_offset;
u8 mmap_flag;
};
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
#endif /* IRDMA_VERBS_H */
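Each structure above embeds its ib_core counterpart (ibucontext, ibpd, ibah, ibcq, ibqp, ibmr/ibmw), so verbs callbacks recover the irdma object with container_of(). A sketch of that pattern follows; the helper names are illustrative, not necessarily the ones the driver uses.

/* Sketch of the embed-and-container_of pattern implied by the structs
 * above; helper names are illustrative.
 */
static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct irdma_qp, ibqp);
}

static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct irdma_cq, ibcq);
}

static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct irdma_pd, ibpd);
}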

View File

@ -0,0 +1,406 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2017 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "ws.h"
/**
* irdma_alloc_node - Allocate and initialize a WS node
* @vsi: vsi pointer
* @user_pri: user priority
* @node_type: Type of node, leaf or parent
* @parent: parent node pointer
*/
static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi,
u8 user_pri,
enum irdma_ws_node_type node_type,
struct irdma_ws_node *parent)
{
struct irdma_virt_mem ws_mem;
struct irdma_ws_node *node;
u16 node_index = 0;
ws_mem.size = sizeof(struct irdma_ws_node);
ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
if (!ws_mem.va)
return NULL;
if (parent) {
node_index = irdma_alloc_ws_node_id(vsi->dev);
if (node_index == IRDMA_WS_NODE_INVALID) {
kfree(ws_mem.va);
return NULL;
}
}
node = ws_mem.va;
node->index = node_index;
node->vsi_index = vsi->vsi_idx;
INIT_LIST_HEAD(&node->child_list_head);
if (node_type == WS_NODE_TYPE_LEAF) {
node->type_leaf = true;
node->traffic_class = vsi->qos[user_pri].traffic_class;
node->user_pri = user_pri;
node->rel_bw = vsi->qos[user_pri].rel_bw;
if (!node->rel_bw)
node->rel_bw = 1;
node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
} else {
node->rel_bw = 1;
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
node->enable = true;
}
node->parent = parent;
return node;
}
/**
* irdma_free_node - Free a WS node
* @vsi: VSI structure of device

* @node: Pointer to node to free
*/
static void irdma_free_node(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *node)
{
struct irdma_virt_mem ws_mem;
if (node->index)
irdma_free_ws_node_id(vsi->dev, node->index);
ws_mem.va = node;
ws_mem.size = sizeof(struct irdma_ws_node);
kfree(ws_mem.va);
}
/**
* irdma_ws_cqp_cmd - Post CQP work scheduler node cmd
* @vsi: vsi pointer
* @node: pointer to node
* @cmd: add, remove or modify
*/
static enum irdma_status_code
irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
{
struct irdma_ws_node_info node_info = {};
node_info.id = node->index;
node_info.vsi = node->vsi_index;
if (node->parent)
node_info.parent_id = node->parent->index;
else
node_info.parent_id = node_info.id;
node_info.weight = node->rel_bw;
node_info.tc = node->traffic_class;
node_info.prio_type = node->prio_type;
node_info.type_leaf = node->type_leaf;
node_info.enable = node->enable;
if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
return IRDMA_ERR_NO_MEMORY;
}
if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
node->qs_handle = node_info.qs_handle;
vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
}
return 0;
}
/**
* ws_find_node - Find SC WS node based on VSI id or TC
* @parent: parent node of First VSI or TC node
* @match_val: value to match
* @type: match type VSI/TC
*/
static struct irdma_ws_node *ws_find_node(struct irdma_ws_node *parent,
u16 match_val,
enum irdma_ws_match_type type)
{
struct irdma_ws_node *node;
switch (type) {
case WS_MATCH_TYPE_VSI:
list_for_each_entry(node, &parent->child_list_head, siblings) {
if (node->vsi_index == match_val)
return node;
}
break;
case WS_MATCH_TYPE_TC:
list_for_each_entry(node, &parent->child_list_head, siblings) {
if (node->traffic_class == match_val)
return node;
}
break;
default:
break;
}
return NULL;
}
/**
* irdma_tc_in_use - Checks to see if a leaf node is in use
* @vsi: vsi pointer
* @user_pri: user priority
*/
static bool irdma_tc_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
{
int i;
mutex_lock(&vsi->qos[user_pri].qos_mutex);
if (!list_empty(&vsi->qos[user_pri].qplist)) {
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return true;
}
/* Check if the traffic class associated with the given user priority
* is in use by any other user priority. If so, nothing left to do
*/
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class &&
!list_empty(&vsi->qos[i].qplist)) {
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return true;
}
}
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return false;
}
/**
* irdma_remove_leaf - Remove leaf node unconditionally
* @vsi: vsi pointer
* @user_pri: user priority
*/
static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
{
struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node;
int i;
u16 traffic_class;
traffic_class = vsi->qos[user_pri].traffic_class;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
if (vsi->qos[i].traffic_class == traffic_class)
vsi->qos[i].valid = false;
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root)
return;
vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
WS_MATCH_TYPE_VSI);
if (!vsi_node)
return;
tc_node = ws_find_node(vsi_node,
vsi->qos[user_pri].traffic_class,
WS_MATCH_TYPE_TC);
if (!tc_node)
return;
irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
vsi->unregister_qset(vsi, tc_node);
list_del(&tc_node->siblings);
irdma_free_node(vsi, tc_node);
/* Check if VSI node can be freed */
if (list_empty(&vsi_node->child_list_head)) {
irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
/* Free head node if there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
irdma_ws_cqp_cmd(vsi, ws_tree_root,
IRDMA_OP_WS_DELETE_NODE);
irdma_free_node(vsi, ws_tree_root);
vsi->dev->ws_tree_root = NULL;
}
}
}
/**
* irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
* @vsi: vsi pointer
* @user_pri: user priority
*/
enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
struct irdma_ws_node *ws_tree_root;
struct irdma_ws_node *vsi_node;
struct irdma_ws_node *tc_node;
u16 traffic_class;
enum irdma_status_code ret = 0;
int i;
mutex_lock(&vsi->dev->ws_mutex);
if (vsi->tc_change_pending) {
ret = IRDMA_ERR_NOT_READY;
goto exit;
}
if (vsi->qos[user_pri].valid)
goto exit;
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root) {
ibdev_dbg(to_ibdev(vsi->dev), "WS: Creating root node\n");
ws_tree_root = irdma_alloc_node(vsi, user_pri,
WS_NODE_TYPE_PARENT, NULL);
if (!ws_tree_root) {
ret = IRDMA_ERR_NO_MEMORY;
goto exit;
}
ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, ws_tree_root);
goto exit;
}
vsi->dev->ws_tree_root = ws_tree_root;
}
/* Find a second tier node that matches the VSI */
vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
WS_MATCH_TYPE_VSI);
/* If VSI node doesn't exist, add one */
if (!vsi_node) {
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Node not found matching VSI %d\n",
vsi->vsi_idx);
vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
ws_tree_root);
if (!vsi_node) {
ret = IRDMA_ERR_NO_MEMORY;
goto vsi_add_err;
}
ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, vsi_node);
goto vsi_add_err;
}
list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d\n",
vsi_node->index, vsi->vsi_idx);
traffic_class = vsi->qos[user_pri].traffic_class;
tc_node = ws_find_node(vsi_node, traffic_class,
WS_MATCH_TYPE_TC);
if (!tc_node) {
/* Add leaf node */
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Node not found matching VSI %d and TC %d\n",
vsi->vsi_idx, traffic_class);
tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
vsi_node);
if (!tc_node) {
ret = IRDMA_ERR_NO_MEMORY;
goto leaf_add_err;
}
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, tc_node);
goto leaf_add_err;
}
list_add(&tc_node->siblings, &vsi_node->child_list_head);
/*
* callback to LAN to update the LAN tree with our node
*/
ret = vsi->register_qset(vsi, tc_node);
if (ret)
goto reg_err;
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
if (ret)
goto reg_err;
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d TC %d\n",
tc_node->index, vsi->vsi_idx, traffic_class);
/*
* Iterate through other UPs and update the QS handle if they have
* a matching traffic class.
*/
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].traffic_class == traffic_class) {
vsi->qos[i].qs_handle = tc_node->qs_handle;
vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
vsi->qos[i].valid = true;
}
}
goto exit;
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
goto exit;
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
}
vsi_add_err:
/* Free head node if there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
vsi->dev->ws_tree_root = NULL;
irdma_free_node(vsi, ws_tree_root);
}
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
reg_err:
mutex_unlock(&vsi->dev->ws_mutex);
irdma_ws_remove(vsi, user_pri);
return ret;
}
/**
* irdma_ws_remove - Free WS scheduler node, update WS tree
* @vsi: vsi pointer
* @user_pri: user priority
*/
void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
{
mutex_lock(&vsi->dev->ws_mutex);
if (irdma_tc_in_use(vsi, user_pri))
goto exit;
irdma_remove_leaf(vsi, user_pri);
exit:
mutex_unlock(&vsi->dev->ws_mutex);
}
/**
* irdma_ws_reset - Reset entire WS tree
* @vsi: vsi pointer
*/
void irdma_ws_reset(struct irdma_sc_vsi *vsi)
{
u8 i;
mutex_lock(&vsi->dev->ws_mutex);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i)
irdma_remove_leaf(vsi, i);
mutex_unlock(&vsi->dev->ws_mutex);
}

View File

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_WS_H
#define IRDMA_WS_H
#include "osdep.h"
enum irdma_ws_node_type {
WS_NODE_TYPE_PARENT,
WS_NODE_TYPE_LEAF,
};
enum irdma_ws_match_type {
WS_MATCH_TYPE_VSI,
WS_MATCH_TYPE_TC,
};
struct irdma_ws_node {
struct list_head siblings;
struct list_head child_list_head;
struct irdma_ws_node *parent;
u64 lan_qs_handle; /* opaque handle used by LAN */
u32 l2_sched_node_id;
u16 index;
u16 qs_handle;
u16 vsi_index;
u8 traffic_class;
u8 user_pri;
u8 rel_bw;
u8 abstraction_layer; /* used for splitting a TC */
u8 prio_type;
bool type_leaf:1;
bool enable:1;
};
struct irdma_sc_vsi;
enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_reset(struct irdma_sc_vsi *vsi);
#endif /* IRDMA_WS_H */
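A caller-side sketch of the work scheduler API: irdma_ws_add() builds the root/VSI/TC-leaf path on demand and publishes the resulting qs_handle through vsi->qos[], while irdma_ws_remove() only tears the leaf down once no QP on that traffic class remains. The helper names below are illustrative.

/* Sketch, assuming a configured struct irdma_sc_vsi; error handling is
 * illustrative only.
 */
static enum irdma_status_code qp_attach_user_pri(struct irdma_sc_vsi *vsi,
						 u8 user_pri)
{
	enum irdma_status_code ret;

	ret = irdma_ws_add(vsi, user_pri);
	if (ret)
		return ret;

	/* QPs on this priority now send on vsi->qos[user_pri].qs_handle */
	return 0;
}

static void qp_detach_user_pri(struct irdma_sc_vsi *vsi, u8 user_pri)
{
	/* a no-op while other QPs still use the same traffic class */
	irdma_ws_remove(vsi, user_pri);
}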

View File

@ -241,6 +241,7 @@ config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
imply PTP_1588_CLOCK
depends on PCI
select AUXILIARY_BUS
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
@ -294,6 +295,7 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
select AUXILIARY_BUS
select DIMLIB
select NET_DEVLINK
select PLDMFW

View File

@ -870,6 +870,8 @@ struct i40e_netdev_priv {
struct i40e_vsi *vsi;
};
extern struct ida i40e_client_ida;
/* struct that defines an interrupt vector */
struct i40e_q_vector {
struct i40e_vsi *vsi;

View File

@ -8,10 +8,9 @@
#include "i40e.h"
#include "i40e_prototype.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
DEFINE_IDA(i40e_client_ida);
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
@ -275,6 +274,57 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
}
static void i40e_auxiliary_dev_release(struct device *dev)
{
struct i40e_auxiliary_device *i40e_aux_dev =
container_of(dev, struct i40e_auxiliary_device, aux_dev.dev);
ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id);
kfree(i40e_aux_dev);
}
static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
{
struct i40e_auxiliary_device *i40e_aux_dev;
struct pci_dev *pdev = ldev->pcidev;
struct auxiliary_device *aux_dev;
int ret;
i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL);
if (!i40e_aux_dev)
return -ENOMEM;
i40e_aux_dev->ldev = ldev;
aux_dev = &i40e_aux_dev->aux_dev;
aux_dev->name = name;
aux_dev->dev.parent = &pdev->dev;
aux_dev->dev.release = i40e_auxiliary_dev_release;
ldev->aux_dev = aux_dev;
ret = ida_alloc(&i40e_client_ida, GFP_KERNEL);
if (ret < 0) {
kfree(i40e_aux_dev);
return ret;
}
aux_dev->id = ret;
ret = auxiliary_device_init(aux_dev);
if (ret < 0) {
ida_free(&i40e_client_ida, aux_dev->id);
kfree(i40e_aux_dev);
return ret;
}
ret = auxiliary_device_add(aux_dev);
if (ret) {
auxiliary_device_uninit(aux_dev);
return ret;
}
return ret;
}
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
@ -286,9 +336,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
if (!registered_client || pf->cinst)
return;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return;
@ -308,11 +355,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->lan_info.fw_build = pf->hw.aq.fw_build;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
kfree(cdev);
cdev = NULL;
return;
}
if (i40e_client_get_params(vsi, &cdev->lan_info.params))
goto free_cdev;
mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@ -321,10 +365,19 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
cdev->client = registered_client;
pf->cinst = cdev;
i40e_client_update_msix_info(pf);
cdev->lan_info.msix_count = pf->num_iwarp_msix;
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp"))
goto free_cdev;
return;
free_cdev:
kfree(cdev);
pf->cinst = NULL;
}
/**
@ -345,7 +398,7 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client *client = registered_client;
struct i40e_client *client;
struct i40e_client_instance *cdev;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
@ -359,9 +412,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
if (!client || !cdev)
if (!cdev || !cdev->client)
return;
client = cdev->client;
/* Here we handle client opens. If the client is down, and
* the netdev is registered, then open the client.
*/
@ -423,16 +478,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.bus_id,
pf->hw.bus.device, pf->hw.bus.func);
/* If a client has already been registered, we need to add an instance
* of it to our new LAN device.
*/
if (registered_client)
i40e_client_add_instance(pf);
i40e_client_add_instance(pf);
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients if
* they can be launched at probe time.
*/
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@ -449,9 +496,13 @@ out:
**/
int i40e_lan_del_device(struct i40e_pf *pf)
{
struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev;
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
auxiliary_device_delete(aux_dev);
auxiliary_device_uninit(aux_dev);
/* First, remove any client instance. */
i40e_client_del_instance(pf);
@ -471,69 +522,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
return ret;
}
/**
* i40e_client_release - release client specific resources
* @client: pointer to the registered client
*
**/
static void i40e_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cdev;
struct i40e_device *ldev;
struct i40e_pf *pf;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
cdev = pf->cinst;
if (!cdev)
continue;
while (test_and_set_bit(__I40E_SERVICE_SCHED,
pf->state))
usleep_range(500, 1000);
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
i40e_client_release_qvlist(&cdev->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
dev_warn(&pf->pdev->dev,
"Client %s instance for PF id %d closed\n",
client->name, pf->hw.pf_id);
}
/* delete the client instance */
i40e_client_del_instance(pf);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
mutex_unlock(&i40e_device_mutex);
}
/**
* i40e_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
**/
static void i40e_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
struct i40e_pf *pf;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
i40e_client_add_instance(pf);
/* Start the client subtask */
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
mutex_unlock(&i40e_device_mutex);
}
/**
* i40e_client_virtchnl_send - TBD
* @ldev: pointer to L2 context
@ -579,7 +567,7 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
u32 v_idx, i, reg_idx, reg;
ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
qvlist_info->num_vectors - 1), GFP_KERNEL);
qvlist_info->num_vectors), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
@ -732,81 +720,34 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return err;
}
/**
* i40e_register_client - Register a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_register_client(struct i40e_client *client)
void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client)
{
int ret = 0;
struct i40e_pf *pf = ldev->pf;
if (!client) {
ret = -EIO;
goto out;
}
if (strlen(client->name) == 0) {
pr_info("i40e: Failed to register client with no name\n");
ret = -EIO;
goto out;
}
if (registered_client) {
pr_info("i40e: Client %s has already been registered!\n",
client->name);
ret = -EEXIST;
goto out;
}
if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
(client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
client->name);
pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
client->version.major, client->version.minor,
client->version.build,
i40e_client_interface_version_str);
ret = -EIO;
goto out;
}
registered_client = client;
i40e_client_prepare(client);
pr_info("i40e: Registered client %s\n", client->name);
out:
return ret;
pf->cinst->client = client;
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
EXPORT_SYMBOL(i40e_register_client);
EXPORT_SYMBOL_GPL(i40e_client_device_register);
/**
* i40e_unregister_client - Unregister a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40e_unregister_client(struct i40e_client *client)
void i40e_client_device_unregister(struct i40e_info *ldev)
{
int ret = 0;
struct i40e_pf *pf = ldev->pf;
struct i40e_client_instance *cdev = pf->cinst;
if (registered_client != client) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
if (!cdev)
return;
while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
usleep_range(500, 1000);
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
}
registered_client = NULL;
/* When a unregister request comes through we would have to send
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
i40e_client_release(client);
pr_info("i40e: Unregistered client %s\n", client->name);
out:
return ret;
pf->cinst->client = NULL;
clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
EXPORT_SYMBOL(i40e_unregister_client);
EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
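With the old register/unregister entry points removed, the RDMA side binds through the auxiliary bus and calls i40e_client_device_register() from its probe. The sketch below assumes struct i40e_auxiliary_device exposes the ldev pointer set up in i40e_register_auxiliary_dev() above; names and the elided client ops are illustrative (the real consumer is the irdma driver).

/* Sketch of the auxiliary-driver side pairing with
 * i40e_client_device_register()/i40e_client_device_unregister().
 */
static struct i40e_client example_client = {
	.name = "irdma",
	/* .ops = &example_client_ops (open/close/...) */
};

static int example_probe(struct auxiliary_device *aux_dev,
			 const struct auxiliary_device_id *id)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_register(i40e_adev->ldev, &example_client);
	return 0;
}

static void example_remove(struct auxiliary_device *aux_dev)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_unregister(i40e_adev->ldev);
}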

View File

@ -16270,6 +16270,7 @@ static void __exit i40e_exit_module(void)
{
pci_unregister_driver(&i40e_driver);
destroy_workqueue(i40e_wq);
ida_destroy(&i40e_client_ida);
i40e_dbg_exit();
}
module_exit(i40e_exit_module);

View File

@ -22,6 +22,7 @@ ice-y := ice_main.o \
ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \

View File

@ -34,6 +34,7 @@
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
@ -55,6 +56,7 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
#include "ice_fdir.h"
@ -78,6 +80,8 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@ -88,8 +92,9 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
@ -203,9 +208,9 @@ enum ice_pf_state {
ICE_NEEDS_RESTART,
ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
ICE_PFR_REQ, /* set by driver and peers */
ICE_CORER_REQ, /* set by driver and peers */
ICE_GLOBR_REQ, /* set by driver and peers */
ICE_PFR_REQ, /* set by driver */
ICE_CORER_REQ, /* set by driver */
ICE_GLOBR_REQ, /* set by driver */
ICE_CORER_RECV, /* set by OICR handler */
ICE_GLOBR_RECV, /* set by OICR handler */
ICE_EMPR_RECV, /* set by OICR handler */
@ -332,6 +337,7 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
@ -373,12 +379,14 @@ struct ice_q_vector {
enum ice_pf_flags {
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RDMA_ENA,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
@ -439,6 +447,8 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@ -471,6 +481,8 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
@ -636,6 +648,9 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
@ -660,4 +675,25 @@ int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
* @pf: PF struct
*/
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
ice_plug_aux_dev(pf);
}
}
/**
* ice_clear_rdma_cap - disable RDMA support
* @pf: PF struct
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
#endif /* _ICE_H_ */

View File

@ -115,6 +115,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@ -1684,6 +1685,36 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
/* Add Tx RDMA Queue Set (indirect 0x0C33) */
struct ice_aqc_add_rdma_qset {
u8 num_qset_grps;
u8 reserved[7];
__le32 addr_high;
__le32 addr_low;
};
/* This is the descriptor of each Qset entry for the Add Tx RDMA Queue Set
* command (0x0C33). Only used within struct ice_aqc_add_rdma_qset_data.
*/
struct ice_aqc_add_tx_rdma_qset_entry {
__le16 tx_qset_id;
u8 rsvd[2];
__le32 qset_teid;
struct ice_aqc_txsched_elem info;
};
/* The format of the command buffer for Add Tx RDMA Queue Set (0x0C33)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_rdma_qset_data is variable due to the variable
* number of queues in each group!
*/
struct ice_aqc_add_rdma_qset_data {
__le32 parent_teid;
__le16 num_qsets;
u8 rsvd[2];
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
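Because ice_aqc_add_rdma_qset_data ends in a flexible array, per-group buffers are sized with struct_size(), mirroring the sizing done by the AQ wrapper in ice_common.c further down. A small sketch of allocating a single-group buffer (scheduler element setup omitted, names illustrative):

/* Sketch only: allocate and size one Qset group for the 0x0C33 command. */
static struct ice_aqc_add_rdma_qset_data *
example_alloc_qset_group(__le32 parent_teid, u16 num_qsets, u16 *buf_size)
{
	struct ice_aqc_add_rdma_qset_data *buf;

	*buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(*buf_size, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->parent_teid = parent_teid;
	buf->num_qsets = cpu_to_le16(num_qsets);
	return buf;
}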
/* Configure Firmware Logging Command (indirect 0xFF09)
* Logging Information Read Response (indirect 0xFF10)
* Note: The 0xFF10 command has no input parameters.
@ -1880,6 +1911,7 @@ struct ice_aq_desc {
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
@ -2028,6 +2060,7 @@ enum ice_adminq_opc {
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,

View File

@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@ -1062,7 +1063,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
GLNVM_ULD_POR_DONE_1_M |\
GLNVM_ULD_PCIER_DONE_2_M)
uld_mask = ICE_RESET_DONE_MASK;
uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
GLNVM_ULD_PE_DONE_M : 0);
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
@ -1938,6 +1940,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
case ICE_AQC_CAPS_RDMA:
caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@ -1971,6 +1977,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
caps->maxtc = 4;
ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
if (caps->rdma) {
ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
caps->rdma = 0;
}
/* print message only when processing device capabilities
* during initialization.
*/
if (caps == &hw->dev_caps.common_cap)
dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
}
}
@ -3635,6 +3651,52 @@ do_aq:
return status;
}
/**
* ice_aq_add_rdma_qsets
* @hw: pointer to the hardware structure
* @num_qset_grps: Number of RDMA Qset groups
* @qset_list: list of Qset groups to be added
* @buf_size: size of buffer for indirect command
* @cd: pointer to command details structure or NULL
*
* Add Tx RDMA Qsets (0x0C33)
*/
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
struct ice_aqc_add_rdma_qset_data *qset_list,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_add_rdma_qset_data *list;
struct ice_aqc_add_rdma_qset *cmd;
struct ice_aq_desc desc;
u16 i, sum_size = 0;
cmd = &desc.params.add_rdma_qset;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
return -EINVAL;
for (i = 0, list = qset_list; i < num_qset_grps; i++) {
u16 num_qsets = le16_to_cpu(list->num_qsets);
sum_size += struct_size(list, rdma_qsets, num_qsets);
list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
num_qsets);
}
if (buf_size != sum_size)
return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_qset_grps = num_qset_grps;
return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
buf_size, cd));
}
/* End of FW Admin Queue command wrappers */
/**
@ -4132,6 +4194,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
ICE_SCHED_NODE_OWNER_LAN);
}
/**
* ice_cfg_vsi_rdma - configure the VSI RDMA queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @max_rdmaqs: max RDMA queues array per TC
*
* This function adds/updates the VSI RDMA queues per TC.
*/
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
max_rdmaqs,
ICE_SCHED_NODE_OWNER_RDMA));
}
/**
* ice_ena_vsi_rdma_qset
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
* @rdma_qset: pointer to RDMA Qset
* @num_qsets: number of RDMA Qsets
* @qset_teid: pointer to Qset node TEIDs
*
* This function adds RDMA Qsets to the given TC.
*/
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
int ret;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
buf_size = struct_size(buf, rdma_qsets, num_qsets);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&pi->sched_lock);
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_RDMA);
if (!parent) {
ret = -EINVAL;
goto rdma_error_exit;
}
buf->parent_teid = parent->info.node_teid;
node.parent_teid = parent->info.node_teid;
buf->num_qsets = cpu_to_le16(num_qsets);
for (i = 0; i < num_qsets; i++) {
buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
buf->rdma_qsets[i].info.valid_sections =
ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
ICE_AQC_ELEM_VALID_EIR;
buf->rdma_qsets[i].info.generic = 0;
buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
buf->rdma_qsets[i].info.cir_bw.bw_alloc =
cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
buf->rdma_qsets[i].info.eir_bw.bw_alloc =
cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
}
ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
if (ret) {
ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
goto rdma_error_exit;
}
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
&node);
if (status) {
ret = ice_status_to_errno(status);
break;
}
qset_teid[i] = le32_to_cpu(node.node_teid);
}
rdma_error_exit:
mutex_unlock(&pi->sched_lock);
kfree(buf);
return ret;
}
/**
* ice_dis_vsi_rdma_qset - free RDMA resources
* @pi: port_info struct
* @count: number of RDMA Qsets to free
* @qset_teid: TEID of Qset node
* @q_id: list of queue IDs being disabled
*/
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
enum ice_status status = 0;
struct ice_hw *hw;
u16 qg_size;
int i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
hw = pi->hw;
qg_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(qg_size, GFP_KERNEL);
if (!qg_list)
return -ENOMEM;
mutex_lock(&pi->sched_lock);
for (i = 0; i < count; i++) {
struct ice_sched_node *node;
node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
if (!node)
continue;
qg_list->parent_teid = node->info.parent_teid;
qg_list->num_qs = 1;
qg_list->q_id[0] =
cpu_to_le16(q_id[i] |
ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
ICE_NO_RESET, 0, NULL);
if (status)
break;
ice_free_sched_node(pi, node);
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
return ice_status_to_errno(status);
}
/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct

View File

@ -147,6 +147,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs);
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,

View File

@ -275,6 +275,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
int ret = ICE_DCB_NO_HW_CHG;
struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@ -313,6 +314,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
/* Notify AUX drivers about impending change to TCs */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return -ENOMEM;
set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
@ -640,6 +650,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
u8 tc_map = 0;
int v, ret;
@ -675,6 +686,14 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
/**

View File

@ -110,8 +110,6 @@
#define VPGEN_VFRSTAT_VFRD_M BIT(0)
#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_CTL 0x0016CC54
#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
#define GLINT_CTL_ITR_GRAN_200_S 16
@ -160,6 +158,7 @@
#define PFINT_OICR_GRST_M BIT(20)
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_PUSH_M BIT(27)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_SWINT_M BIT(31)

View File

@ -0,0 +1,334 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
* ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
* @pf: pointer to PF struct
*
* This function has to be called with a device_lock on the
* pf->adev.dev to avoid race conditions.
*/
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
struct auxiliary_device *adev;
adev = pf->adev;
if (!adev || !adev->dev.driver)
return NULL;
return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
adrv.driver);
}
/**
* ice_send_event_to_aux - send event to RDMA AUX driver
* @pf: pointer to PF struct
* @event: event struct
*/
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
struct iidc_auxiliary_drv *iadrv;
if (!pf->adev)
return;
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
}
/**
* ice_find_vsi - Find the VSI from VSI ID
* @pf: The PF pointer to search in
* @vsi_num: The VSI ID to search for
*/
static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
int i;
ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
return pf->vsi[i];
return NULL;
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
* @pf: PF struct
* @qset: Resource to be allocated
*/
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
if (WARN_ON(!pf || !qset))
return -EINVAL;
dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
if (!vsi) {
dev_err(dev, "RDMA QSet invalid VSI\n");
return -EINVAL;
}
ice_for_each_traffic_class(i)
max_rdmaqs[i] = 0;
max_rdmaqs[qset->tc]++;
qs_handle = qset->qs_handle;
status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_rdmaqs);
if (status) {
dev_err(dev, "Failed VSI RDMA Qset config\n");
return status;
}
status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
&qs_handle, 1, &qset_teid);
if (status) {
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
* @pf: PF struct
* @qset: Resource to be freed
*/
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
u32 teid;
u16 q_id;
if (WARN_ON(!pf || !qset))
return -EINVAL;
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
return -EINVAL;
}
q_id = qset->qs_handle;
teid = qset->teid;
vsi->qset_handle[qset->tc] = 0;
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
* @pf: struct for PF
* @reset_type: type of reset
*/
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
enum ice_reset_req reset;
if (WARN_ON(!pf))
return -EINVAL;
switch (reset_type) {
case IIDC_PFR:
reset = ICE_RESET_PFR;
break;
case IIDC_CORER:
reset = ICE_RESET_CORER;
break;
case IIDC_GLOBR:
reset = ICE_RESET_GLOBR;
break;
default:
dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
* @pf: pointer to struct for PF
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
*/
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
int status;
if (WARN_ON(!pf))
return -EINVAL;
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
enable ? "en" : "dis");
} else {
if (enable)
vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
}
return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
* ice_get_qos_params - parse QoS params for RDMA consumption
* @pf: pointer to PF struct
* @qos: set of QoS values
*/
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
struct ice_dcbx_cfg *dcbx_cfg;
unsigned int i;
u32 up2tc;
dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
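The exported helpers above form the PF-side half of the IIDC contract: the RDMA auxiliary driver queries QoS, creates a Tx Qset leaf per traffic class it uses, and enables the PE filter on the VSI. A consumer-side sketch follows; function and field choices (priority 0, filter enable) are illustrative, not the irdma driver's exact flow.

/* Sketch: how an RDMA auxiliary driver might consume the exported IIDC
 * helpers above. Error handling trimmed for brevity.
 */
static int example_rdma_setup_qset(struct ice_pf *pf, u16 vport_id,
				   u16 qs_handle)
{
	struct iidc_qos_params qos = {};
	struct iidc_rdma_qset_params qset = {};
	int err;

	ice_get_qos_params(pf, &qos);

	qset.qs_handle = qs_handle;
	qset.vport_id = vport_id;
	qset.tc = qos.up2tc[0];		/* user priority 0 for the sketch */

	err = ice_add_rdma_qset(pf, &qset);
	if (err)
		return err;

	/* qset.teid now names the scheduler leaf; keep it for teardown */
	return ice_rdma_update_vsi_filter(pf, vport_id, true);
}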
/**
* ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
* @pf: board private structure to initialize
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
ICE_RES_RDMA_VEC_ID);
if (index < 0)
return index;
pf->num_avail_sw_msix -= pf->num_rdma_msix;
pf->rdma_base_vector = (u16)index;
}
return 0;
}
/**
* ice_adev_release - function to be mapped to AUX dev's release op
* @dev: pointer to device to free
*/
static void ice_adev_release(struct device *dev)
{
struct iidc_auxiliary_dev *iadev;
iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
struct iidc_auxiliary_dev *iadev;
struct auxiliary_device *adev;
int ret;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
adev->name = IIDC_RDMA_ROCE_NAME;
ret = auxiliary_device_init(adev);
if (ret) {
pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
return 0;
}
/**
* ice_unplug_aux_dev - unregister and free AUX device
* @pf: pointer to pf struct
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
if (!pf->adev)
return;
auxiliary_device_delete(pf->adev);
auxiliary_device_uninit(pf->adev);
pf->adev = NULL;
}
/**
* ice_init_rdma - initializes PF for RDMA use
* @pf: ptr to ice_pf
*/
int ice_init_rdma(struct ice_pf *pf)
{
struct device *dev = &pf->pdev->dev;
int ret;
/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
return ret;
}
return ice_plug_aux_dev(pf);
}
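ice_get_auxiliary_drv() recovers the driver-side wrapper via container_of() on adev->dev.driver, so the auxiliary driver is expected to wrap its struct auxiliary_driver in a struct iidc_auxiliary_drv and register with auxiliary_driver_register(&drv.adrv). A sketch of that shape, with illustrative callback names:

/* Sketch: the auxiliary-driver-side structure ice_get_auxiliary_drv()
 * expects to find behind adev->dev.driver.
 */
static void example_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	/* react to notifications such as IIDC_EVENT_CRIT_ERR or the
	 * before/after TC-change events sent by ice_send_event_to_aux()
	 */
}

static struct iidc_auxiliary_drv example_iidc_drv = {
	.adrv = {
		.name = "roce",	/* matched against the "roce" aux device name */
		/* .probe / .remove / .id_table as usual for aux drivers */
	},
	.event_handler = example_event_handler,
};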

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021, Intel Corporation. */
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
#include <linux/net/intel/iidc.h>
#include "ice.h"
struct ice_pf;
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
#endif /* !_ICE_IDC_INT_H_ */

View File

@ -172,6 +172,7 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
}
ice_clear_sriov_cap(pf);
ice_clear_rdma_cap(pf);
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
@ -222,6 +223,7 @@ ice_lag_unlink(struct ice_lag *lag,
}
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}

View File

@ -616,6 +616,17 @@ bool ice_is_safe_mode(struct ice_pf *pf)
return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
/**
* ice_is_aux_ena
* @pf: pointer to the PF struct
*
* returns true if AUX devices/drivers are supported, false otherwise
*/
bool ice_is_aux_ena(struct ice_pf *pf)
{
return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
/**
* ice_vsi_clean_rss_flow_fld - Delete RSS configuration
* @vsi: the VSI being cleaned up

View File

@ -102,7 +102,7 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_aux_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);

View File

@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@ -454,6 +456,8 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
ice_unplug_aux_dev(pf);
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
@ -2118,6 +2122,8 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
ice_unplug_aux_dev(pf);
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@ -2608,6 +2614,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_PUSH_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
@ -2678,8 +2685,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
@ -2706,11 +2711,19 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
}
if (oicr & PFINT_OICR_HMC_ERR_M) {
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
rd32(hw, PFHMC_ERRORINFO),
rd32(hw, PFHMC_ERRORDATA));
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
struct iidc_event *event;
ena_mask &= ~ICE_AUX_CRIT_ERR;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
set_bit(IIDC_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
event->reg = oicr;
ice_send_event_to_aux(pf, event);
kfree(event);
}
}
/* Report any remaining unexpected interrupts */
@ -2720,8 +2733,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a critical error is pending there is no choice but to
* reset the device.
*/
if (oicr & (PFINT_OICR_PE_CRITERR_M |
PFINT_OICR_PCI_EXCEPTION_M |
if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
set_bit(ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
@ -3276,6 +3288,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
{
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
if (func_caps->common_cap.rdma) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@ -3355,11 +3373,12 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
int v_left, v_actual, v_other, v_budget = 0;
int num_cpus, v_left, v_actual, v_other, v_budget = 0;
struct device *dev = ice_pf_to_dev(pf);
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
/* reserve for LAN miscellaneous handler */
needed = ICE_MIN_LAN_OICR_MSIX;
@ -3381,13 +3400,23 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_other = v_budget;
/* reserve vectors for LAN traffic */
needed = min_t(int, num_online_cpus(), v_left);
needed = num_cpus;
if (v_left < needed)
goto no_hw_vecs_left_err;
pf->num_lan_msix = needed;
v_budget += needed;
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
pf->num_rdma_msix = needed;
v_budget += needed;
v_left -= needed;
}
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
@ -3417,16 +3446,46 @@ static int ice_ena_msix_range(struct ice_pf *pf)
err = -ERANGE;
goto msix_err;
} else {
int v_traffic = v_actual - v_other;
int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
v_min_rdma = ICE_MIN_RDMA_MSIX;
}
if (v_actual == ICE_MIN_MSIX ||
v_traffic < ICE_MIN_LAN_TXRX_MSIX)
v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
pf->num_rdma_msix = 0;
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
else
pf->num_lan_msix = v_traffic;
} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
(v_remain - v_rdma < v_rdma)) {
/* Support minimum RDMA and give remaining
* vectors to LAN MSIX
*/
pf->num_rdma_msix = v_min_rdma;
pf->num_lan_msix = v_remain - v_min_rdma;
} else {
/* Split remaining MSIX with RDMA after
* accounting for AEQ MSIX
*/
pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
ICE_RDMA_NUM_AEQ_MSIX;
pf->num_lan_msix = v_remain - pf->num_rdma_msix;
}
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
}
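
For reference, a minimal user-space sketch of the vector split performed in the final else branch above, with illustrative numbers; ICE_RDMA_NUM_AEQ_MSIX is assumed to be 4 here purely for the example (see ice.h for the real value):

#include <stdio.h>

#define EX_RDMA_NUM_AEQ_MSIX 4	/* assumed value, for illustration only */

int main(void)
{
	int v_remain = 20;	/* vectors left once the misc/other vectors are reserved */
	int num_rdma, num_lan;

	/* RDMA keeps its AEQ vectors plus half of the rest; LAN gets the remainder */
	num_rdma = (v_remain - EX_RDMA_NUM_AEQ_MSIX) / 2 + EX_RDMA_NUM_AEQ_MSIX;
	num_lan = v_remain - num_rdma;

	printf("rdma=%d lan=%d\n", num_rdma, num_lan);	/* prints rdma=12 lan=8 */
	return 0;
}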
@ -3441,6 +3500,7 @@ no_hw_vecs_left_err:
needed, v_left);
err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
return err;
}
@ -4268,8 +4328,29 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
if (ice_is_aux_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
err = -ENOMEM;
goto err_netdev_reg;
}
err = ice_init_rdma(pf);
if (err) {
dev_err(dev, "Failed to initialize RDMA: %d\n", err);
err = -EIO;
goto err_init_aux_unroll;
}
} else {
dev_warn(dev, "RDMA is not supported on this device\n");
}
return 0;
err_init_aux_unroll:
pf->adev = NULL;
ida_free(&ice_aux_ida, pf->aux_idx);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@ -4379,10 +4460,12 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
set_bit(ICE_DOWN, pf->state);
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
ice_unplug_aux_dev(pf);
ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
@ -4538,6 +4621,8 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
ice_unplug_aux_dev(pf);
/* Already suspended? Then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
if (!disabled)
@ -6208,6 +6293,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
ice_plug_aux_dev(pf);
return;
err_vsi_rebuild:
@ -6246,7 +6333,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct iidc_event *event;
u8 count = 0;
int err = 0;
if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
@ -6279,27 +6368,38 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return -ENOMEM;
set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
int err;
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
return err;
goto event_after;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
return err;
goto event_after;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
return 0;
event_after:
set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
return err;
}
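
To illustrate the consumer side of the BEFORE/AFTER MTU notifications sent above, here is a hedged sketch of an auxiliary driver's event_handler; my_quiesce_qps()/my_resume_qps() are hypothetical placeholders, not irdma functions, and iidc.h (introduced later in this series) is assumed to be available:

#include <linux/net/intel/iidc.h>

/* hypothetical placeholders for the sketch only */
static void my_quiesce_qps(struct ice_pf *pf) { }
static void my_resume_qps(struct ice_pf *pf) { }

static void my_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	if (test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type))
		my_quiesce_qps(pf);	/* blocking: return only once traffic is quiesced */
	else if (test_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type))
		my_resume_qps(pf);
}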
/**

View File

@ -595,6 +595,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
return 0;
}
/**
* ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
* @tc: TC number
* @new_numqs: number of queues
*/
static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
struct ice_q_ctx *q_ctx;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return ICE_ERR_PARAM;
/* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
new_numqs,
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0;
}
/* number of queues has increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return ICE_ERR_NO_MEMORY;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
vsi_ctx->rdma_q_ctx[tc] = q_ctx;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
}
return 0;
}
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
prev_numqs = vsi_ctx->sched.max_lanq[tc];
if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
else
prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
/* number of queues is unchanged or reduced from the previous count */
if (new_numqs <= prev_numqs)
return status;
status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
if (status)
return status;
if (owner == ICE_SCHED_NODE_OWNER_LAN) {
status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
if (status)
return status;
} else {
status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
if (status)
return status;
}
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
new_num_nodes, owner);
if (status)
return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[tc] = new_numqs;
else
vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
return 0;
}
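
A hedged kernel-side sketch of how a caller might request RDMA queue nodes through this path; it assumes the full prototype ice_sched_cfg_vsi(pi, vsi_handle, tc, maxqs, owner, enable) from ice_sched.h, and example_cfg_rdma_queues() is illustrative, not in-tree code:

static enum ice_status
example_cfg_rdma_queues(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u16 num_rdma_qsets)
{
	/* ICE_SCHED_NODE_OWNER_RDMA routes the allocation above into
	 * rdma_q_ctx[]/max_rdmaq[] rather than the LAN equivalents
	 */
	return ice_sched_cfg_vsi(pi, vsi_handle, tc, num_rdma_qsets,
				 ICE_SCHED_NODE_OWNER_RDMA, true);
}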
@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* recreate the child nodes all the time in these cases.
*/
vsi_ctx->sched.max_lanq[tc] = 0;
vsi_ctx->sched.max_rdmaq[tc] = 0;
}
/* update the VSI child nodes */
@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
vsi_ctx->sched.max_rdmaq[i] = 0;
}
status = 0;

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_lib.h"
#include "ice_switch.h"
#define ICE_ETH_DA_OFFSET 0
@ -302,6 +303,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
vsi->lan_q_ctx[i] = NULL;
}
if (vsi->rdma_q_ctx[i]) {
devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
vsi->rdma_q_ctx[i] = NULL;
}
}
}
@ -422,6 +427,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
/**
* ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
* @hw: pointer to HW struct
* @vsi_handle: VSI SW index
* @enable: boolean for enable/disable
*/
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
struct ice_vsi_ctx *ctx;
ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!ctx)
return -EIO;
if (enable)
ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
}
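
A short hedged sketch of a caller (not the in-tree one) toggling the PE filter around auxiliary-device bring-up and teardown, using only the prototype shown above:

static int example_rdma_fltr_enable(struct ice_hw *hw, u16 vsi_handle)
{
	int err;

	/* sets ICE_AQ_VSI_Q_OPT_PE_FLTR_EN and pushes the VSI update to FW */
	err = ice_cfg_rdma_fltr(hw, vsi_handle, true);
	if (err)
		return err;
	/* ... plug the auxiliary RDMA device ... */
	return 0;
}

static void example_rdma_fltr_disable(struct ice_hw *hw, u16 vsi_handle)
{
	/* best effort on teardown */
	ice_cfg_rdma_fltr(hw, vsi_handle, false);
}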
/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the HW struct

View File

@ -26,6 +26,8 @@ struct ice_vsi_ctx {
u8 vf_num;
u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@ -223,6 +225,8 @@ enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);

View File

@ -45,6 +45,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_RDMA BIT_ULL(15)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
@ -262,6 +263,7 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
u8 rdma;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@ -440,6 +442,7 @@ struct ice_sched_node {
u8 tc_num;
u8 owner;
#define ICE_SCHED_NODE_OWNER_LAN 0
#define ICE_SCHED_NODE_OWNER_RDMA 2
};
/* Access Macros for Tx Sched Elements data */
@ -511,6 +514,7 @@ struct ice_sched_vsi_info {
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */

View File

@ -4,6 +4,8 @@
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
#include <linux/auxiliary_bus.h>
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
@ -48,7 +50,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
struct i40e_qv_info qv_info[1];
struct i40e_qv_info qv_info[];
};
@ -78,6 +80,7 @@ struct i40e_info {
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
struct auxiliary_device *aux_dev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
@ -100,6 +103,11 @@ struct i40e_info {
u32 fw_build; /* firmware build number */
};
struct i40e_auxiliary_device {
struct auxiliary_device aux_dev;
struct i40e_info *ldev;
};
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
@ -187,8 +195,7 @@ static inline bool i40e_client_is_registered(struct i40e_client *client)
return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
}
/* used by clients */
int i40e_register_client(struct i40e_client *client);
int i40e_unregister_client(struct i40e_client *client);
void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
void i40e_client_device_unregister(struct i40e_info *ldev);
#endif /* _I40E_CLIENT_H_ */
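
With qv_info converted to a flexible-array member, allocations of struct i40e_qvlist_info should size the trailing array with struct_size(); a minimal hedged sketch (example_alloc_qvlist() is illustrative, not part of i40e):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct i40e_qvlist_info *example_alloc_qvlist(u32 num_vectors)
{
	struct i40e_qvlist_info *qvlist;

	/* sizeof(*qvlist) plus num_vectors trailing qv_info entries */
	qvlist = kzalloc(struct_size(qvlist, qv_info, num_vectors), GFP_KERNEL);
	if (!qvlist)
		return NULL;

	qvlist->num_vectors = num_vectors;
	return qvlist;
}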

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _IIDC_H_
#define _IIDC_H_
#include <linux/auxiliary_bus.h>
#include <linux/dcbnl.h>
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
enum iidc_event_type {
IIDC_EVENT_BEFORE_MTU_CHANGE,
IIDC_EVENT_AFTER_MTU_CHANGE,
IIDC_EVENT_BEFORE_TC_CHANGE,
IIDC_EVENT_AFTER_TC_CHANGE,
IIDC_EVENT_CRIT_ERR,
IIDC_EVENT_NBITS /* must be last */
};
enum iidc_reset_type {
IIDC_PFR,
IIDC_CORER,
IIDC_GLOBR,
};
#define IIDC_MAX_USER_PRIORITY 8
/* Struct to hold per RDMA Qset info */
struct iidc_rdma_qset_params {
/* Qset TEID returned to the RDMA driver in
* ice_add_rdma_qset and used by RDMA driver
* for calls to ice_del_rdma_qset
*/
u32 teid; /* Qset TEID */
u16 qs_handle; /* RDMA driver provides this */
u16 vport_id; /* VSI index */
u8 tc; /* TC branch the Qset should belong to */
};
struct iidc_qos_info {
u64 tc_ctx;
u8 rel_bw;
u8 prio_type;
u8 egress_virt_up;
u8 ingress_virt_up;
};
/* Struct to pass QoS info */
struct iidc_qos_params {
struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
u8 up2tc[IIDC_MAX_USER_PRIORITY];
u8 vport_relative_bw;
u8 vport_priority_type;
u8 num_tc;
};
struct iidc_event {
DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
u32 reg;
};
struct ice_pf;
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
#define IIDC_RDMA_ROCE_NAME "roce"
/* Structure representing auxiliary driver tailored information about the core
* PCI dev, each auxiliary driver using the IIDC interface will have an
* instance of this struct dedicated to it.
*/
struct iidc_auxiliary_dev {
struct auxiliary_device adev;
struct ice_pf *pf;
};
/* Structure representing the auxiliary driver. This struct is to be
* allocated and populated by the auxiliary driver's owner. The core PCI
* driver will access these ops by performing a container_of on the
* auxiliary_device->dev.driver.
*/
struct iidc_auxiliary_drv {
struct auxiliary_driver adrv;
/* This event_handler is meant to be a blocking call. For instance,
* when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
* return until the auxiliary driver is ready for the MTU change to
* happen.
*/
void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
};
#endif /* _IIDC_H_ */
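
The container_of access pattern described in the comment above is what the ice-side ice_send_event_to_aux() calls in the earlier hunks rely on; a hedged sketch of that pattern follows (example_notify_aux() is illustrative and omits locking and other checks the in-tree helper may perform):

static void example_notify_aux(struct auxiliary_device *adev,
			       struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (!adev || !adev->dev.driver)
		return;

	/* recover the aux driver wrapper from the bound device_driver */
	iadrv = container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			     adrv.driver);
	if (iadrv->event_handler)
		iadrv->event_handler(pf, event);
}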

View File

@ -1,107 +0,0 @@
/*
* Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef I40IW_ABI_H
#define I40IW_ABI_H
#include <linux/types.h>
#define I40IW_ABI_VER 5
struct i40iw_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
__u8 reserved8[3];
};
struct i40iw_alloc_ucontext_resp {
__u32 max_pds; /* maximum pds allowed for this user process */
__u32 max_qps; /* maximum qps allowed for this user process */
__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
__u8 kernel_ver;
__u8 reserved[3];
};
struct i40iw_alloc_pd_resp {
__u32 pd_id;
__u8 reserved[4];
};
struct i40iw_create_cq_req {
__aligned_u64 user_cq_buffer;
__aligned_u64 user_shadow_area;
};
struct i40iw_create_qp_req {
__aligned_u64 user_wqe_buffers;
__aligned_u64 user_compl_ctx;
/* UDA QP PHB */
__aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
__aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
};
enum i40iw_memreg_type {
IW_MEMREG_TYPE_MEM = 0x0000,
IW_MEMREG_TYPE_QP = 0x0001,
IW_MEMREG_TYPE_CQ = 0x0002,
};
struct i40iw_mem_reg_req {
__u16 reg_type; /* Memory, QP or CQ */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct i40iw_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
__u32 mmap_db_index;
__u32 reserved;
};
struct i40iw_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 i40iw_drv_opt;
__u16 push_idx;
__u8 lsmm;
__u8 rsvd2;
};
#endif

View File

@ -240,6 +240,7 @@ enum rdma_driver_id {
RDMA_DRIVER_OCRDMA,
RDMA_DRIVER_NES,
RDMA_DRIVER_I40IW,
RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW,
RDMA_DRIVER_VMW_PVRDMA,
RDMA_DRIVER_QEDR,
RDMA_DRIVER_HNS,

View File

@ -0,0 +1,111 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*/
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H
#include <linux/types.h>
/* irdma must support legacy GEN_1 i40iw kernel
* and user-space whose last ABI ver is 5
*/
#define IRDMA_ABI_VER 5
enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_MEM = 0,
IRDMA_MEMREG_TYPE_QP = 1,
IRDMA_MEMREG_TYPE_CQ = 2,
};
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
};
struct irdma_alloc_ucontext_resp {
__u32 max_pds;
__u32 max_qps;
__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
__u8 kernel_ver;
__u8 rsvd[3];
__aligned_u64 feature_flags;
__aligned_u64 db_mmap_key;
__u32 max_hw_wq_frags;
__u32 max_hw_read_sges;
__u32 max_hw_inline;
__u32 max_hw_rq_quanta;
__u32 max_hw_wq_quanta;
__u32 min_hw_cq_size;
__u32 max_hw_cq_size;
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
};
struct irdma_alloc_pd_resp {
__u32 pd_id;
__u8 rsvd[4];
};
struct irdma_resize_cq_req {
__aligned_u64 user_cq_buffer;
};
struct irdma_create_cq_req {
__aligned_u64 user_cq_buf;
__aligned_u64 user_shadow_area;
};
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
};
struct irdma_mem_reg_req {
__u16 reg_type; /* enum irdma_memreg_type */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_modify_qp_req {
__u8 sq_flush;
__u8 rq_flush;
__u8 rsvd[6];
};
struct irdma_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
};
struct irdma_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 irdma_drv_opt;
__u16 push_idx;
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
};
struct irdma_modify_qp_resp {
__aligned_u64 push_wqe_mmap_key;
__aligned_u64 push_db_mmap_key;
__u16 push_offset;
__u8 push_valid;
__u8 rsvd[5];
};
struct irdma_create_ah_resp {
__u32 ah_id;
__u8 rsvd[4];
};
#endif /* IRDMA_ABI_H */
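
A hedged user-space sketch of the version handshake these structs imply; the helpers are illustrative only and the header is assumed to be installed as <rdma/irdma-abi.h>:

#include <rdma/irdma-abi.h>
#include <stdio.h>

static void example_fill_ucontext_req(struct irdma_alloc_ucontext_req *req)
{
	req->userspace_ver = IRDMA_ABI_VER;	/* advertise legacy-compatible ABI 5 */
}

static int example_check_ucontext_resp(const struct irdma_alloc_ucontext_resp *resp)
{
	if (resp->kernel_ver != IRDMA_ABI_VER) {
		fprintf(stderr, "unexpected kernel ABI %d (want %d)\n",
			(int)resp->kernel_ver, IRDMA_ABI_VER);
		return -1;
	}
	return 0;
}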