2013-07-07 14:25:49 +00:00
|
|
|
/*
|
2015-04-02 14:07:29 +00:00
|
|
|
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
|
2013-07-07 14:25:49 +00:00
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2015-08-28 07:27:19 +00:00
|
|
|
#include <linux/highmem.h>
|
2013-07-07 14:25:49 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/io-mapping.h>
|
2015-05-28 19:28:39 +00:00
|
|
|
#include <linux/interrupt.h>
|
2015-10-14 14:43:47 +00:00
|
|
|
#include <linux/delay.h>
|
2013-07-07 14:25:49 +00:00
|
|
|
#include <linux/mlx5/driver.h>
|
|
|
|
#include <linux/mlx5/cq.h>
|
|
|
|
#include <linux/mlx5/qp.h>
|
|
|
|
#include <linux/debugfs.h>
|
2014-12-02 10:26:11 +00:00
|
|
|
#include <linux/kmod.h>
|
2014-10-02 09:19:44 +00:00
|
|
|
#include <linux/mlx5/mlx5_ifc.h>
|
2017-05-30 06:42:54 +00:00
|
|
|
#include <linux/mlx5/vport.h>
|
2016-04-28 22:36:39 +00:00
|
|
|
#ifdef CONFIG_RFS_ACCEL
|
|
|
|
#include <linux/cpu_rmap.h>
|
|
|
|
#endif
|
2020-10-15 11:52:00 +00:00
|
|
|
#include <linux/version.h>
|
2016-07-01 11:51:02 +00:00
|
|
|
#include <net/devlink.h>
|
2013-07-07 14:25:49 +00:00
|
|
|
#include "mlx5_core.h"
|
2018-11-19 18:52:38 +00:00
|
|
|
#include "lib/eq.h"
|
2018-11-19 18:52:39 +00:00
|
|
|
#include "fs_core.h"
|
2017-06-04 20:11:55 +00:00
|
|
|
#include "lib/mpfs.h"
|
2015-12-01 16:03:18 +00:00
|
|
|
#include "eswitch.h"
|
2018-12-11 14:09:51 +00:00
|
|
|
#include "devlink.h"
|
2020-10-07 06:00:49 +00:00
|
|
|
#include "fw_reset.h"
|
2017-03-26 14:01:57 +00:00
|
|
|
#include "lib/mlx5.h"
|
2017-03-13 18:05:45 +00:00
|
|
|
#include "fpga/core.h"
|
2018-02-18 13:07:20 +00:00
|
|
|
#include "fpga/ipsec.h"
|
2017-04-25 19:42:31 +00:00
|
|
|
#include "accel/ipsec.h"
|
2018-04-30 07:16:18 +00:00
|
|
|
#include "accel/tls.h"
|
2017-08-15 10:46:04 +00:00
|
|
|
#include "lib/clock.h"
|
2018-05-09 20:28:00 +00:00
|
|
|
#include "lib/vxlan.h"
|
2019-01-30 15:21:55 +00:00
|
|
|
#include "lib/geneve.h"
|
2018-12-04 19:24:46 +00:00
|
|
|
#include "lib/devcom.h"
|
2018-06-28 12:05:58 +00:00
|
|
|
#include "lib/pci_vsc.h"
|
2018-02-22 08:01:35 +00:00
|
|
|
#include "diag/fw_tracer.h"
|
2019-02-13 06:55:35 +00:00
|
|
|
#include "ecpf.h"
|
2019-08-22 05:05:51 +00:00
|
|
|
#include "lib/hv_vhca.h"
|
2020-02-11 22:32:43 +00:00
|
|
|
#include "diag/rsc_dump.h"
|
2020-12-12 06:12:16 +00:00
|
|
|
#include "sf/vhca_event.h"
|
2020-12-12 06:12:17 +00:00
|
|
|
#include "sf/dev/dev.h"
|
net/mlx5: SF, Port function state change support
Support changing the state of the SF port's function through devlink.
When activating the SF port's function, enable the hca in the device
followed by adding its auxiliary device.
When deactivating the SF port's function, delete its auxiliary device
followed by disabling the vHCA.
Port function attribute get/set callbacks are invoked with the devlink
instance lock held. Such callbacks must synchronize with the SF port
table being disabled (for example, via the SR-IOV sysfs callback); they
do so by holding the table refcount for the duration of the access.
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
$ devlink port show
pci/0000:06:00.0/65535: type eth netdev ens2f0np0 flavour physical port 0 splittable false
$ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:00:00 state inactive opstate detached
$ devlink port show ens2f0npf0sf88
pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:88:88 state inactive opstate detached
$ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88 state active
$ devlink port show ens2f0npf0sf88 -jp
{
"port": {
"pci/0000:06:00.0/32768": {
"type": "eth",
"netdev": "ens2f0npf0sf88",
"flavour": "pcisf",
"controller": 0,
"pfnum": 0,
"sfnum": 88,
"external": false,
"splittable": false,
"function": {
"hw_addr": "00:00:00:00:88:88",
"state": "active",
"opstate": "attached"
}
}
}
}
On port function activation, an auxiliary device is created in below
example.
$ devlink dev show
devlink dev show auxiliary/mlx5_core.sf.4
$ devlink port show auxiliary/mlx5_core.sf.4/1
auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
2020-12-12 06:12:22 +00:00
|
|
|
#include "sf/sf.h"
|
2013-07-07 14:25:49 +00:00
|
|
|
|
|
|
|
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
|
2018-07-17 01:35:37 +00:00
|
|
|
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
|
2013-07-07 14:25:49 +00:00
|
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
|
|
|
2016-12-06 15:32:43 +00:00
|
|
|
unsigned int mlx5_core_debug_mask;
|
|
|
|
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
|
2013-07-07 14:25:49 +00:00
|
|
|
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
|
|
|
|
|
2016-12-06 15:32:43 +00:00
|
|
|
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
|
|
|
|
module_param_named(prof_sel, prof_sel, uint, 0444);
|
2014-07-28 20:30:22 +00:00
|
|
|
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
|
|
|
|
|
2018-01-04 15:25:32 +00:00
|
|
|
static u32 sw_owner_id[4];
|
|
|
|
|
2015-12-14 14:34:09 +00:00
|
|
|
/* 8-byte atomic-request endianness modes as encoded in the atomic_caps
 * HCA capability; consumed by handle_hca_cap_atomic() below.
 */
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
|
|
|
|
|
2014-07-28 20:30:22 +00:00
|
|
|
/* Resource profiles selectable with the "prof_sel" module parameter:
 *   [0] - no overrides: leave firmware/driver defaults untouched
 *   [1] - override log_max_qp only
 *   [2] - (MLX5_DEFAULT_PROF) override log_max_qp and pre-populate the
 *         MR (memory region) cache; entries 0-11 are large general-purpose
 *         buckets, entries 12-15 taper off for the biggest region sizes.
 * Note: handle_hca_cap() may lower log_max_qp in-place if the HCA reports
 * a smaller maximum than the profile requests.
 */
static struct mlx5_profile profile[] = {
	[0] = {
		.mask           = 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 18,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
		.mr_cache[15]	= {
			.size	= 8,
			.limit	= 4
		},
	},
};
|
2013-07-07 14:25:49 +00:00
|
|
|
|
2017-06-08 16:33:16 +00:00
|
|
|
#define FW_INIT_TIMEOUT_MILI 2000
|
|
|
|
#define FW_INIT_WAIT_MS 2
|
2019-06-10 23:38:14 +00:00
|
|
|
#define FW_PRE_INIT_TIMEOUT_MILI 120000
|
|
|
|
#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
|
2015-10-14 14:43:47 +00:00
|
|
|
|
2020-05-15 22:16:53 +00:00
|
|
|
/* Read the "initializing" bit (bit 31, big-endian) from the device's
 * initialization segment: nonzero while firmware is still coming up.
 */
static int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
|
|
|
|
|
2019-06-10 23:38:14 +00:00
|
|
|
/* Poll the init segment until firmware finishes initializing, or until
 * @max_wait_mili milliseconds have elapsed (-EBUSY on timeout).  If
 * @warn_time_mili is nonzero, emit a progress warning at that interval.
 */
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(max_wait_mili);
	unsigned long next_warn = jiffies + msecs_to_jiffies(warn_time_mili);

	/* The pre-init wait must be long enough to see at least one warning */
	BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);

	for (;;) {
		if (!fw_initializing(dev))
			return 0;
		if (time_after(jiffies, timeout))
			return -EBUSY;
		if (warn_time_mili && time_after(jiffies, next_warn)) {
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
				       jiffies_to_msecs(timeout - next_warn) / 1000);
			next_warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}
}
|
|
|
|
|
2016-11-17 11:46:00 +00:00
|
|
|
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
|
|
|
|
{
|
|
|
|
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
|
|
|
|
driver_version);
|
2020-04-09 13:33:38 +00:00
|
|
|
u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
|
2016-11-17 11:46:00 +00:00
|
|
|
int remaining_size = driver_ver_sz;
|
|
|
|
char *string;
|
|
|
|
|
|
|
|
if (!MLX5_CAP_GEN(dev, driver_version))
|
|
|
|
return;
|
|
|
|
|
|
|
|
string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
|
|
|
|
|
|
|
|
strncpy(string, "Linux", remaining_size);
|
|
|
|
|
|
|
|
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
|
|
|
|
strncat(string, ",", remaining_size);
|
|
|
|
|
|
|
|
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
|
2020-10-04 11:30:58 +00:00
|
|
|
strncat(string, KBUILD_MODNAME, remaining_size);
|
2016-11-17 11:46:00 +00:00
|
|
|
|
|
|
|
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
|
|
|
|
strncat(string, ",", remaining_size);
|
|
|
|
|
|
|
|
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
|
2020-10-15 11:52:00 +00:00
|
|
|
|
|
|
|
snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
|
2021-02-12 16:29:24 +00:00
|
|
|
LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
|
|
|
|
LINUX_VERSION_SUBLEVEL);
|
2016-11-17 11:46:00 +00:00
|
|
|
|
|
|
|
/*Send the command*/
|
|
|
|
MLX5_SET(set_driver_version_in, in, opcode,
|
|
|
|
MLX5_CMD_OP_SET_DRIVER_VERSION);
|
|
|
|
|
2020-04-09 13:33:38 +00:00
|
|
|
mlx5_cmd_exec_in(dev, set_driver_version, in);
|
2016-11-17 11:46:00 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
static int set_dma_caps(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
|
|
|
|
if (err) {
|
2014-05-07 19:52:57 +00:00
|
|
|
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
|
2013-07-07 14:25:49 +00:00
|
|
|
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
|
|
if (err) {
|
2014-05-07 19:52:57 +00:00
|
|
|
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
|
2013-07-07 14:25:49 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
|
|
|
|
if (err) {
|
|
|
|
dev_warn(&pdev->dev,
|
2014-05-07 19:52:57 +00:00
|
|
|
"Warning: couldn't set 64-bit consistent PCI DMA mask\n");
|
2013-07-07 14:25:49 +00:00
|
|
|
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
|
|
if (err) {
|
|
|
|
dev_err(&pdev->dev,
|
2014-05-07 19:52:57 +00:00
|
|
|
"Can't set consistent PCI DMA mask, aborting\n");
|
2013-07-07 14:25:49 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2015-10-14 14:43:46 +00:00
|
|
|
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
|
|
|
|
{
|
|
|
|
struct pci_dev *pdev = dev->pdev;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
mutex_lock(&dev->pci_status_mutex);
|
|
|
|
if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
|
|
|
|
err = pci_enable_device(pdev);
|
|
|
|
if (!err)
|
|
|
|
dev->pci_status = MLX5_PCI_STATUS_ENABLED;
|
|
|
|
}
|
|
|
|
mutex_unlock(&dev->pci_status_mutex);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
|
|
|
|
{
|
|
|
|
struct pci_dev *pdev = dev->pdev;
|
|
|
|
|
|
|
|
mutex_lock(&dev->pci_status_mutex);
|
|
|
|
if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
dev->pci_status = MLX5_PCI_STATUS_DISABLED;
|
|
|
|
}
|
|
|
|
mutex_unlock(&dev->pci_status_mutex);
|
|
|
|
}
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
/* Verify that BAR 0 (the device register space) is a memory resource
 * and claim all of the device's PCI regions for this driver.
 * Returns 0 on success or a negative errno.
 */
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}
|
|
|
|
|
|
|
|
/* Undo request_bar(): release the PCI regions claimed by this driver. */
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
|
|
|
|
|
2017-05-28 12:24:17 +00:00
|
|
|
/* In-memory layout of the HOST_ENDIANNESS access register
 * (MLX5_REG_HOST_ENDIANNESS, written by set_hca_ctrl()): a single
 * endianness byte followed by 15 reserved bytes.
 */
struct mlx5_reg_host_endianness {
	u8      he;
	u8      rsvd[15];
};
|
|
|
|
|
2013-10-23 06:53:20 +00:00
|
|
|
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
|
|
|
|
|
|
|
|
enum {
|
2014-10-02 09:19:42 +00:00
|
|
|
MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
|
|
|
|
MLX5_DEV_CAP_FLAG_DCT,
|
2013-10-23 06:53:20 +00:00
|
|
|
};
|
|
|
|
|
2016-07-28 13:43:17 +00:00
|
|
|
/* Translate a pkey table size (a power of two between 128 and 4096)
 * into the firmware encoding: 128 -> 0, 256 -> 1, ..., 4096 -> 5.
 * Any other size is rejected with a warning and falls back to 0 (128).
 */
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	u16 fw_sz;

	/* The firmware value n encodes a table of 128 << n entries */
	for (fw_sz = 0; fw_sz <= 5; fw_sz++)
		if (size == 128U << fw_sz)
			return fw_sz;

	mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
	return 0;
}
|
|
|
|
|
2016-02-23 08:25:22 +00:00
|
|
|
/* Query one HCA capability group from firmware and cache it.
 *
 * @cap_type selects the capability group (general, atomic, odp, ...);
 * @cap_mode selects either the currently-enabled values
 * (HCA_CAP_OPMOD_GET_CUR -> dev->caps.hca_cur) or the maximum values the
 * firmware supports (HCA_CAP_OPMOD_GET_MAX -> dev->caps.hca_max).
 * Returns 0 on success or a negative errno.
 */
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	/* QUERY_HCA_CAP op_mod: capability type in bits [15:1], mode in bit 0 */
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	/* Stash the returned capability block in the matching cache slot */
	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}
|
|
|
|
|
2016-02-23 08:25:22 +00:00
|
|
|
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
|
|
|
|
}
|
|
|
|
|
2020-03-19 09:43:59 +00:00
|
|
|
/* Execute SET_HCA_CAP with a caller-prepared mailbox.
 * @opmod names the capability group; the firmware expects it in
 * bits [15:1] of op_mod, hence the shift by one.
 */
static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
|
|
|
|
|
2020-03-19 09:43:59 +00:00
|
|
|
/* If the device supports atomics and allows host-endianness 8B atomic
 * requests, switch the requestor endianness mode from the default to
 * host endianness.  No-op (returns 0) when atomics are unsupported or
 * the host-endianness mode is unavailable.
 */
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	/* Only proceed if the host-endianness request mode is supported */
	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}
|
|
|
|
|
2020-03-19 09:43:59 +00:00
|
|
|
/* Enable every on-demand-paging (ODP) sub-capability that the firmware
 * reports as supported (per the MAX caps) but is not yet enabled: the
 * current ODP caps are copied into the mailbox, each supported field is
 * raised to its maximum, and SET_HCA_CAP is issued only if at least one
 * field changed.  No-op when ODP is compiled out or unsupported.
 */
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	/* Start from the currently-enabled ODP caps */
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
	       MLX5_ST_SZ_BYTES(odp_cap));

/* Raise @field to its firmware-supported maximum; record that a
 * SET_HCA_CAP is needed if the maximum is nonzero.
 */
#define ODP_CAP_SET_MAX(dev, field) \
	do { \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
		if (_res) { \
			do_set = true; \
			MLX5_SET(odp_cap, set_hca_cap, field, _res); \
		} \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	/* Skip the firmware command if nothing needs enabling */
	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
|
|
|
|
|
2020-03-19 09:43:59 +00:00
|
|
|
/* Program the general HCA capabilities: start from the current caps,
 * apply profile overrides (pkey table size, log_max_qp) and opt in to
 * every optional feature the firmware supports (4K UARs, DCT, 128-byte
 * cache lines, release-all-pages, mkey-by-name, vhca state events,
 * dynamic VF MSI-X, ...), then issue SET_HCA_CAP for the general group.
 */
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = dev->profile;
	void *set_hca_cap;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	/* Base the new settings on the currently-enabled general caps */
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       profile[prof_sel].log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		/* Clamp the (global) profile to what the device can do */
		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4K.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	/* Advertise 128-byte cache lines only when the CPU actually has them */
	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
|
2014-10-02 09:19:42 +00:00
|
|
|
|
2020-01-14 03:06:25 +00:00
|
|
|
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
|
|
|
|
{
|
|
|
|
void *set_hca_cap;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!MLX5_CAP_GEN(dev, roce))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
|
|
|
|
!MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
|
|
|
|
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
|
|
|
|
MLX5_ST_SZ_BYTES(roce_cap));
|
|
|
|
MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
|
|
|
|
|
|
|
|
err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
|
2013-07-07 14:25:49 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-02-17 11:11:02 +00:00
|
|
|
/* Run all SET_HCA_CAP stages (general, atomic, odp, roce) sharing one
 * mailbox buffer, zeroing it between stages.  Returns the first error
 * encountered, after which remaining stages are skipped.
 */
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_ctx;
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = handle_hca_cap(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	/* Reuse the mailbox for the next capability group */
	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_atomic(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_odp(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_roce(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
		goto out;
	}

out:
	kfree(set_ctx);
	return err;
}
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
static int set_hca_ctrl(struct mlx5_core_dev *dev)
|
|
|
|
{
|
2017-05-28 12:24:17 +00:00
|
|
|
struct mlx5_reg_host_endianness he_in;
|
|
|
|
struct mlx5_reg_host_endianness he_out;
|
2013-07-07 14:25:49 +00:00
|
|
|
int err;
|
|
|
|
|
2015-12-01 16:03:09 +00:00
|
|
|
if (!mlx5_core_is_pf(dev))
|
|
|
|
return 0;
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
memset(&he_in, 0, sizeof(he_in));
|
|
|
|
he_in.he = MLX5_SET_HOST_ENDIANNESS;
|
|
|
|
err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
|
|
|
|
&he_out, sizeof(he_out),
|
|
|
|
MLX5_REG_HOST_ENDIANNESS, 0, 1);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-05-30 06:42:54 +00:00
|
|
|
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Disable local_lb by default */
|
2018-01-09 09:41:10 +00:00
|
|
|
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
|
2017-05-30 06:42:54 +00:00
|
|
|
ret = mlx5_nic_vport_update_local_lb(dev, false);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-12-01 16:03:08 +00:00
|
|
|
/* Issue ENABLE_HCA for the function identified by @func_id, carrying
 * the device's embedded-CPU flag.  Returns 0 or a negative errno.
 */
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}
|
|
|
|
|
2015-12-01 16:03:08 +00:00
|
|
|
/* Issue DISABLE_HCA for the function identified by @func_id, carrying
 * the device's embedded-CPU flag.  Returns 0 or a negative errno.
 *
 * Fix: embedded_cpu_function was previously set through the
 * enable_hca_in layout (a copy-paste from mlx5_core_enable_hca())
 * while the mailbox is a disable_hca_in; use the matching layout.
 */
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(disable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, disable_hca, in);
}
|
|
|
|
|
2015-05-28 19:28:48 +00:00
|
|
|
/* Negotiate the ISSI (Issued Software Stack Interface) level with
 * firmware: query the supported levels, prefer level 1, tolerate
 * firmware that lacks the command entirely (falls back to level 0),
 * and fail with -EOPNOTSUPP when no common level exists.
 */
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		/* A real failure (no FW status, or a driver-side syndrome)
		 * is fatal; any other status means the command is simply
		 * unsupported and ISSI 0 is assumed.
		 */
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	/* Bit 1 set: firmware supports ISSI level 1 - select it */
	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		/* Level 0 supported (or nothing reported): keep default */
		return 0;
	}

	return -EOPNOTSUPP;
}
|
|
|
|
|
2019-03-29 22:37:54 +00:00
|
|
|
/* PCI-level initialization for the device.
 *
 * Enables the PCI device, claims BAR 0, configures bus mastering and
 * DMA masks, opportunistically enables PCIe atomic ops toward the root
 * complex, and maps the firmware initialization segment (iseg) from the
 * start of BAR 0. On failure, resources acquired so far are released in
 * reverse order via the goto-unwind ladder.
 *
 * @dev:  mlx5 core device being brought up
 * @pdev: underlying PCI device
 * @id:   matched PCI device ID (unused here; kept for the probe signature)
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);
	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	/* Atomic ops are a nice-to-have: try each supported width and only
	 * log (debug level) if none can be enabled — not a fatal error.
	 */
	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	/* The firmware initialization segment lives at the start of BAR 0. */
	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
	return err;
}
|
|
|
|
|
2019-03-29 22:37:53 +00:00
|
|
|
/* Undo mlx5_pci_init(): unmap the init segment and release PCI
 * resources in reverse order of acquisition.
 */
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
|
|
|
|
|
2019-03-29 22:37:53 +00:00
|
|
|
/* One-time software-state initialization for the device.
 *
 * Builds all per-device software constructs (devcom registration, IRQ/EQ
 * tables, event machinery, fw-reset events, debugfs, reserved GIDs, clock,
 * tunnel offload state, rate limiting, L2/MPFS, SR-IOV, eswitch, FPGA,
 * vhca events, SF tables, device memory, tracer, HV vhca, resource dump).
 * These live until mlx5_cleanup_once(); they are not torn down on normal
 * unload/reload cycles. Failures unwind already-initialized state in
 * reverse order through the label ladder.
 *
 * Returns 0 on success or a negative errno from the first failing init.
 */
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	/* devcom registration failure is logged but deliberately not fatal. */
	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_irq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize irq table\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_irq_cleanup;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_fw_reset_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize fw reset events\n");
		goto err_events_cleanup;
	}

	mlx5_cq_debugfs_init(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	/* vxlan/geneve create their own state; error handling is internal
	 * to their users (a NULL/error handle disables the feature).
	 */
	dev->vxlan = mlx5_vxlan_create(dev);
	dev->geneve = mlx5_geneve_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_sriov_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_vhca_event_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
		goto err_fpga_cleanup;
	}

	/* NOTE(review): the next two labels are named after the step that
	 * FAILED, not the step they clean up — e.g. err_sf_hw_table_cleanup
	 * runs vhca_event_cleanup. The unwind order is correct; only the
	 * naming is confusing.
	 */
	err = mlx5_sf_hw_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
		goto err_sf_hw_table_cleanup;
	}

	err = mlx5_sf_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF table %d\n", err);
		goto err_sf_table_cleanup;
	}

	/* Non-fatal: device memory is optional.
	 * NOTE(review): 'err' is 0 at this point (last init succeeded), so
	 * the %d in this warning always prints 0 rather than the dm error —
	 * looks like it should use PTR_ERR(dev->dm); confirm before changing.
	 */
	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);

	/* Optional diagnostics facilities; their users tolerate NULL/error
	 * handles, so no error checking here.
	 */
	dev->tracer = mlx5_fw_tracer_create(dev);
	dev->hv_vhca = mlx5_hv_vhca_create(dev);
	dev->rsc_dump = mlx5_rsc_dump_create(dev);

	return 0;

err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
	mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
	mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
	mlx5_irq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}
|
|
|
|
|
|
|
|
/* Tear down everything built by mlx5_init_once(), in reverse order of
 * initialization. Must mirror that function: any construct added there
 * needs a matching cleanup call here.
 */
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}
|
|
|
|
|
2019-03-29 22:37:56 +00:00
|
|
|
/* Bring the HCA function up to a command-capable, initialized state.
 *
 * Sequence: wait for FW pre-init, initialize the command interface, wait
 * for FW init, enable the HCA, negotiate ISSI, hand boot pages to FW,
 * program HCA control/capabilities, hand init pages to FW, run INIT_HCA,
 * publish the driver version, start health polling, and finally query
 * the resulting HCA capabilities. Each failure unwinds the steps already
 * performed via the label ladder.
 *
 * @dev:  mlx5 core device
 * @boot: true on first-time (probe) setup; forwarded to
 *        mlx5_stop_health_poll() on the unwind path
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			      FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
			      FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	/* Command interface is now usable; mark it UP so commands may flow. */
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	/* Second argument selects the page pool: 1 = boot pages. */
	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	/* 0 = init pages, requested only after capabilities are set. */
	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	/* sw_owner_id is presumably a file-scope identifier defined outside
	 * this chunk — TODO confirm at the top of the file.
	 */
	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}
|
|
|
|
|
|
|
|
/* Reverse of mlx5_function_setup(): stop health polling, run
 * TEARDOWN_HCA, return startup pages to the kernel, disable the HCA,
 * and shut down the command interface.
 *
 * @dev:  mlx5 core device
 * @boot: forwarded to mlx5_stop_health_poll() (matches the setup path)
 *
 * If TEARDOWN_HCA fails, the remaining cleanup is intentionally skipped
 * (FW may still own the pages) and the error is returned to the caller.
 * Returns 0 on success.
 */
static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}
|
|
|
|
|
2019-03-29 22:37:57 +00:00
|
|
|
static int mlx5_load(struct mlx5_core_dev *dev)
|
2019-03-29 22:37:56 +00:00
|
|
|
{
|
|
|
|
int err;
|
2013-07-07 14:25:49 +00:00
|
|
|
|
2017-01-03 21:55:24 +00:00
|
|
|
dev->priv.uar = mlx5_get_uars_page(dev);
|
2017-11-20 07:58:01 +00:00
|
|
|
if (IS_ERR(dev->priv.uar)) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed allocating uar, aborting\n");
|
2017-11-20 07:58:01 +00:00
|
|
|
err = PTR_ERR(dev->priv.uar);
|
2019-03-29 22:37:57 +00:00
|
|
|
return err;
|
2013-07-07 14:25:49 +00:00
|
|
|
}
|
|
|
|
|
2018-11-20 22:12:27 +00:00
|
|
|
mlx5_events_start(dev);
|
2018-11-20 22:12:23 +00:00
|
|
|
mlx5_pagealloc_start(dev);
|
|
|
|
|
2019-06-10 23:38:32 +00:00
|
|
|
err = mlx5_irq_table_create(dev);
|
|
|
|
if (err) {
|
|
|
|
mlx5_core_err(dev, "Failed to alloc IRQs\n");
|
|
|
|
goto err_irq_table;
|
|
|
|
}
|
|
|
|
|
2018-11-19 18:52:36 +00:00
|
|
|
err = mlx5_eq_table_create(dev);
|
2013-07-07 14:25:49 +00:00
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed to create EQs\n");
|
2018-11-19 18:52:36 +00:00
|
|
|
goto err_eq_table;
|
2013-07-07 14:25:49 +00:00
|
|
|
}
|
|
|
|
|
2018-02-22 08:01:35 +00:00
|
|
|
err = mlx5_fw_tracer_init(dev->tracer);
|
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed to init FW tracer\n");
|
2018-02-22 08:01:35 +00:00
|
|
|
goto err_fw_tracer;
|
|
|
|
}
|
|
|
|
|
2020-10-07 06:00:49 +00:00
|
|
|
mlx5_fw_reset_events_start(dev);
|
2019-08-22 05:05:51 +00:00
|
|
|
mlx5_hv_vhca_init(dev->hv_vhca);
|
|
|
|
|
2020-02-11 22:32:43 +00:00
|
|
|
err = mlx5_rsc_dump_init(dev);
|
|
|
|
if (err) {
|
|
|
|
mlx5_core_err(dev, "Failed to init Resource dump\n");
|
|
|
|
goto err_rsc_dump;
|
|
|
|
}
|
|
|
|
|
2017-11-19 15:51:13 +00:00
|
|
|
err = mlx5_fpga_device_start(dev);
|
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "fpga device start failed %d\n", err);
|
2017-11-19 15:51:13 +00:00
|
|
|
goto err_fpga_start;
|
|
|
|
}
|
|
|
|
|
2019-11-18 12:30:20 +00:00
|
|
|
mlx5_accel_ipsec_init(dev);
|
2017-11-19 15:51:13 +00:00
|
|
|
|
2018-04-30 07:16:18 +00:00
|
|
|
err = mlx5_accel_tls_init(dev);
|
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "TLS device start failed %d\n", err);
|
2018-04-30 07:16:18 +00:00
|
|
|
goto err_tls_start;
|
|
|
|
}
|
|
|
|
|
2015-12-10 15:12:44 +00:00
|
|
|
err = mlx5_init_fs(dev);
|
2016-09-09 14:35:20 +00:00
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed to init flow steering\n");
|
2017-05-30 06:42:54 +00:00
|
|
|
goto err_fs;
|
2016-09-09 14:35:20 +00:00
|
|
|
}
|
2013-07-07 14:25:49 +00:00
|
|
|
|
2017-05-30 06:42:54 +00:00
|
|
|
err = mlx5_core_set_hca_defaults(dev);
|
2015-12-10 15:12:44 +00:00
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed to set hca defaults\n");
|
2019-05-16 18:11:11 +00:00
|
|
|
goto err_sriov;
|
2015-12-10 15:12:44 +00:00
|
|
|
}
|
2016-06-23 14:02:37 +00:00
|
|
|
|
2020-12-12 06:12:16 +00:00
|
|
|
mlx5_vhca_event_start(dev);
|
|
|
|
|
net/mlx5: SF, Port function state change support
Support changing the state of the SF port's function through devlink.
When activating the SF port's function, enable the hca in the device
followed by adding its auxiliary device.
When deactivating the SF port's function, delete its auxiliary device
followed by disabling the vHCA.
Port function attributes get/set callbacks are invoked with devlink
instance lock held. Such callbacks need to synchronize with sf port
table getting disabled either via sriov sysfs callback. Such callbacks
synchronize with table disable context holding table refcount.
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
$ devlink port show
pci/0000:06:00.0/65535: type eth netdev ens2f0np0 flavour physical port 0 splittable false
$ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:00:00 state inactive opstate detached
$ devlink port show ens2f0npf0sf88
pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:88:88 state inactive opstate detached
$ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88 state active
$ devlink port show ens2f0npf0sf88 -jp
{
"port": {
"pci/0000:06:00.0/32768": {
"type": "eth",
"netdev": "ens2f0npf0sf88",
"flavour": "pcisf",
"controller": 0,
"pfnum": 0,
"sfnum": 88,
"external": false,
"splittable": false,
"function": {
"hw_addr": "00:00:00:00:88:88",
"state": "active",
"opstate": "attached"
}
}
}
}
On port function activation, an auxiliary device is created in below
example.
$ devlink dev show
devlink dev show auxiliary/mlx5_core.sf.4
$ devlink port show auxiliary/mlx5_core.sf.4/1
auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
2020-12-12 06:12:22 +00:00
|
|
|
err = mlx5_sf_hw_table_create(dev);
|
|
|
|
if (err) {
|
|
|
|
mlx5_core_err(dev, "sf table create failed %d\n", err);
|
|
|
|
goto err_vhca;
|
|
|
|
}
|
|
|
|
|
2019-02-13 06:55:36 +00:00
|
|
|
err = mlx5_ec_init(dev);
|
|
|
|
if (err) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_err(dev, "Failed to init embedded CPU\n");
|
2019-02-13 06:55:36 +00:00
|
|
|
goto err_ec;
|
|
|
|
}
|
|
|
|
|
2020-11-20 23:03:38 +00:00
|
|
|
err = mlx5_sriov_attach(dev);
|
|
|
|
if (err) {
|
|
|
|
mlx5_core_err(dev, "sriov init failed %d\n", err);
|
|
|
|
goto err_sriov;
|
|
|
|
}
|
|
|
|
|
2020-12-12 06:12:17 +00:00
|
|
|
mlx5_sf_dev_table_create(dev);
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-09-09 14:35:20 +00:00
|
|
|
err_sriov:
|
2020-11-20 23:03:38 +00:00
|
|
|
mlx5_ec_cleanup(dev);
|
|
|
|
err_ec:
|
net/mlx5: SF, Port function state change support
Support changing the state of the SF port's function through devlink.
When activating the SF port's function, enable the hca in the device
followed by adding its auxiliary device.
When deactivating the SF port's function, delete its auxiliary device
followed by disabling the vHCA.
Port function attributes get/set callbacks are invoked with devlink
instance lock held. Such callbacks need to synchronize with sf port
table getting disabled either via sriov sysfs callback. Such callbacks
synchronize with table disable context holding table refcount.
$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
$ devlink port show
pci/0000:06:00.0/65535: type eth netdev ens2f0np0 flavour physical port 0 splittable false
$ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:00:00 state inactive opstate detached
$ devlink port show ens2f0npf0sf88
pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
function:
hw_addr 00:00:00:00:88:88 state inactive opstate detached
$ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88 state active
$ devlink port show ens2f0npf0sf88 -jp
{
"port": {
"pci/0000:06:00.0/32768": {
"type": "eth",
"netdev": "ens2f0npf0sf88",
"flavour": "pcisf",
"controller": 0,
"pfnum": 0,
"sfnum": 88,
"external": false,
"splittable": false,
"function": {
"hw_addr": "00:00:00:00:88:88",
"state": "active",
"opstate": "attached"
}
}
}
}
On port function activation, an auxiliary device is created in below
example.
$ devlink dev show
devlink dev show auxiliary/mlx5_core.sf.4
$ devlink port show auxiliary/mlx5_core.sf.4/1
auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
2020-12-12 06:12:22 +00:00
|
|
|
mlx5_sf_hw_table_destroy(dev);
|
|
|
|
err_vhca:
|
2020-12-12 06:12:16 +00:00
|
|
|
mlx5_vhca_event_stop(dev);
|
2015-12-10 15:12:44 +00:00
|
|
|
mlx5_cleanup_fs(dev);
|
|
|
|
err_fs:
|
2018-04-30 07:16:18 +00:00
|
|
|
mlx5_accel_tls_cleanup(dev);
|
|
|
|
err_tls_start:
|
2017-11-19 15:51:13 +00:00
|
|
|
mlx5_accel_ipsec_cleanup(dev);
|
|
|
|
mlx5_fpga_device_stop(dev);
|
|
|
|
err_fpga_start:
|
2020-02-11 22:32:43 +00:00
|
|
|
mlx5_rsc_dump_cleanup(dev);
|
|
|
|
err_rsc_dump:
|
2019-08-22 05:05:51 +00:00
|
|
|
mlx5_hv_vhca_cleanup(dev->hv_vhca);
|
2020-10-07 06:00:49 +00:00
|
|
|
mlx5_fw_reset_events_stop(dev);
|
2018-02-22 08:01:35 +00:00
|
|
|
mlx5_fw_tracer_cleanup(dev->tracer);
|
|
|
|
err_fw_tracer:
|
2018-11-19 18:52:36 +00:00
|
|
|
mlx5_eq_table_destroy(dev);
|
|
|
|
err_eq_table:
|
2019-06-10 23:38:32 +00:00
|
|
|
mlx5_irq_table_destroy(dev);
|
|
|
|
err_irq_table:
|
2018-11-20 22:12:23 +00:00
|
|
|
mlx5_pagealloc_stop(dev);
|
2018-11-20 22:12:27 +00:00
|
|
|
mlx5_events_stop(dev);
|
2019-03-29 22:37:53 +00:00
|
|
|
mlx5_put_uars_page(dev, dev->priv.uar);
|
2019-03-29 22:37:57 +00:00
|
|
|
return err;
|
|
|
|
}
|
2013-07-07 14:25:49 +00:00
|
|
|
|
2019-03-29 22:37:57 +00:00
|
|
|
/* Tear down everything brought up by mlx5_load(), strictly in reverse
 * order. Any reordering here must mirror a corresponding change in
 * mlx5_load()'s bring-up sequence.
 */
static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}
|
2016-09-09 14:35:20 +00:00
|
|
|
|
2020-11-02 14:54:43 +00:00
|
|
|
/* One-time full initialization of the device: function setup, software
 * object creation, hardware load, then devlink and device registration.
 * Serialized against load/unload paths by intf_state_mutex.
 *
 * Returns 0 on success or a negative errno; on failure the device state
 * is marked MLX5_DEVICE_STATE_INTERNAL_ERROR and everything already
 * brought up is unwound via the goto ladder below.
 */
int mlx5_init_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* boot = true: this is the first bring-up of the function */
	err = mlx5_function_setup(dev, true);
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

	/* Unwind in reverse order of the bring-up above. */
err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
|
|
|
|
|
2020-11-02 14:54:43 +00:00
|
|
|
/* Counterpart of mlx5_init_one(): unregister the device and devlink,
 * then unload hardware state and tear the function down. If the
 * interface was already down, only the software objects (mlx5_cleanup_once)
 * are released.
 */
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		/* sw objects from mlx5_init_once() still need freeing */
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
|
|
|
|
|
|
|
|
/* Reload the device after a previous mlx5_unload_one() (recovery,
 * resume, reset). Unlike mlx5_init_one() this does not recreate the
 * software objects (no mlx5_init_once) and passes boot = false to the
 * function setup. Returns 0 or a negative errno.
 */
int mlx5_load_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, false);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	/* re-attach interfaces that were detached by mlx5_unload_one() */
	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
|
2019-03-29 22:37:57 +00:00
|
|
|
|
2020-11-02 14:54:43 +00:00
|
|
|
/* Detach interfaces and unload the device without destroying the
 * software objects, so a later mlx5_load_one() can bring it back.
 * Used by error recovery, suspend and shutdown paths.
 */
void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	/* detach even if the interface is down so consumers let go */
	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
|
2015-04-02 14:07:34 +00:00
|
|
|
|
2020-12-12 06:12:18 +00:00
|
|
|
/* Initialize the purely software side of a mlx5_core_dev: profile
 * selection, locks and lists, debugfs directory, health reporting,
 * page allocator and auxiliary-device machinery.
 *
 * @profile_idx: index into the module-level profile[] array.
 * Returns 0 on success or a negative errno; partially initialized
 * state is unwound on failure.
 */
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->profile = &profile[profile_idx];

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
					    mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	return 0;

	/* Unwind in reverse order; also destroy the mutexes initialized
	 * above so lock debugging does not see them leak.
	 */
err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	debugfs_remove(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	return err;
}
|
|
|
|
|
2020-12-12 06:12:18 +00:00
|
|
|
/* Release everything set up by mlx5_mdev_init(), in reverse order. */
void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
}
|
|
|
|
|
2020-11-02 14:54:43 +00:00
|
|
|
/* PCI probe callback: allocate the devlink instance embedding the
 * mlx5_core_dev, classify the function as PF or VF from the PCI ID
 * table data, then run the software, PCI and device initialization
 * stages in order. Returns 0 or a negative errno.
 */
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc();
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	/* mlx5_core_dev lives inside the devlink allocation */
	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	/* crdump failure is logged but not fatal to the probe */
	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	/* saved state is restored by the PCI error-recovery slot reset */
	pci_save_state(pdev);
	if (!mlx5_core_is_mp_slave(dev))
		devlink_reload_enable(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}
|
2015-09-25 07:49:14 +00:00
|
|
|
|
2014-07-28 20:30:22 +00:00
|
|
|
/* PCI remove callback: undo probe_one() in reverse order, draining the
 * health workqueue before teardown so no recovery work races with it.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_reload_disable(devlink);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}
|
|
|
|
|
2015-10-14 14:43:46 +00:00
|
|
|
/* PCI error-recovery "error_detected" hook: move the device into the
 * error state, unload it and disable PCI access. Returns DISCONNECT
 * for a permanent failure, otherwise asks the PCI core for a slot reset.
 */
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	/* flush pending health-recovery work before pulling PCI access */
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
|
|
|
|
|
2016-06-30 14:34:41 +00:00
|
|
|
/* wait for the device to show vital signs by waiting
|
|
|
|
* for the health counter to start counting.
|
2015-10-14 14:43:46 +00:00
|
|
|
*/
|
2016-06-30 14:34:41 +00:00
|
|
|
static int wait_vital(struct pci_dev *pdev)
|
2015-10-14 14:43:46 +00:00
|
|
|
{
|
|
|
|
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
|
|
|
|
struct mlx5_core_health *health = &dev->priv.health;
|
|
|
|
const int niter = 100;
|
2016-06-30 14:34:41 +00:00
|
|
|
u32 last_count = 0;
|
2015-10-14 14:43:46 +00:00
|
|
|
u32 count;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < niter; i++) {
|
|
|
|
count = ioread32be(health->health_counter);
|
|
|
|
if (count && count != 0xffffffff) {
|
2016-06-30 14:34:41 +00:00
|
|
|
if (last_count && last_count != count) {
|
2019-03-29 22:38:00 +00:00
|
|
|
mlx5_core_info(dev,
|
|
|
|
"wait vital counter value 0x%x after %d iterations\n",
|
|
|
|
count, i);
|
2016-06-30 14:34:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
last_count = count;
|
2015-10-14 14:43:46 +00:00
|
|
|
}
|
|
|
|
msleep(50);
|
|
|
|
}
|
|
|
|
|
2016-06-30 14:34:41 +00:00
|
|
|
return -ETIMEDOUT;
|
2015-10-14 14:43:46 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 18:09:04 +00:00
|
|
|
/* PCI error-recovery "slot_reset" hook: re-enable the device, restore
 * the config space saved at probe time, and wait for firmware vital
 * signs. Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	/* re-save so a subsequent reset restores valid state again */
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
|
|
|
|
|
|
|
|
/* PCI error-recovery "resume" hook: reload the device after a
 * successful slot reset; only logs the outcome.
 */
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int ret;

	mlx5_core_info(dev, "%s was called\n", __func__);

	ret = mlx5_load_one(dev);
	if (!ret) {
		mlx5_core_info(dev, "%s: device recovered\n", __func__);
		return;
	}
	mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
		      __func__, ret);
}
|
|
|
|
|
|
|
|
/* PCI error-recovery callbacks wired into mlx5_core_driver. */
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
|
|
|
|
|
2017-02-09 12:20:12 +00:00
|
|
|
/* Attempt a firmware-assisted teardown (fast, then force) instead of
 * the full software unload path; used from shutdown() where speed
 * matters. Returns 0 on success, -EOPNOTSUPP if firmware supports
 * neither mode, -EAGAIN in internal-error state, or the teardown
 * command's error (with health polling restarted).
 */
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	/* prefer the fast variant, fall back to force teardown */
	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	/* teardown failed; resume health polling and report the error */
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms requiring freeing the IRQ's in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to cleanup the mlx5_core software
	 * contexts.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}
|
|
|
|
|
2016-04-21 21:33:07 +00:00
|
|
|
/* PCI shutdown callback (reboot/kexec): try the firmware fast-unload
 * path first and fall back to a full software unload if it fails.
 */
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}
|
|
|
|
|
2020-05-20 17:32:08 +00:00
|
|
|
/* Legacy PCI suspend callback: unload the device; mlx5_resume()
 * reloads it. The pm_message_t argument is unused.
 */
static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
	mlx5_unload_one(pci_get_drvdata(pdev));

	return 0;
}
|
|
|
|
|
|
|
|
/* Legacy PCI resume callback: reload the device unloaded by
 * mlx5_suspend(). Returns 0 or a negative errno from mlx5_load_one().
 */
static int mlx5_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *mdev = pci_get_drvdata(pdev);

	return mlx5_load_one(mdev);
}
|
|
|
|
|
2014-07-28 20:30:22 +00:00
|
|
|
/* PCI IDs handled by this driver; MLX5_PCI_DEV_IS_VF in driver_data
 * marks virtual functions (see coredev_type selection in probe_one()).
 */
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
|
|
|
|
|
2016-10-25 15:36:34 +00:00
|
|
|
/* Force the device down: software reset followed by a full unload.
 * Partner of mlx5_recover_device().
 */
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
}
|
|
|
|
|
2020-11-03 16:46:31 +00:00
|
|
|
int mlx5_recover_device(struct mlx5_core_dev *dev)
|
2016-10-25 15:36:34 +00:00
|
|
|
{
|
2020-11-03 16:46:31 +00:00
|
|
|
int ret = -EIO;
|
|
|
|
|
2016-10-25 15:36:34 +00:00
|
|
|
mlx5_pci_disable_device(dev);
|
|
|
|
if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
|
2020-11-03 16:46:31 +00:00
|
|
|
ret = mlx5_load_one(dev);
|
|
|
|
return ret;
|
2016-10-25 15:36:34 +00:00
|
|
|
}
|
|
|
|
|
2014-07-28 20:30:22 +00:00
|
|
|
/* mlx5_core PCI driver definition; SR-IOV callbacks let the PCI core
 * configure VFs and per-VF MSI-X vector counts through sysfs.
 */
static struct pci_driver mlx5_core_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = probe_one,
	.remove         = remove_one,
	.suspend        = mlx5_suspend,
	.resume         = mlx5_resume,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
|
2013-07-07 14:25:49 +00:00
|
|
|
|
2016-12-06 15:32:43 +00:00
|
|
|
static void mlx5_core_verify_params(void)
|
|
|
|
{
|
|
|
|
if (prof_sel >= ARRAY_SIZE(profile)) {
|
|
|
|
pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
|
|
|
|
prof_sel,
|
|
|
|
ARRAY_SIZE(profile) - 1,
|
|
|
|
MLX5_DEFAULT_PROF);
|
|
|
|
prof_sel = MLX5_DEFAULT_PROF;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-07 14:25:49 +00:00
|
|
|
/* Module init: register debugfs, the PCI driver, the SF auxiliary
 * driver and (when built in) the Ethernet layer.
 *
 * Fix: the original mlx5e_init() failure path called
 * pci_unregister_driver() and jumped to err_debug, skipping
 * mlx5_sf_driver_unregister() and leaking the SF driver registration
 * made just above. Route that failure through a dedicated label so
 * the unwind mirrors the registration order exactly.
 */
static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5e_init();
	if (err)
		goto err_en;
#endif

	return 0;

#ifdef CONFIG_MLX5_CORE_EN
err_en:
	mlx5_sf_driver_unregister();
#endif
err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}
|
|
|
|
|
|
|
|
/* Module exit: unwind init() registrations in reverse order. */
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}
|
|
|
|
|
|
|
|
module_init(init);
|
|
|
|
module_exit(cleanup);
|