crypto: cavium/nitrox - use pci_alloc_irq_vectors() while enabling MSI-X.

Replace pci_enable_msix_exact() with pci_alloc_irq_vectors(). Get the
required vector count from pci_msix_vec_count(). Use struct
nitrox_q_vector as the argument to the tasklets.

Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Reviewed-by: Gadam Sreerama <sgadam@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Srikanth Jampala 2018-09-29 13:49:10 +05:30 committed by Herbert Xu
parent e7892dd6d8
commit 5155e118dd
8 changed files with 181 additions and 282 deletions
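
Note (not from the patch itself): for readers who have not used the managed
MSI-X helpers, the sketch below shows the generic pci_alloc_irq_vectors()
pattern this commit switches to. my_isr(), my_setup_msix() and my_dev are
illustrative placeholders; only the PCI/IRQ helpers are real kernel APIs, and
the error handling is condensed.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_isr(int irq, void *data)
{
        /* acknowledge the device / schedule work in a real driver */
        return IRQ_HANDLED;
}

static int my_setup_msix(struct pci_dev *pdev, void *my_dev)
{
        int nr_vecs, vec, ret, i;

        /* ask how many entries the device's MSI-X table advertises */
        nr_vecs = pci_msix_vec_count(pdev);
        if (nr_vecs < 0)
                return nr_vecs;

        /* allocate exactly nr_vecs vectors (min == max), MSI-X only */
        ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_vecs; i++) {
                /* map MSI-X entry index i to a Linux IRQ number */
                vec = pci_irq_vector(pdev, i);
                ret = request_irq(vec, my_isr, 0, "my-dev", my_dev);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), my_dev);
        pci_free_irq_vectors(pdev);
        return ret;
}

Compared with the old pci_enable_msix_exact() flow, the driver no longer keeps
its own struct msix_entry array: pci_irq_vector() maps an entry index straight
to a Linux IRQ number, and pci_free_irq_vectors() undoes the allocation.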

diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h

@@ -12,13 +12,10 @@ void crypto_free_context(void *ctx);
 struct nitrox_device *nitrox_get_first_device(void);
 void nitrox_put_device(struct nitrox_device *ndev);
 
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
-int nitrox_pf_init_isr(struct nitrox_device *ndev);
-
 int nitrox_common_sw_init(struct nitrox_device *ndev);
 void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
 
-void pkt_slc_resp_handler(unsigned long data);
+void pkt_slc_resp_tasklet(unsigned long data);
 int nitrox_process_se_request(struct nitrox_device *ndev,
                               struct se_crypto_request *req,
                               completion_t cb,

diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h

@@ -18,6 +18,7 @@
  * @response_head: submitted request list
  * @backlog_head: backlog queue
  * @dbell_csr_addr: doorbell register address for this queue
+ * @compl_cnt_csr_addr: completion count register address of the slc port
  * @base: command queue base address
  * @dma: dma address of the base
  * @pending_count: request pending at device
@@ -39,6 +40,7 @@ struct nitrox_cmdq {
         struct list_head backlog_head;
 
         u8 __iomem *dbell_csr_addr;
+        u8 __iomem *compl_cnt_csr_addr;
         u8 *base;
         dma_addr_t dma;
 
@@ -88,30 +90,17 @@ struct nitrox_stats {
         atomic64_t dropped;
 };
 
-#define MAX_MSIX_VECTOR_NAME    20
+#define IRQ_NAMESZ      32
 
-/**
- * vectors for queues (64 AE, 64 SE and 64 ZIP) and
- * error condition/mailbox.
- */
-#define MAX_MSIX_VECTORS        192
-
-struct nitrox_msix {
-        struct msix_entry *entries;
-        char **names;
-        DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
-        u32 nr_entries;
-};
-
-struct bh_data {
-        /* slc port completion count address */
-        u8 __iomem *completion_cnt_csr_addr;
-        struct nitrox_cmdq *cmdq;
-        struct tasklet_struct resp_handler;
-};
-
-struct nitrox_bh {
-        struct bh_data *slc;
+struct nitrox_q_vector {
+        char name[IRQ_NAMESZ];
+        bool valid;
+        int ring;
+        struct tasklet_struct resp_tasklet;
+        union {
+                struct nitrox_cmdq *cmdq;
+                struct nitrox_device *ndev;
+        };
 };
 
 /*
@@ -160,8 +149,7 @@ enum vf_mode {
  * @mode: Device mode PF/VF
  * @ctx_pool: DMA pool for crypto context
  * @pkt_inq: Packet input rings
- * @msix: MSI-X information
- * @bh: post processing work
+ * @qvec: MSI-X queue vectors information
  * @hw: hardware information
  * @debugfs_dir: debugfs directory
  */
@@ -186,8 +174,8 @@ struct nitrox_device {
         struct dma_pool *ctx_pool;
         struct nitrox_cmdq *pkt_inq;
 
-        struct nitrox_msix msix;
-        struct nitrox_bh bh;
+        struct nitrox_q_vector *qvec;
+        int num_vecs;
 
         struct nitrox_stats stats;
         struct nitrox_hw hw;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c

@@ -8,8 +8,14 @@
 #include "nitrox_common.h"
 #include "nitrox_hal.h"
 
+/**
+ * One vector for each type of ring
+ *  - NPS packet ring, AQMQ ring and ZQMQ ring
+ */
 #define NR_RING_VECTORS 3
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
+/* base entry for packet ring/port */
+#define PKT_RING_MSIX_BASE 0
+#define NON_RING_MSIX_BASE 192
 
 /**
  * nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -18,13 +24,14 @@
  */
 static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
 {
-        struct bh_data *slc = data;
-        union nps_pkt_slc_cnts pkt_slc_cnts;
+        struct nitrox_q_vector *qvec = data;
+        union nps_pkt_slc_cnts slc_cnts;
+        struct nitrox_cmdq *cmdq = qvec->cmdq;
 
-        pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
         /* New packet on SLC output port */
-        if (pkt_slc_cnts.s.slc_int)
-                tasklet_hi_schedule(&slc->resp_handler);
+        if (slc_cnts.s.slc_int)
+                tasklet_hi_schedule(&qvec->resp_tasklet);
 
         return IRQ_HANDLED;
 }
@@ -191,56 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
         dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
 }
 
-/**
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
- * @ndev: NITROX device
- */
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
+static void nps_core_int_tasklet(unsigned long data)
 {
-        union nps_core_int_active core_int_active;
+        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+        struct nitrox_device *ndev = qvec->ndev;
 
-        core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
-
-        if (core_int_active.s.nps_core)
-                clear_nps_core_err_intr(ndev);
-
-        if (core_int_active.s.nps_pkt)
-                clear_nps_pkt_err_intr(ndev);
-
-        if (core_int_active.s.pom)
-                clear_pom_err_intr(ndev);
-
-        if (core_int_active.s.pem)
-                clear_pem_err_intr(ndev);
-
-        if (core_int_active.s.lbc)
-                clear_lbc_err_intr(ndev);
-
-        if (core_int_active.s.efl)
-                clear_efl_err_intr(ndev);
-
-        if (core_int_active.s.bmi)
-                clear_bmi_err_intr(ndev);
-
-        /* If more work callback the ISR, set resend */
-        core_int_active.s.resend = 1;
-        nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
+        /* if pf mode do queue recovery */
+        if (ndev->mode == __NDEV_MODE_PF) {
+        } else {
+                /**
+                 * if VF(s) enabled communicate the error information
+                 * to VF(s)
+                 */
+        }
 }
 
+/**
+ * nps_core_int_isr - interrupt handler for NITROX errors and
+ *   mailbox communication
+ */
 static irqreturn_t nps_core_int_isr(int irq, void *data)
 {
         struct nitrox_device *ndev = data;
+        union nps_core_int_active core_int;
 
-        clear_nps_core_int_active(ndev);
+        core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+
+        if (core_int.s.nps_core)
+                clear_nps_core_err_intr(ndev);
+
+        if (core_int.s.nps_pkt)
+                clear_nps_pkt_err_intr(ndev);
+
+        if (core_int.s.pom)
+                clear_pom_err_intr(ndev);
+
+        if (core_int.s.pem)
+                clear_pem_err_intr(ndev);
+
+        if (core_int.s.lbc)
+                clear_lbc_err_intr(ndev);
+
+        if (core_int.s.efl)
+                clear_efl_err_intr(ndev);
+
+        if (core_int.s.bmi)
+                clear_bmi_err_intr(ndev);
+
+        /* If more work callback the ISR, set resend */
+        core_int.s.resend = 1;
+        nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
 
         return IRQ_HANDLED;
 }
 
-static int nitrox_enable_msix(struct nitrox_device *ndev)
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
 {
-        struct msix_entry *entries;
-        char **names;
-        int i, nr_entries, ret;
+        struct pci_dev *pdev = ndev->pdev;
+        int i;
+
+        for (i = 0; i < ndev->num_vecs; i++) {
+                struct nitrox_q_vector *qvec;
+                int vec;
+
+                qvec = ndev->qvec + i;
+                if (!qvec->valid)
+                        continue;
+
+                /* get the vector number */
+                vec = pci_irq_vector(pdev, i);
+                irq_set_affinity_hint(vec, NULL);
+                free_irq(vec, qvec);
+
+                tasklet_disable(&qvec->resp_tasklet);
+                tasklet_kill(&qvec->resp_tasklet);
+                qvec->valid = false;
+        }
+        kfree(ndev->qvec);
+        pci_free_irq_vectors(pdev);
+}
+
+int nitrox_register_interrupts(struct nitrox_device *ndev)
+{
+        struct pci_dev *pdev = ndev->pdev;
+        struct nitrox_q_vector *qvec;
+        int nr_vecs, vec, cpu;
+        int ret, i;
 
         /*
          * PF MSI-X vectors
@@ -254,216 +297,71 @@ static int nitrox_enable_msix(struct nitrox_device *ndev)
          * ....
          * Entry 192: NPS_CORE_INT_ACTIVE
          */
-        nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
-        entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
-                               GFP_KERNEL, ndev->node);
-        if (!entries)
-                return -ENOMEM;
-
-        names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
-        if (!names) {
-                kfree(entries);
-                return -ENOMEM;
-        }
-
-        /* fill entires */
-        for (i = 0; i < (nr_entries - 1); i++)
-                entries[i].entry = i;
-
-        entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
-
-        for (i = 0; i < nr_entries; i++) {
-                *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
-                if (!(*(names + i))) {
-                        ret = -ENOMEM;
-                        goto msix_fail;
-                }
-        }
-        ndev->msix.entries = entries;
-        ndev->msix.names = names;
-        ndev->msix.nr_entries = nr_entries;
-
-        ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
-                                    ndev->msix.nr_entries);
-        if (ret) {
-                dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
-                        ret);
-                goto msix_fail;
-        }
-        return 0;
-
-msix_fail:
-        for (i = 0; i < nr_entries; i++)
-                kfree(*(names + i));
-
-        kfree(entries);
-        kfree(names);
-        return ret;
-}
-
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
-{
-        int i;
-
-        if (!ndev->bh.slc)
-                return;
-
-        for (i = 0; i < ndev->nr_queues; i++) {
-                struct bh_data *bh = &ndev->bh.slc[i];
-
-                tasklet_disable(&bh->resp_handler);
-                tasklet_kill(&bh->resp_handler);
-        }
-        kfree(ndev->bh.slc);
-        ndev->bh.slc = NULL;
-}
-
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
-{
-        u32 size;
-        int i;
-
-        size = ndev->nr_queues * sizeof(struct bh_data);
-        ndev->bh.slc = kzalloc(size, GFP_KERNEL);
-        if (!ndev->bh.slc)
-                return -ENOMEM;
-
-        for (i = 0; i < ndev->nr_queues; i++) {
-                struct bh_data *bh = &ndev->bh.slc[i];
-                u64 offset;
-
-                offset = NPS_PKT_SLC_CNTSX(i);
-                /* pre calculate completion count address */
-                bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
-                bh->cmdq = &ndev->pkt_inq[i];
-
-                tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
-                             (unsigned long)bh);
-        }
-
-        return 0;
-}
-
-static int nitrox_request_irqs(struct nitrox_device *ndev)
-{
-        struct pci_dev *pdev = ndev->pdev;
-        struct msix_entry *msix_ent = ndev->msix.entries;
-        int nr_ring_vectors, i = 0, ring, cpu, ret;
-        char *name;
-
-        /*
-         * PF MSI-X vectors
-         *
-         * Entry 0: NPS PKT ring 0
-         * Entry 1: AQMQ ring 0
-         * Entry 2: ZQM ring 0
-         * Entry 3: NPS PKT ring 1
-         * ....
-         * Entry 192: NPS_CORE_INT_ACTIVE
-         */
-        nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
-
-        /* request irq for pkt ring/ports only */
-        while (i < nr_ring_vectors) {
-                name = *(ndev->msix.names + i);
-                ring = (i / NR_RING_VECTORS);
-                snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
-                         ndev->idx, ring);
-
-                ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
-                                  name, &ndev->bh.slc[ring]);
-                if (ret) {
-                        dev_err(&pdev->dev, "failed to get irq %d for %s\n",
-                                msix_ent[i].vector, name);
-                        return ret;
-                }
-                cpu = ring % num_online_cpus();
-                irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
-
-                set_bit(i, ndev->msix.irqs);
-                i += NR_RING_VECTORS;
-        }
-
-        /* Request IRQ for NPS_CORE_INT_ACTIVE */
-        name = *(ndev->msix.names + i);
-        snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
-        ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
-        if (ret) {
-                dev_err(&pdev->dev, "failed to get irq %d for %s\n",
-                        msix_ent[i].vector, name);
-                return ret;
-        }
-        set_bit(i, ndev->msix.irqs);
-
-        return 0;
-}
-
-static void nitrox_disable_msix(struct nitrox_device *ndev)
-{
-        struct msix_entry *msix_ent = ndev->msix.entries;
-        char **names = ndev->msix.names;
-        int i = 0, ring, nr_ring_vectors;
-
-        nr_ring_vectors = ndev->msix.nr_entries - 1;
-
-        /* clear pkt ring irqs */
-        while (i < nr_ring_vectors) {
-                if (test_and_clear_bit(i, ndev->msix.irqs)) {
-                        ring = (i / NR_RING_VECTORS);
-                        irq_set_affinity_hint(msix_ent[i].vector, NULL);
-                        free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
-                }
-                i += NR_RING_VECTORS;
-        }
-        irq_set_affinity_hint(msix_ent[i].vector, NULL);
-        free_irq(msix_ent[i].vector, ndev);
-        clear_bit(i, ndev->msix.irqs);
-
-        kfree(ndev->msix.entries);
-        for (i = 0; i < ndev->msix.nr_entries; i++)
-                kfree(*(names + i));
-
-        kfree(names);
-        pci_disable_msix(ndev->pdev);
-}
-
-/**
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
- * @ndev: NITROX device
- */
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
-{
-        nitrox_disable_msix(ndev);
-        nitrox_cleanup_pkt_slc_bh(ndev);
-}
-
-/**
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
- * @ndev: NITROX device
- *
- * Return: 0 on success, a negative value on failure.
- */
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
-{
-        int err;
-
-        err = nitrox_setup_pkt_slc_bh(ndev);
-        if (err)
-                return err;
-
-        err = nitrox_enable_msix(ndev);
-        if (err)
-                goto msix_fail;
-
-        err = nitrox_request_irqs(ndev);
-        if (err)
-                goto irq_fail;
+        nr_vecs = pci_msix_vec_count(pdev);
+
+        /* Enable MSI-X */
+        ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+        if (ret < 0) {
+                dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
+                return ret;
+        }
+        ndev->num_vecs = nr_vecs;
+
+        ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
+        if (!ndev->qvec) {
+                pci_free_irq_vectors(pdev);
+                return -ENOMEM;
+        }
+
+        /* request irqs for packet rings/ports */
+        for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
+                qvec = &ndev->qvec[i];
+
+                qvec->ring = i / NR_RING_VECTORS;
+                if (qvec->ring >= ndev->nr_queues)
+                        break;
+
+                snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
+                /* get the vector number */
+                vec = pci_irq_vector(pdev, i);
+                ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
+                if (ret) {
+                        dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
+                                qvec->ring);
+                        goto irq_fail;
+                }
+                cpu = qvec->ring % num_online_cpus();
+                irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+                tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
+                             (unsigned long)qvec);
+                qvec->cmdq = &ndev->pkt_inq[qvec->ring];
+                qvec->valid = true;
+        }
+
+        /* request irqs for non ring vectors */
+        i = NON_RING_MSIX_BASE;
+        qvec = &ndev->qvec[i];
+
+        snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
+        /* get the vector number */
+        vec = pci_irq_vector(pdev, i);
+        ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+        if (ret) {
+                dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
+                goto irq_fail;
+        }
+        cpu = num_online_cpus();
+        irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+        tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+                     (unsigned long)qvec);
+        qvec->ndev = ndev;
+        qvec->valid = true;
 
         return 0;
 
 irq_fail:
-        nitrox_disable_msix(ndev);
-msix_fail:
-        nitrox_cleanup_pkt_slc_bh(ndev);
-        return err;
+        nitrox_unregister_interrupts(ndev);
+        return ret;
 }
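
Aside (illustrative, not part of the patch): the per-vector context reaches the
tasklet by packing the struct pointer into the tasklet's unsigned long cookie,
the same idiom the code above uses for struct nitrox_q_vector. The my_qvec
names below are hypothetical.

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/types.h>

struct my_qvec {
        int ring;
        struct tasklet_struct resp_tasklet;
};

static void my_resp_tasklet(unsigned long data)
{
        /* recover the context pointer packed into the cookie */
        struct my_qvec *qvec = (struct my_qvec *)(uintptr_t)data;

        pr_info("response tasklet for ring %d\n", qvec->ring);
}

static void my_qvec_setup(struct my_qvec *qvec, int ring)
{
        qvec->ring = ring;
        /* the cookie is the struct pointer itself, not a ring index */
        tasklet_init(&qvec->resp_tasklet, my_resp_tasklet, (unsigned long)qvec);
}

Killing the tasklet before freeing the structure, as nitrox_unregister_interrupts()
does, ensures the callback can never run against freed memory.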

diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644

@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_ISR_H
+#define __NITROX_ISR_H
+
+#include "nitrox_dev.h"
+
+int nitrox_register_interrupts(struct nitrox_device *ndev);
+void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+
+#endif /* __NITROX_ISR_H */

diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c

@@ -69,6 +69,7 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
         nitrox_cmdq_reset(cmdq);
 
         cmdq->dbell_csr_addr = NULL;
+        cmdq->compl_cnt_csr_addr = NULL;
         cmdq->unalign_base = NULL;
         cmdq->base = NULL;
         cmdq->unalign_dma = 0;
@@ -112,6 +113,9 @@ static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
                 /* packet input ring doorbell address */
                 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
                 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+                /* packet solicit port completion count address */
+                offset = NPS_PKT_SLC_CNTSX(i);
+                cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
 
                 err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
                 if (err)

diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c

@@ -12,6 +12,7 @@
 #include "nitrox_common.h"
 #include "nitrox_csr.h"
 #include "nitrox_hal.h"
+#include "nitrox_isr.h"
 
 #define CNN55XX_DEV_ID  0x12
 #define MAX_PF_QUEUES   64
@@ -244,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
         if (err)
                 return err;
 
-        err = nitrox_pf_init_isr(ndev);
+        err = nitrox_register_interrupts(ndev);
         if (err)
                 nitrox_common_sw_cleanup(ndev);
 
@@ -253,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
 
 static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
 {
-        nitrox_pf_cleanup_isr(ndev);
+        nitrox_unregister_interrupts(ndev);
         nitrox_common_sw_cleanup(ndev);
 }
 

diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c

@@ -721,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
 }
 
 /**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
  */
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
 {
-        struct bh_data *bh = (void *)(uintptr_t)(data);
-        struct nitrox_cmdq *cmdq = bh->cmdq;
-        union nps_pkt_slc_cnts pkt_slc_cnts;
+        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+        struct nitrox_cmdq *cmdq = qvec->cmdq;
+        union nps_pkt_slc_cnts slc_cnts;
 
         /* read completion count */
-        pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
         /* resend the interrupt if more work to do */
-        pkt_slc_cnts.s.resend = 1;
+        slc_cnts.s.resend = 1;
 
         process_response_list(cmdq);
@@ -740,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
          * clear the interrupt with resend bit enabled,
          * MSI-X interrupt generates if Completion count > Threshold
          */
-        writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+        writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
 
         /* order the writes */
         mmiowb();

diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c

@@ -5,6 +5,7 @@
 #include "nitrox_dev.h"
 #include "nitrox_hal.h"
 #include "nitrox_common.h"
+#include "nitrox_isr.h"
 
 static inline bool num_vfs_valid(int num_vfs)
 {
@@ -55,7 +56,7 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev)
         nitrox_crypto_unregister();
 
         /* cleanup PF resources */
-        nitrox_pf_cleanup_isr(ndev);
+        nitrox_unregister_interrupts(ndev);
         nitrox_common_sw_cleanup(ndev);
 }
 
@@ -68,7 +69,7 @@ static int pf_sriov_init(struct nitrox_device *ndev)
         if (err)
                 return err;
 
-        err = nitrox_pf_init_isr(ndev);
+        err = nitrox_register_interrupts(ndev);
         if (err) {
                 nitrox_common_sw_cleanup(ndev);
                 return err;