qed: move chain methods to a separate file

Move the chain allocation/freeing functions to a new file so that they
are not mixed in with hardware-related code.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a08c9b2c7c
parent bdaf98f6d5
Author: Alexander Lobakin, 2020-07-23 01:10:33 +03:00 (committed by David S. Miller)
3 changed files with 303 additions and 273 deletions

drivers/net/ethernet/qlogic/qed/Makefile

@@ -4,6 +4,7 @@
obj-$(CONFIG_QED) := qed.o

qed-y := \
        qed_chain.o \
        qed_cxt.o \
        qed_dcbx.o \
        qed_debug.o \

drivers/net/ethernet/qlogic/qed/qed_chain.c (new file)

@@ -0,0 +1,302 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

static void qed_chain_free_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        struct qed_chain_next *next;
        dma_addr_t phys, phys_next;
        void *virt, *virt_next;
        u32 size, i;

        size = chain->elem_size * chain->usable_per_page;
        virt = chain->p_virt_addr;
        phys = chain->p_phys_addr;

        for (i = 0; i < chain->page_cnt; i++) {
                if (!virt)
                        break;

                next = virt + size;
                virt_next = next->next_virt;
                phys_next = HILO_DMA_REGPAIR(next->next_phys);

                dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);

                virt = virt_next;
                phys = phys_next;
        }
}

static void qed_chain_free_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
        if (!chain->p_virt_addr)
                return;

        dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
                          chain->p_virt_addr, chain->p_phys_addr);
}

static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        struct addr_tbl_entry *entry;
        u32 pbl_size, i;

        if (!chain->pbl.pp_addr_tbl)
                return;

        for (i = 0; i < chain->page_cnt; i++) {
                entry = chain->pbl.pp_addr_tbl + i;
                if (!entry->virt_addr)
                        break;

                dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
                                  entry->dma_map);
        }

        pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

        if (!chain->b_external_pbl)
                dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
                                  chain->pbl_sp.p_phys_table);

        vfree(chain->pbl.pp_addr_tbl);
        chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
        switch (chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                qed_chain_free_next_ptr(cdev, chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                qed_chain_free_single(cdev, chain);
                break;
        case QED_CHAIN_MODE_PBL:
                qed_chain_free_pbl(cdev, chain);
                break;
        default:
                break;
        }
}

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
                             enum qed_chain_cnt_type cnt_type,
                             size_t elem_size, u32 page_cnt)
{
        u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

        /* The actual chain size can be larger than the maximal possible value
         * after rounding up the requested elements number to pages, and after
         * taking into account the unusable elements (next-ptr elements).
         * The size of a "u16" chain can be (U16_MAX + 1) since the chain
         * size/capacity fields are of u32 type.
         */
        switch (cnt_type) {
        case QED_CHAIN_CNT_TYPE_U16:
                if (chain_size > U16_MAX + 1)
                        break;

                return 0;
        case QED_CHAIN_CNT_TYPE_U32:
                if (chain_size > U32_MAX)
                        break;

                return 0;
        default:
                return -EINVAL;
        }

        DP_NOTICE(cdev,
                  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                  chain_size);

        return -EINVAL;
}

static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        void *virt, *virt_prev = NULL;
        dma_addr_t phys;
        u32 i;

        for (i = 0; i < chain->page_cnt; i++) {
                virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
                                          GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(chain, virt, phys);
                        qed_chain_reset(chain);
                } else {
                        qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
                                                     phys);
                }

                virt_prev = virt;
        }

        /* Last page's next element should point to the beginning of the
         * chain.
         */
        qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
                                     chain->p_phys_addr);

        return 0;
}

static int qed_chain_alloc_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
                                  &phys, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        qed_chain_init_mem(chain, virt, phys);
        qed_chain_reset(chain);

        return 0;
}

static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
                               struct qed_chain_ext_pbl *ext_pbl)
{
        struct device *dev = &cdev->pdev->dev;
        struct addr_tbl_entry *addr_tbl;
        dma_addr_t phys, pbl_phys;
        void *pbl_virt;
        u32 page_cnt, i;
        size_t size;
        void *virt;

        page_cnt = chain->page_cnt;

        size = array_size(page_cnt, sizeof(*addr_tbl));
        if (unlikely(size == SIZE_MAX))
                return -EOVERFLOW;

        addr_tbl = vzalloc(size);
        if (!addr_tbl)
                return -ENOMEM;

        chain->pbl.pp_addr_tbl = addr_tbl;

        if (ext_pbl) {
                size = 0;
                pbl_virt = ext_pbl->p_pbl_virt;
                pbl_phys = ext_pbl->p_pbl_phys;

                chain->b_external_pbl = true;
        } else {
                size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
                if (unlikely(size == SIZE_MAX))
                        return -EOVERFLOW;

                pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
                                              GFP_KERNEL);
        }

        if (!pbl_virt)
                return -ENOMEM;

        chain->pbl_sp.p_virt_table = pbl_virt;
        chain->pbl_sp.p_phys_table = pbl_phys;

        for (i = 0; i < page_cnt; i++) {
                virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
                                          GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(chain, virt, phys);
                        qed_chain_reset(chain);
                }

                /* Fill the PBL table with the physical address of the page */
                *(dma_addr_t *)pbl_virt = phys;
                pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;

                /* Keep the virtual address of the page */
                addr_tbl[i].virt_addr = virt;
                addr_tbl[i].dma_map = phys;
        }

        return 0;
}

int qed_chain_alloc(struct qed_dev *cdev,
                    enum qed_chain_use_mode intended_use,
                    enum qed_chain_mode mode,
                    enum qed_chain_cnt_type cnt_type,
                    u32 num_elems,
                    size_t elem_size,
                    struct qed_chain *chain,
                    struct qed_chain_ext_pbl *ext_pbl)
{
        u32 page_cnt;
        int rc;

        if (mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

        rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
        if (rc) {
                DP_NOTICE(cdev,
                          "Cannot allocate a chain with the given arguments:\n");
                DP_NOTICE(cdev,
                          "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
                          intended_use, mode, cnt_type, num_elems, elem_size);
                return rc;
        }

        qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
                              cnt_type);

        switch (mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                rc = qed_chain_alloc_next_ptr(cdev, chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                rc = qed_chain_alloc_single(cdev, chain);
                break;
        case QED_CHAIN_MODE_PBL:
                rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
                break;
        default:
                return -EINVAL;
        }

        if (!rc)
                return 0;

        qed_chain_free(cdev, chain);

        return rc;
}
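
For a sense of what qed_chain_alloc_sanity_check() rejects, here is a worked
instance, assuming the usual 4 KiB QED_CHAIN_PAGE_SIZE: with 8-byte elements,
ELEMS_PER_PAGE(8) = 4096 / 8 = 512, so a QED_CHAIN_CNT_TYPE_U16 chain may
span at most (U16_MAX + 1) / 512 = 128 pages. A request that rounds up to
129 pages gives chain_size = 512 * 129 = 66048 > 65536, and the allocation
fails with -EINVAL before any DMA memory is touched.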
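
And a minimal caller-side sketch of the API that now lives in qed_chain.c.
This is not part of this commit: struct my_desc, the element count, and the
wrapper names are made up for illustration; the enums and the
qed_chain_alloc()/qed_chain_free() calls follow the signatures above.

#include <linux/qed/qed_chain.h>

/* Hypothetical 8-byte completion descriptor, for illustration only */
struct my_desc {
        __le64 data;
};

static int my_alloc_cq_chain(struct qed_dev *cdev, struct qed_chain *chain)
{
        /* 256 elements, u16 producer/consumer indices, PBL mode,
         * no external PBL (last argument is NULL).
         */
        return qed_chain_alloc(cdev,
                               QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                               QED_CHAIN_MODE_PBL,
                               QED_CHAIN_CNT_TYPE_U16,
                               256, sizeof(struct my_desc),
                               chain, NULL);
}

static void my_free_cq_chain(struct qed_dev *cdev, struct qed_chain *chain)
{
        /* Dispatches on chain->mode internally; safe for all three modes */
        qed_chain_free(cdev, chain);
}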

drivers/net/ethernet/qlogic/qed/qed_dev.c

@@ -4716,279 +4716,6 @@ void qed_hw_remove(struct qed_dev *cdev)

        qed_mcp_nvm_info_free(p_hwfn);
}

static void qed_chain_free_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *p_chain)
{
        void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
        dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
        struct qed_chain_next *p_next;
        u32 size, i;

        if (!p_virt)
                return;

        size = p_chain->elem_size * p_chain->usable_per_page;

        for (i = 0; i < p_chain->page_cnt; i++) {
                if (!p_virt)
                        break;

                p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
                p_virt_next = p_next->next_virt;
                p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

                dma_free_coherent(&cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

                p_virt = p_virt_next;
                p_phys = p_phys_next;
        }
}

static void qed_chain_free_single(struct qed_dev *cdev,
                                  struct qed_chain *p_chain)
{
        if (!p_chain->p_virt_addr)
                return;

        dma_free_coherent(&cdev->pdev->dev,
                          QED_CHAIN_PAGE_SIZE,
                          p_chain->p_virt_addr, p_chain->p_phys_addr);
}

static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
        struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
        u32 page_cnt = p_chain->page_cnt, i, pbl_size;

        if (!pp_addr_tbl)
                return;

        for (i = 0; i < page_cnt; i++) {
                if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
                        break;

                dma_free_coherent(&cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE,
                                  pp_addr_tbl[i].virt_addr,
                                  pp_addr_tbl[i].dma_map);
        }

        pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

        if (!p_chain->b_external_pbl)
                dma_free_coherent(&cdev->pdev->dev,
                                  pbl_size,
                                  p_chain->pbl_sp.p_virt_table,
                                  p_chain->pbl_sp.p_phys_table);

        vfree(p_chain->pbl.pp_addr_tbl);
        p_chain->pbl.pp_addr_tbl = NULL;
}

void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                qed_chain_free_next_ptr(cdev, p_chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                qed_chain_free_single(cdev, p_chain);
                break;
        case QED_CHAIN_MODE_PBL:
                qed_chain_free_pbl(cdev, p_chain);
                break;
        }
}

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
                             enum qed_chain_cnt_type cnt_type,
                             size_t elem_size, u32 page_cnt)
{
        u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

        /* The actual chain size can be larger than the maximal possible value
         * after rounding up the requested elements number to pages, and after
         * taking into account the unusable elements (next-ptr elements).
         * The size of a "u16" chain can be (U16_MAX + 1) since the chain
         * size/capacity fields are of a u32 type.
         */
        if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
             chain_size > ((u32)U16_MAX + 1)) ||
            (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
                DP_NOTICE(cdev,
                          "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                          chain_size);
                return -EINVAL;
        }

        return 0;
}

static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
        void *p_virt = NULL, *p_virt_prev = NULL;
        dma_addr_t p_phys = 0;
        u32 i;

        for (i = 0; i < p_chain->page_cnt; i++) {
                p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(p_chain, p_virt, p_phys);
                        qed_chain_reset(p_chain);
                } else {
                        qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
                                                     p_virt, p_phys);
                }

                p_virt_prev = p_virt;
        }

        /* Last page's next element should point to the beginning of the
         * chain.
         */
        qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
                                     p_chain->p_virt_addr,
                                     p_chain->p_phys_addr);

        return 0;
}

static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
        dma_addr_t p_phys = 0;
        void *p_virt = NULL;

        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
        if (!p_virt)
                return -ENOMEM;

        qed_chain_init_mem(p_chain, p_virt, p_phys);
        qed_chain_reset(p_chain);

        return 0;
}

static int
qed_chain_alloc_pbl(struct qed_dev *cdev,
                    struct qed_chain *p_chain,
                    struct qed_chain_ext_pbl *ext_pbl)
{
        u32 page_cnt = p_chain->page_cnt, size, i;
        dma_addr_t p_phys = 0, p_pbl_phys = 0;
        struct addr_tbl_entry *pp_addr_tbl;
        u8 *p_pbl_virt = NULL;
        void *p_virt = NULL;

        size = page_cnt * sizeof(*pp_addr_tbl);
        pp_addr_tbl = vzalloc(size);
        if (!pp_addr_tbl)
                return -ENOMEM;

        /* The allocation of the PBL table is done with its full size, since it
         * is expected to be successive.
         * qed_chain_init_pbl_mem() is called even in a case of an allocation
         * failure, since tbl was previously allocated, and it
         * should be saved to allow its freeing during the error flow.
         */
        size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

        if (!ext_pbl) {
                p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                                size, &p_pbl_phys, GFP_KERNEL);
        } else {
                p_pbl_virt = ext_pbl->p_pbl_virt;
                p_pbl_phys = ext_pbl->p_pbl_phys;
                p_chain->b_external_pbl = true;
        }

        qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
        if (!p_pbl_virt)
                return -ENOMEM;

        for (i = 0; i < page_cnt; i++) {
                p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(p_chain, p_virt, p_phys);
                        qed_chain_reset(p_chain);
                }

                /* Fill the PBL table with the physical address of the page */
                *(dma_addr_t *)p_pbl_virt = p_phys;

                /* Keep the virtual address of the page */
                p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
                p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;

                p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
        }

        return 0;
}

int qed_chain_alloc(struct qed_dev *cdev,
                    enum qed_chain_use_mode intended_use,
                    enum qed_chain_mode mode,
                    enum qed_chain_cnt_type cnt_type,
                    u32 num_elems,
                    size_t elem_size,
                    struct qed_chain *p_chain,
                    struct qed_chain_ext_pbl *ext_pbl)
{
        u32 page_cnt;
        int rc = 0;

        if (mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

        rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
        if (rc) {
                DP_NOTICE(cdev,
                          "Cannot allocate a chain with the given arguments:\n");
                DP_NOTICE(cdev,
                          "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
                          intended_use, mode, cnt_type, num_elems, elem_size);
                return rc;
        }

        qed_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
                              mode, cnt_type);

        switch (mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                rc = qed_chain_alloc_next_ptr(cdev, p_chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                rc = qed_chain_alloc_single(cdev, p_chain);
                break;
        case QED_CHAIN_MODE_PBL:
                rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
                break;
        }

        if (rc)
                goto nomem;

        return 0;

nomem:
        qed_chain_free(cdev, p_chain);
        return rc;
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {