RDMA v6.9


Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Very small update this cycle:

   - Minor code improvements in hfi1, rxe, ipoib, mana, cxgb4, mlx5,
     irdma, rtrs

   - Simplify the hns hem mechanism

   - Fix EFA's MSI-X allocation in resource constrained configurations

   - Fix a KASAN splat in srpt

   - Narrow hns's congestion control selection to QP granularity and
     allow userspace to select it

   - Solve a parallel module loading race between the CM module and a
     driver module

   - Flexible array cleanup

   - Dump hns's SCC context to 'rdma res' for debugging

   - Make mana correctly build page lists for HW objects that require a
     0 offset

   - Stuck CM ID debugging"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (29 commits)
  RDMA/cm: add timeout to cm_destroy_id wait
  RDMA/mana_ib: Use virtual address in dma regions for MRs
  RDMA/mana_ib: Fix bug in creation of dma regions
  RDMA/hns: Append SCC context to the raw dump of QPC
  RDMA/uverbs: Avoid -Wflex-array-member-not-at-end warnings
  RDMA/hns: Support userspace configuring congestion control algorithm with QP granularity
  RDMA/rtrs-clt: Check strnlen return len in sysfs mpath_policy_store()
  RDMA/uverbs: Remove flexible arrays from struct *_filter
  RDMA/device: Fix a race between mad_client and cm_client init
  RDMA/hns: Fix mis-modifying default congestion control algorithm
  RDMA/rxe: Remove unused 'iova' parameter from rxe_mr_init_user
  RDMA/srpt: Do not register event handler until srpt device is fully setup
  RDMA/irdma: Remove duplicate assignment
  RDMA/efa: Limit EQs to available MSI-X vectors
  RDMA/mlx5: Delete unused mlx5_ib_copy_pas prototype
  RDMA/cxgb4: Delete unused c4iw_ep_redirect prototype
  RDMA/mana_ib: Introduce mana_ib_install_cq_cb helper function
  RDMA/mana_ib: Introduce mana_ib_get_netdev helper function
  RDMA/mana_ib: Introduce mdev_to_gc helper function
  RDMA/hns: Simplify 'struct hns_roce_hem' allocation
  ...
Linus Torvalds 2024-03-18 15:34:03 -07:00
commit 6207b37eb5
44 changed files with 900 additions and 699 deletions

View File

@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@ -1025,10 +1026,20 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
@ -1135,7 +1146,14 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
do {
ret = wait_for_completion_timeout(&cm_id_priv->comp,
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
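
The stuck CM ID debugging boils down to the loop above: the unbounded wait_for_completion() is replaced by wait_for_completion_timeout() in a loop that logs the offending ID every 10 seconds but still never gives up. A minimal sketch of that pattern, assuming kernel context; the timeout constant and report message below are placeholders, not the cm.c code:

/* Hedged sketch: bounded wait that reports progress instead of
 * blocking silently forever. WAIT_TIMEOUT_MS and the message are
 * illustrative, not part of the upstream patch.
 */
#define WAIT_TIMEOUT_MS 10000

static void wait_with_watchdog(struct completion *comp)
{
    unsigned long left;

    do {
        left = wait_for_completion_timeout(comp,
                        msecs_to_jiffies(WAIT_TIMEOUT_MS));
        if (!left)      /* timed out, object still referenced */
            pr_err("%s: still waiting for completion\n", __func__);
    } while (!left);    /* keep waiting; only log, never leak */
}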

View File

@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
{
int ret;
down_write(&clients_rwsem);
lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
goto out;
return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
out:
up_write(&clients_rwsem);
return ret;
return 0;
}
static void remove_client_id(struct ib_client *client)
@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
/*
* The devices_rwsem is held in write mode to ensure that a racing
* ib_register_device() sees a consisent view of clients and devices.
*/
down_write(&devices_rwsem);
down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
return ret;
goto out;
down_read(&devices_rwsem);
need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
if (ret) {
up_read(&devices_rwsem);
ib_unregister_client(client);
return ret;
}
if (ret)
goto out;
}
up_read(&devices_rwsem);
return 0;
ret = 0;
out:
up_write(&clients_rwsem);
up_write(&devices_rwsem);
if (need_unreg && ret)
ib_unregister_client(client);
return ret;
}
EXPORT_SYMBOL(ib_register_client);
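
The race fix is a lock-scope change: ib_register_client() now holds devices_rwsem in write mode across the whole registration so a concurrent ib_register_device() sees a consistent view, and failures unwind through one exit label with a need_unreg flag so ib_unregister_client() is only called outside the locks. A compressed sketch of that shape, assuming kernel context; the lock names and helpers here are invented, not the ib_core symbols:

/* Hedged sketch of the nested-lock error unwind; "thing" and the
 * helper functions are hypothetical, only the shape matches the patch.
 */
static DECLARE_RWSEM(outer_lock);   /* device list, taken first */
static DECLARE_RWSEM(inner_lock);   /* client list, taken second */

static int register_thing(struct thing *t)
{
    bool need_unreg = false;
    int ret;

    down_write(&outer_lock);
    down_write(&inner_lock);

    ret = assign_id(t);             /* hypothetical helper */
    if (ret)
        goto out;

    need_unreg = true;              /* past this point, failure must unregister */
    ret = add_contexts(t);          /* hypothetical helper */
out:
    up_write(&inner_lock);
    up_write(&outer_lock);
    if (need_unreg && ret)
        unregister_thing(t);        /* unwind outside both locks */
    return ret;
}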

View File

@ -2737,7 +2737,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_eth_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2748,7 +2748,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_ipv4_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2759,7 +2759,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV6:
ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_ipv6_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2775,7 +2775,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_tcp_udp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2786,7 +2786,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_tunnel_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2801,7 +2801,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
return -EINVAL;
break;
case IB_FLOW_SPEC_ESP:
ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_esp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2812,7 +2812,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_GRE:
ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_gre_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@ -2823,7 +2823,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_MPLS:
ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
ib_filter_sz = sizeof(struct ib_flow_mpls_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
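
The substitution above is only safe because every *_filter struct used to end in a zero-length real_sz[] flexible array that merely marked the end of the fixed fields, so offsetof(..., real_sz) on the old layout and sizeof() on the trimmed layout name the same byte count. A small standalone illustration with a made-up filter struct (not the ib_flow_* definitions); it should print 16 twice on common ABIs:

#include <stdio.h>
#include <stddef.h>

/* Old layout: flexible array member used only as an end marker. */
struct old_filter {
    unsigned char dst_mac[6];
    unsigned char src_mac[6];
    unsigned short ether_type;
    unsigned short vlan_tag;
    unsigned char real_sz[];    /* no storage, just marks the end */
};

/* New layout: marker removed, sizeof() now gives the same answer. */
struct new_filter {
    unsigned char dst_mac[6];
    unsigned char src_mac[6];
    unsigned short ether_type;
    unsigned short vlan_tag;
};

int main(void)
{
    printf("offsetof(old, real_sz) = %zu\n",
           offsetof(struct old_filter, real_sz));
    printf("sizeof(new)            = %zu\n", sizeof(struct new_filter));
    return 0;
}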

View File

@ -36,13 +36,15 @@
#include "uverbs.h"
struct bundle_alloc_head {
struct bundle_alloc_head *next;
struct_group_tagged(bundle_alloc_head_hdr, hdr,
struct bundle_alloc_head *next;
);
u8 data[];
};
struct bundle_priv {
/* Must be first */
struct bundle_alloc_head alloc_head;
struct bundle_alloc_head_hdr alloc_head;
struct bundle_alloc_head *allocated_mem;
size_t internal_avail;
size_t internal_used;
@ -64,7 +66,7 @@ struct bundle_priv {
* Must be last. bundle ends in a flex array which overlaps
* internal_buffer.
*/
struct uverbs_attr_bundle bundle;
struct uverbs_attr_bundle_hdr bundle;
u64 internal_buffer[32];
};
@ -77,9 +79,10 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs)
{
struct bundle_priv *pbundle;
struct uverbs_attr_bundle *bundle;
size_t bundle_size =
offsetof(struct bundle_priv, internal_buffer) +
sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
sizeof(*bundle->attrs) * method_elm->key_bitmap_len +
sizeof(*pbundle->uattrs) * num_attrs;
method_elm->use_stack = bundle_size <= sizeof(*pbundle);
@ -107,7 +110,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
gfp_t flags)
{
struct bundle_priv *pbundle =
container_of(bundle, struct bundle_priv, bundle);
container_of(&bundle->hdr, struct bundle_priv, bundle);
size_t new_used;
void *res;
@ -149,7 +152,7 @@ static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
const struct uverbs_attr *attr)
{
struct bundle_priv *pbundle =
container_of(bundle, struct bundle_priv, bundle);
container_of(&bundle->hdr, struct bundle_priv, bundle);
u16 flags;
flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
@ -166,6 +169,8 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr,
u32 attr_bkey)
{
struct uverbs_attr_bundle *bundle =
container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
size_t array_len;
u32 *idr_vals;
@ -184,7 +189,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
return -EINVAL;
attr->uobjects =
uverbs_alloc(&pbundle->bundle,
uverbs_alloc(bundle,
array_size(array_len, sizeof(*attr->uobjects)));
if (IS_ERR(attr->uobjects))
return PTR_ERR(attr->uobjects);
@ -209,7 +214,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
for (i = 0; i != array_len; i++) {
attr->uobjects[i] = uverbs_get_uobject_from_file(
spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
idr_vals[i], &pbundle->bundle);
idr_vals[i], bundle);
if (IS_ERR(attr->uobjects[i])) {
ret = PTR_ERR(attr->uobjects[i]);
break;
@ -240,7 +245,9 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
struct uverbs_attr_bundle *bundle =
container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
struct uverbs_attr *e = &bundle->attrs[attr_bkey];
const struct uverbs_attr_spec *val_spec = spec;
struct uverbs_obj_attr *o_attr;
@ -288,7 +295,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
void *p;
p = uverbs_alloc(&pbundle->bundle, uattr->len);
p = uverbs_alloc(bundle, uattr->len);
if (IS_ERR(p))
return PTR_ERR(p);
@ -321,7 +328,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
*/
o_attr->uobject = uverbs_get_uobject_from_file(
spec->u.obj.obj_type, spec->u.obj.access,
uattr->data_s64, &pbundle->bundle);
uattr->data_s64, bundle);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
__set_bit(attr_bkey, pbundle->uobj_finalize);
@ -422,6 +429,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
unsigned int num_attrs)
{
int (*handler)(struct uverbs_attr_bundle *attrs);
struct uverbs_attr_bundle *bundle =
container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
unsigned int i;
@ -434,7 +443,7 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
if (!handler)
return -EIO;
pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
pbundle->uattrs = uverbs_alloc(bundle, uattrs_size);
if (IS_ERR(pbundle->uattrs))
return PTR_ERR(pbundle->uattrs);
if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
@ -453,25 +462,23 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
return -EINVAL;
if (pbundle->method_elm->has_udata)
uverbs_fill_udata(&pbundle->bundle,
&pbundle->bundle.driver_udata,
uverbs_fill_udata(bundle, &pbundle->bundle.driver_udata,
UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
else
pbundle->bundle.driver_udata = (struct ib_udata){};
if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
struct uverbs_obj_attr *destroy_attr =
&pbundle->bundle.attrs[destroy_bkey].obj_attr;
struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr;
ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
ret = uobj_destroy(destroy_attr->uobject, bundle);
if (ret)
return ret;
__clear_bit(destroy_bkey, pbundle->uobj_finalize);
ret = handler(&pbundle->bundle);
ret = handler(bundle);
uobj_put_destroy(destroy_attr->uobject);
} else {
ret = handler(&pbundle->bundle);
ret = handler(bundle);
}
/*
@ -481,10 +488,10 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
*/
if (!ret && pbundle->method_elm->has_udata) {
const struct uverbs_attr *attr =
uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
if (!IS_ERR(attr))
ret = uverbs_set_output(&pbundle->bundle, attr);
ret = uverbs_set_output(bundle, attr);
}
/*
@ -501,6 +508,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
struct uverbs_attr_bundle *bundle =
container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
struct bundle_alloc_head *memblock;
unsigned int i;
@ -508,20 +517,19 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
struct uverbs_attr *attr = &bundle->attrs[i];
uverbs_finalize_object(
attr->obj_attr.uobject,
attr->obj_attr.attr_elm->spec.u.obj.access,
test_bit(i, pbundle->uobj_hw_obj_valid),
commit,
&pbundle->bundle);
commit, bundle);
}
i = -1;
while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
struct uverbs_attr *attr = &bundle->attrs[i];
const struct uverbs_api_attr *attr_uapi;
void __rcu **slot;
@ -535,7 +543,7 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
commit, &pbundle->bundle);
commit, bundle);
}
}
@ -578,7 +586,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
method_elm->bundle_size -
offsetof(struct bundle_priv, internal_buffer);
pbundle->alloc_head.next = NULL;
pbundle->allocated_mem = &pbundle->alloc_head;
pbundle->allocated_mem = container_of(&pbundle->alloc_head,
struct bundle_alloc_head, hdr);
} else {
pbundle = &onstack;
pbundle->internal_avail = sizeof(pbundle->internal_buffer);
@ -596,8 +605,9 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
pbundle->user_attrs = user_attrs;
pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
sizeof(*pbundle->bundle.attrs),
sizeof(*pbundle->internal_buffer));
sizeof(*container_of(&pbundle->bundle,
struct uverbs_attr_bundle, hdr)->attrs),
sizeof(*pbundle->internal_buffer));
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
@ -700,11 +710,13 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
unsigned int attr_out)
{
struct bundle_priv *pbundle =
container_of(bundle, struct bundle_priv, bundle);
container_of(&bundle->hdr, struct bundle_priv, bundle);
struct uverbs_attr_bundle *bundle_aux =
container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr *in =
uverbs_attr_get(&pbundle->bundle, attr_in);
uverbs_attr_get(bundle_aux, attr_in);
const struct uverbs_attr *out =
uverbs_attr_get(&pbundle->bundle, attr_out);
uverbs_attr_get(bundle_aux, attr_out);
if (!IS_ERR(in)) {
udata->inlen = in->ptr_attr.len;
@ -829,7 +841,7 @@ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
u16 idx)
{
struct bundle_priv *pbundle =
container_of(bundle, struct bundle_priv, bundle);
container_of(&bundle->hdr, struct bundle_priv, bundle);
__set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
pbundle->uobj_hw_obj_valid);
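
The uverbs_ioctl.c rework leans on struct_group_tagged(): bundle_priv keeps its memory layout, but the fixed leading fields gain a separately named header type so the flexible attrs[] tail no longer sits nested inside another struct, and container_of() converts between the header and the full object. A rough userspace approximation of the idea, with simplified names; the kernel's macro in <linux/stddef.h> is more general than this hand-rolled union:

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-in for struct_group_tagged(): an anonymous union
 * exposes the members both directly and as a named header struct.
 */
struct bundle_hdr {
    unsigned int flags;
    void *context;
};

struct bundle {
    union {
        struct {
            unsigned int flags;
            void *context;
        };
        struct bundle_hdr hdr;
    };
    unsigned long attrs[];      /* flexible tail stays at the end */
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct bundle b = { .flags = 1, .context = NULL };
    struct bundle_hdr *h = &b.hdr;
    struct bundle *back = container_of(h, struct bundle, hdr);

    printf("round trip ok: %d\n", back == &b);   /* prints 1 */
    return 0;
}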

View File

@ -930,8 +930,6 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc);
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);

View File

@ -57,6 +57,7 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
unsigned int num_irq_vectors;
int admin_msix_vector_idx;
struct efa_irq admin_irq;

View File

@ -322,7 +322,9 @@ static int efa_create_eqs(struct efa_dev *dev)
int err;
int i;
neqs = min_t(unsigned int, neqs, num_online_cpus());
neqs = min_t(unsigned int, neqs,
dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
@ -468,34 +470,30 @@ static void efa_disable_msix(struct efa_dev *dev)
static int efa_enable_msix(struct efa_dev *dev)
{
int msix_vecs, irq_num;
int max_vecs, num_vecs;
/*
* Reserve the max msix vectors we might need, one vector is reserved
* for admin.
*/
msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
num_online_cpus() + 1);
max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
msix_vecs);
max_vecs);
dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
msix_vecs, PCI_IRQ_MSIX);
num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
max_vecs, PCI_IRQ_MSIX);
if (irq_num < 0) {
dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
irq_num);
if (num_vecs < 0) {
dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
num_vecs);
return -ENOSPC;
}
if (irq_num != msix_vecs) {
efa_disable_msix(dev);
dev_err(&dev->pdev->dev,
"Allocated %d MSI-X (out of %d requested)\n",
irq_num, msix_vecs);
return -ENOSPC;
}
dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);
dev->num_irq_vectors = num_vecs;
return 0;
}
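
The EFA fix has two visible parts: ask pci_alloc_irq_vectors() for a range (1 up to the ideal count) instead of an exact number, and later cap the completion EQs at the vectors actually granted. The arithmetic is simple enough to show standalone; the constants below mirror the driver's naming but the values are invented example input:

#include <stdio.h>

#define MGMNT_VECS 1                    /* one vector reserved for the admin queue */
#define COMP_EQS_VEC_BASE MGMNT_VECS    /* completion EQs start after it */

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned int dev_max_eqs = 32;      /* what the device could support */
    unsigned int online_cpus = 16;
    unsigned int granted_vectors = 4;   /* constrained host: only 4 MSI-X */

    unsigned int neqs = min_u(dev_max_eqs, online_cpus);

    /* The fix: never create more EQs than spare interrupt vectors. */
    neqs = min_u(neqs, granted_vectors - COMP_EQS_VEC_BASE);

    printf("creating %u completion EQs\n", neqs);   /* 3, not 16 */
    return 0;
}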

View File

@ -315,7 +315,7 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
* This routine returns the receive context associated
* with a a qp's qpn.
*
* Returns the context.
* Return: the context.
*/
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
struct rvt_qp *qp)
@ -710,7 +710,7 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
* The exp_lock must be held.
*
* Return:
* On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1
* On success: a value positive value between 0 and RXE_NUM_TID_FLOWS - 1
* On failure: -EAGAIN
*/
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
@ -1007,7 +1007,7 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list,
* pages are tested two at a time, i, i + 1 for contiguous
* pages and i - 1 and i contiguous pages.
*
* If any condition is false, any accumlated pages are flushed and
* If any condition is false, any accumulated pages are flushed and
* v0,v1 are emitted as separate PAGE_SIZE pagesets
*
* Otherwise, the current 8k is totaled for a future flush.
@ -1434,7 +1434,7 @@ static void kern_program_rcvarray(struct tid_rdma_flow *flow)
* (5) computes a tidarray with formatted TID entries which can be sent
* to the sender
* (6) Reserves and programs HW flows.
* (7) It also manages queing the QP when TID/flow resources are not
* (7) It also manages queueing the QP when TID/flow resources are not
* available.
*
* @req points to struct tid_rdma_request of which the segments are a part. The
@ -1604,7 +1604,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
}
/**
* hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information
* hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
* @req: the tid rdma request to be cleaned
*/
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
@ -2055,7 +2055,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
* req->clear_tail is advanced). However, when an earlier
* request is received, this request will not be complete any
* more (qp->s_tail_ack_queue is moved back, see below).
* Consequently, we need to update the TID flow info everytime
* Consequently, we need to update the TID flow info every time
* a duplicate request is received.
*/
bth0 = be32_to_cpu(ohdr->bth[0]);
@ -2219,7 +2219,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
/*
* 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
* (see hfi1_rc_rcv())
* 2. Put TID RDMA READ REQ into the response queueu (s_ack_queue)
* 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Initialize struct tid_rdma_flow info;
* - Copy TID entries;
@ -2439,7 +2439,7 @@ find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
{
/* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */
/* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@ -3649,7 +3649,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
* 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
* (see hfi1_rc_rcv())
* - Don't allow 0-length requests.
* 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue)
* 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Prepare struct tid_rdma_flow array?
* 3. Set the qp->s_ack_state as state diagram in design doc.
@ -4026,7 +4026,7 @@ unlock_r_lock:
void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
{
/* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */
/* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@ -5440,8 +5440,9 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
* the two state machines can step on each other with respect to the
* RVT_S_BUSY flag.
* Therefore, a modified test is used.
* @return true if the second leg is scheduled;
* false if the second leg is not scheduled.
*
* Return: %true if the second leg is scheduled;
* %false if the second leg is not scheduled.
*/
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{

View File

@ -108,6 +108,9 @@ enum {
HNS_ROCE_CMD_QUERY_CEQC = 0x92,
HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
/* SCC CTX commands */
HNS_ROCE_CMD_QUERY_SCCC = 0xa2,
/* SCC CTX BT commands */
HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,

View File

@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle;
int ret;
ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
&dma_handle);
if (!ret) {
ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
if (ret) {
ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
return -EINVAL;
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
hns_roce_get_mtr_ba(&hr_cq->mtr));
if (ret)
goto err_xa;

View File

@ -179,6 +179,7 @@ enum {
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_MAX_HOP_NUM 3
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
@ -269,6 +270,11 @@ struct hns_roce_hem_list {
dma_addr_t root_ba; /* pointer to the root ba table */
};
enum mtr_type {
MTR_DEFAULT = 0,
MTR_PBL,
};
struct hns_roce_buf_attr {
struct {
size_t size; /* region size */
@ -277,7 +283,10 @@ struct hns_roce_buf_attr {
unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
unsigned int user_access; /* umem access flag */
u64 iova;
enum mtr_type type;
bool mtt_only; /* only alloc buffer-required MTT memory */
bool adaptive; /* adaptive for page_shift and hopnum */
};
struct hns_roce_hem_cfg {
@ -585,6 +594,13 @@ struct hns_roce_work {
u32 queue_num;
};
enum hns_roce_cong_type {
CONG_TYPE_DCQCN,
CONG_TYPE_LDCP,
CONG_TYPE_HC3,
CONG_TYPE_DIP,
};
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@ -628,6 +644,7 @@ struct hns_roce_qp {
struct list_head sq_node; /* all send qps are on a list */
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
enum hns_roce_cong_type cong_type;
};
struct hns_roce_ib_iboe {
@ -699,13 +716,6 @@ struct hns_roce_eq_table {
struct hns_roce_eq *eq;
};
enum cong_type {
CONG_TYPE_DCQCN,
CONG_TYPE_LDCP,
CONG_TYPE_HC3,
CONG_TYPE_DIP,
};
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@ -835,7 +845,8 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
enum cong_type cong_type;
u8 cong_cap;
enum hns_roce_cong_type default_cong_type;
};
enum hns_roce_device_state {
@ -936,6 +947,7 @@ struct hns_roce_hw {
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
@ -1152,8 +1164,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
{
return mtr->hem_cfg.root_ba;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,

View File

@ -249,61 +249,34 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
}
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
int npages,
unsigned long hem_alloc_size,
gfp_t gfp_mask)
{
struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
struct scatterlist *mem;
int order;
void *buf;
WARN_ON(gfp_mask & __GFP_HIGHMEM);
order = get_order(hem_alloc_size);
if (PAGE_SIZE << order != hem_alloc_size) {
dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
hem_alloc_size);
return NULL;
}
hem = kmalloc(sizeof(*hem),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!hem)
return NULL;
INIT_LIST_HEAD(&hem->chunk_list);
buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
&hem->dma, gfp_mask);
if (!buf)
goto fail;
order = get_order(hem_alloc_size);
while (npages > 0) {
if (!chunk) {
chunk = kmalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!chunk)
goto fail;
sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
memset(chunk->buf, 0, sizeof(chunk->buf));
list_add_tail(&chunk->list, &hem->chunk_list);
}
while (1 << order > npages)
--order;
/*
* Alloc memory one time. If failed, don't alloc small block
* memory, directly return fail.
*/
mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
goto fail;
chunk->buf[chunk->npages] = buf;
sg_dma_len(mem) = PAGE_SIZE << order;
++chunk->npages;
++chunk->nsg;
npages -= 1 << order;
}
hem->buf = buf;
hem->size = hem_alloc_size;
return hem;
@ -314,20 +287,10 @@ fail:
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
struct hns_roce_hem_chunk *chunk, *tmp;
int i;
if (!hem)
return;
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(hr_dev->dev,
sg_dma_len(&chunk->mem[i]),
chunk->buf[i],
sg_dma_address(&chunk->mem[i]));
kfree(chunk);
}
dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma);
kfree(hem);
}
@ -415,7 +378,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
{
u32 bt_size = mhop->bt_chunk_size;
struct device *dev = hr_dev->dev;
struct hns_roce_hem_iter iter;
gfp_t flag;
u64 bt_ba;
u32 size;
@ -456,16 +418,15 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
flag = GFP_KERNEL | __GFP_NOWARN;
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
size, flag);
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
if (!table->hem[index->buf]) {
ret = -ENOMEM;
goto err_alloc_hem;
}
index->inited |= HEM_INDEX_BUF;
hns_roce_hem_first(table->hem[index->buf], &iter);
bt_ba = hns_roce_hem_addr(&iter);
bt_ba = table->hem[index->buf]->dma;
if (table->type < HEM_TYPE_MTT) {
if (mhop->hop_num == 2)
*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
@ -586,7 +547,6 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
@ -725,7 +685,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj, dma_addr_t *dma_handle)
{
struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@ -734,7 +693,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int offset, dma_offset;
void *addr = NULL;
u32 hem_idx = 0;
int length;
int i, j;
mutex_lock(&table->mutex);
@ -767,23 +725,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
if (!hem)
goto out;
list_for_each_entry(chunk, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
length = sg_dma_len(&chunk->mem[i]);
if (dma_handle && dma_offset >= 0) {
if (length > (u32)dma_offset)
*dma_handle = sg_dma_address(
&chunk->mem[i]) + dma_offset;
dma_offset -= length;
}
if (length > (u32)offset) {
addr = chunk->buf[i] + offset;
goto out;
}
offset -= length;
}
}
*dma_handle = hem->dma + dma_offset;
addr = hem->buf + offset;
out:
mutex_unlock(&table->mutex);
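
The net effect of the hem rework above is that each HEM is now a single dma_alloc_coherent() buffer, so finding an object is one offset calculation on (buf, dma) instead of walking a chunk list of scatterlist entries. A hedged sketch of the lookup, assuming kernel context; the struct is a trimmed copy of the simplified hns_roce_hem (refcount omitted) and the helper name is invented:

/* Hedged sketch, not the upstream function: with a single coherent
 * allocation, both the CPU address and the DMA address of an object
 * are plain offsets from the start of the HEM buffer.
 */
struct hns_roce_hem {
    void *buf;              /* kernel virtual address */
    dma_addr_t dma;         /* bus address of the same buffer */
    unsigned long size;
};

static void *hem_lookup(struct hns_roce_hem *hem, unsigned long offset,
                        dma_addr_t *dma_handle)
{
    if (offset >= hem->size)
        return NULL;
    if (dma_handle)
        *dma_handle = hem->dma + offset;
    return hem->buf + offset;   /* was a chunk-list walk before */
}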

View File

@ -56,10 +56,6 @@ enum {
HEM_TYPE_TRRL,
};
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist) + sizeof(void *)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
@ -72,25 +68,13 @@ enum {
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
struct hns_roce_hem_chunk {
struct list_head list;
int npages;
int nsg;
struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
void *buf[HNS_ROCE_HEM_CHUNK_LEN];
};
struct hns_roce_hem {
struct list_head chunk_list;
void *buf;
dma_addr_t dma;
unsigned long size;
refcount_t refcount;
};
struct hns_roce_hem_iter {
struct hns_roce_hem *hem;
struct hns_roce_hem_chunk *chunk;
int page_idx;
};
struct hns_roce_hem_mhop {
u32 hop_num;
u32 buf_chunk_size;
@ -133,38 +117,4 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
int offset, int *mtt_cnt);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
{
iter->hem = hem;
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list);
iter->page_idx = 0;
}
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
return !iter->chunk;
}
static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
if (++iter->page_idx >= iter->chunk->nsg) {
if (iter->chunk->list.next == &iter->hem->chunk_list) {
iter->chunk = NULL;
return;
}
iter->chunk = list_entry(iter->chunk->list.next,
struct hns_roce_hem_chunk, list);
iter->page_idx = 0;
}
}
static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
#endif /* _HNS_ROCE_HEM_H */

View File

@ -2209,11 +2209,12 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
@ -3195,21 +3196,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
int i, count;
int ret;
int i;
count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
min_t(int, ARRAY_SIZE(pages), mr->npages),
&pbl_ba);
if (count < 1) {
ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
count);
return -ENOBUFS;
ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
min_t(int, ARRAY_SIZE(pages), mr->npages));
if (ret) {
ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
return ret;
}
/* Aligned to the hardware address access unit */
for (i = 0; i < count; i++)
for (i = 0; i < ARRAY_SIZE(pages); i++)
pages[i] >>= 6;
pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
@ -3308,18 +3310,12 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
void *mb_buf, struct hns_roce_mr *mr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
ibdev_err(ibdev, "failed to find frmr mtr.\n");
return -ENOBUFS;
}
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
@ -4063,7 +4059,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
u32 step_idx)
{
struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@ -4100,12 +4095,8 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx];
for (hns_roce_hem_first(hem, &iter);
!hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
bt_ba = hns_roce_hem_addr(&iter);
ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
step_idx);
}
ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx);
} else {
if (step_idx == 0)
bt_ba = table->bt_l0_dma_addr[i];
@ -4346,17 +4337,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
{
u64 mtts[MTT_MIN_COUNT] = { 0 };
u64 wqe_sge_ba;
int count;
int ret;
/* Search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
MTT_MIN_COUNT, &wqe_sge_ba);
if (hr_qp->rq.wqe_cnt && count < 1) {
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
MTT_MIN_COUNT);
if (hr_qp->rq.wqe_cnt && ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
return -EINVAL;
"failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@ -4418,23 +4412,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
int count;
int ret;
/* search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
if (count < 1) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
hr_qp->qpn);
return -EINVAL;
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
&sq_cur_blk, 1);
if (ret) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
if (hr_qp->sge.sge_cnt > 0) {
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->sge.offset,
&sge_cur_blk, 1, NULL);
if (count < 1) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
hr_qp->qpn);
return -EINVAL;
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->sge.offset, &sge_cur_blk, 1);
if (ret) {
ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
hr_qp->qpn, ret);
return ret;
}
}
@ -4744,13 +4738,10 @@ enum {
static int check_cong_type(struct ib_qp *ibqp,
struct hns_roce_congestion_algorithm *cong_alg)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
if (ibqp->qp_type == IB_QPT_UD)
hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/* different congestion types match different configurations */
switch (hr_dev->caps.cong_type) {
switch (hr_qp->cong_type) {
case CONG_TYPE_DCQCN:
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
@ -4776,10 +4767,7 @@ static int check_cong_type(struct ib_qp *ibqp,
cong_alg->wnd_mode_sel = WND_LIMIT;
break;
default:
ibdev_warn(&hr_dev->ib_dev,
"invalid type(%u) for congestion selection.\n",
hr_dev->caps.cong_type);
hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
hr_qp->cong_type = CONG_TYPE_DCQCN;
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_INVALID;
@ -4798,6 +4786,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_congestion_algorithm cong_field;
struct ib_device *ibdev = ibqp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
u32 dip_idx = 0;
int ret;
@ -4810,7 +4799,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return ret;
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
@ -5328,6 +5317,30 @@ out:
return ret;
}
static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
void *buffer)
{
struct hns_roce_v2_scc_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
qpn);
if (ret)
goto out;
context = mailbox->buf;
memcpy(buffer, context, sizeof(*context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_qp_context *context)
{
@ -5581,18 +5594,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
struct ib_device *ibdev = srq->ibsrq.device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
u64 mtts_idx[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle_idx = 0;
dma_addr_t dma_handle_idx;
int ret;
/* Get physical address of idx que buf */
ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
ARRAY_SIZE(mtts_idx), &dma_handle_idx);
if (ret < 1) {
ARRAY_SIZE(mtts_idx));
if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
ret);
return -ENOBUFS;
return ret;
}
dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@ -5624,20 +5639,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_srq_context *ctx = mb_buf;
u64 mtts_wqe[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle_wqe = 0;
dma_addr_t dma_handle_wqe;
int ret;
memset(ctx, 0, sizeof(*ctx));
/* Get the physical address of srq buf */
ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
if (ret < 1) {
ARRAY_SIZE(mtts_wqe));
if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
ret);
return -ENOBUFS;
return ret;
}
dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
srq->ibsrq.srq_type == IB_SRQT_XRC);
@ -6353,7 +6370,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
u64 bt_ba = 0;
int count;
int ret;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@ -6361,13 +6378,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
init_eq_config(hr_dev, eq);
/* if not multi-hop, eqe buffer only use one trunk */
count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
&bt_ba);
if (count < 1) {
dev_err(hr_dev->dev, "failed to find EQE mtr\n");
return -ENOBUFS;
ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
ARRAY_SIZE(eqe_ba));
if (ret) {
dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
return ret;
}
bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
@ -6714,6 +6733,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
.query_srqc = hns_roce_v2_query_srqc,
.query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
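
The new hns_roce_v2_query_sccc() above follows the driver's usual mailbox-query shape: allocate a command mailbox, issue a query keyed by the QPN, and copy the firmware-filled buffer back for the restrack raw dump. A condensed sketch of that shape, assuming kernel context; the opcode constant and the generic size parameter are stand-ins rather than hns definitions:

/* Hedged mailbox-query sketch; QUERY_OPCODE and ctx_size are
 * placeholders, not the hns definitions.
 */
static int query_hw_context(struct hns_roce_dev *hr_dev, u32 qpn,
                            void *buffer, size_t ctx_size)
{
    struct hns_roce_cmd_mailbox *mailbox;
    int ret;

    mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);

    /* Firmware fills mailbox->buf with the context for this QPN. */
    ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, QUERY_OPCODE, qpn);
    if (!ret)
        memcpy(buffer, mailbox->buf, ctx_size);

    hns_roce_free_cmd_mailbox(hr_dev, mailbox);
    return ret;
}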

View File

@ -646,6 +646,12 @@ struct hns_roce_v2_qp_context {
#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
#define SCC_CONTEXT_SIZE 16
struct hns_roce_v2_scc_context {
__le32 data[SCC_CONTEXT_SIZE];
};
#define V2_QP_RWE_S 1 /* rdma write enable */
#define V2_QP_RRE_S 2 /* rdma read enable */
#define V2_QP_ATE_S 3 /* rdma atomic enable */
@ -1214,12 +1220,13 @@ struct hns_roce_query_pf_caps_d {
#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20)
#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22)
#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24)
#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26)
#define PF_CAPS_D_CONG_CAP PF_CAPS_D_FIELD_LOC(29, 26)
#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64)
#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86)
#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96)
#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118)
#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120)
#define PF_CAPS_D_DEFAULT_ALG PF_CAPS_D_FIELD_LOC(127, 122)
#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128)
#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148)
#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160)

View File

@ -394,6 +394,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
}
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
resp.congest_type = hr_dev->caps.cong_cap;
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_out;

View File

@ -32,6 +32,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem.h>
#include <linux/math.h>
#include "hns_roce_device.h"
@ -103,14 +104,21 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
buf_attr.user_access = mr->access;
/* fast MR's buffer is alloced before mapping, not at creation */
buf_attr.mtt_only = is_fast;
buf_attr.iova = mr->iova;
/* pagesize and hopnum is fixed for fast MR */
buf_attr.adaptive = !is_fast;
buf_attr.type = MTR_PBL;
err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
udata, start);
if (err)
if (err) {
ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
else
mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
return err;
}
mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
mr->pbl_hop_num = buf_attr.region[0].hopnum;
return err;
}
@ -695,7 +703,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = NULL;
mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
buf_attr->page_shift,
mtr->hem_cfg.is_direct ?
!mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
@ -707,14 +715,41 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int page_count, unsigned int page_shift)
static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
{
struct hns_roce_buf_region *region;
int page_cnt = 0;
int i;
for (i = 0; i < mtr->hem_cfg.region_count; i++) {
region = &mtr->hem_cfg.region[i];
page_cnt += region->count;
}
return page_cnt;
}
static bool need_split_huge_page(struct hns_roce_mtr *mtr)
{
/* When HEM buffer uses 0-level addressing, the page size is
* equal to the whole buffer size. If the current MTR has multiple
* regions, we split the buffer into small pages(4k, required by hns
* ROCEE). These pages will be used in multiple regions.
*/
return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
}
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
int page_count = cal_mtr_pg_cnt(mtr);
unsigned int page_shift;
dma_addr_t *pages;
int npage;
int ret;
page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
mtr->hem_cfg.buf_pg_shift;
/* alloc a tmp array to store buffer's dma address */
pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
if (!pages)
@ -734,7 +769,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
goto err_alloc_list;
}
if (mtr->hem_cfg.is_direct && npage > 1) {
if (need_split_huge_page(mtr) && npage > 1) {
ret = mtr_check_direct_pages(pages, npage, page_shift);
if (ret) {
ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
@ -809,47 +844,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
u32 start_index, u64 *mtt_buf,
int mtt_cnt)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
int mtt_count, left;
u32 start_index;
int mtt_count;
int total = 0;
__le64 *mtts;
u32 npage;
u64 addr;
if (!mtt_buf || mtt_max < 1)
goto done;
if (mtt_cnt > cfg->region_count)
return -EINVAL;
/* no mtt memory in direct mode, so just return the buffer address */
if (cfg->is_direct) {
start_index = offset >> HNS_HW_PAGE_SHIFT;
for (mtt_count = 0; mtt_count < cfg->region_count &&
total < mtt_max; mtt_count++) {
npage = cfg->region[mtt_count].offset;
if (npage < start_index)
continue;
for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
mtt_count++) {
npage = cfg->region[mtt_count].offset;
if (npage < start_index)
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
mtt_buf[total] = addr;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
mtt_buf[total] = addr;
total++;
}
goto done;
total++;
}
start_index = offset >> cfg->buf_pg_shift;
left = mtt_max;
if (!total)
return -ENOENT;
return 0;
}
static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr, u32 start_index,
u64 *mtt_buf, int mtt_cnt)
{
int left = mtt_cnt;
int total = 0;
int mtt_count;
__le64 *mtts;
u32 npage;
while (left > 0) {
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
&mtt_count);
if (!mtts || !mtt_count)
goto done;
break;
npage = min(mtt_count, left);
left -= npage;
@ -857,69 +898,165 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
done:
if (base_addr)
*base_addr = cfg->root_ba;
if (!total)
return -ENOENT;
return total;
return 0;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
u32 offset, u64 *mtt_buf, int mtt_max)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
u32 start_index;
int ret;
if (!mtt_buf || mtt_max < 1)
return -EINVAL;
/* no mtt memory in direct mode, so just return the buffer address */
if (cfg->is_direct) {
start_index = offset >> HNS_HW_PAGE_SHIFT;
ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
mtt_buf, mtt_max);
} else {
start_index = offset >> cfg->buf_pg_shift;
ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
mtt_buf, mtt_max);
}
return ret;
}
static int get_best_page_shift(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr)
{
unsigned int page_sz;
if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
return 0;
page_sz = ib_umem_find_best_pgsz(mtr->umem,
hr_dev->caps.page_size_cap,
buf_attr->iova);
if (!page_sz)
return -EINVAL;
buf_attr->page_shift = order_base_2(page_sz);
return 0;
}
static int get_best_hop_num(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int ba_pg_shift)
{
#define INVALID_HOPNUM -1
#define MIN_BA_CNT 1
size_t buf_pg_sz = 1 << buf_attr->page_shift;
struct ib_device *ibdev = &hr_dev->ib_dev;
size_t ba_pg_sz = 1 << ba_pg_shift;
int hop_num = INVALID_HOPNUM;
size_t unit = MIN_BA_CNT;
size_t ba_cnt;
int j;
if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
return 0;
/* Caculating the number of buf pages, each buf page need a BA */
if (mtr->umem)
ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
else
ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
if (ba_cnt <= unit) {
hop_num = j;
break;
}
/* Number of BAs can be represented at per hop */
unit *= ba_pg_sz / BA_BYTE_LEN;
}
if (hop_num < 0) {
ibdev_err(ibdev,
"failed to calculate a valid hopnum.\n");
return -EINVAL;
}
buf_attr->region[0].hopnum = hop_num;
return 0;
}
static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
if (attr->region_count > ARRAY_SIZE(attr->region) ||
attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
ibdev_err(ibdev,
"invalid buf attr, region count %d, page shift %u.\n",
attr->region_count, attr->page_shift);
return false;
}
return true;
}
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
unsigned int *buf_page_shift, u64 unalinged_size)
struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *attr)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
struct hns_roce_buf_region *r;
u64 first_region_padding;
int page_cnt, region_cnt;
unsigned int page_shift;
size_t buf_pg_sz;
size_t buf_size;
int page_cnt, i;
u64 pgoff = 0;
if (!is_buf_attr_valid(hr_dev, attr))
return -EINVAL;
/* If mtt is disabled, all pages must be within a continuous range */
cfg->is_direct = !mtr_has_mtt(attr);
cfg->region_count = attr->region_count;
buf_size = mtr_bufs_size(attr);
if (cfg->is_direct) {
/* When HEM buffer uses 0-level addressing, the page size is
* equal to the whole buffer size, and we split the buffer into
* small pages which is used to check whether the adjacent
* units are in the continuous space and its size is fixed to
* 4K based on hns ROCEE's requirement.
*/
page_shift = HNS_HW_PAGE_SHIFT;
/* The ROCEE requires the page size to be 4K * 2 ^ N. */
if (need_split_huge_page(mtr)) {
buf_pg_sz = HNS_HW_PAGE_SIZE;
cfg->buf_pg_count = 1;
/* The ROCEE requires the page size to be 4K * 2 ^ N. */
cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
first_region_padding = 0;
} else {
page_shift = attr->page_shift;
cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
1 << page_shift);
cfg->buf_pg_shift = page_shift;
first_region_padding = unalinged_size;
buf_pg_sz = 1 << attr->page_shift;
cfg->buf_pg_count = mtr->umem ?
ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
DIV_ROUND_UP(buf_size, buf_pg_sz);
cfg->buf_pg_shift = attr->page_shift;
pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
}
/* Convert buffer size to page index and page count for each region and
* the buffer's offset needs to be appended to the first region.
*/
for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
r = &cfg->region[region_cnt];
for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
r = &cfg->region[i];
r->offset = page_cnt;
buf_size = hr_hw_page_align(attr->region[region_cnt].size +
first_region_padding);
r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
first_region_padding = 0;
buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
if (attr->type == MTR_PBL && mtr->umem)
r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
else
r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
pgoff = 0;
page_cnt += r->count;
r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
r->count);
r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
}
cfg->region_count = region_cnt;
*buf_page_shift = page_shift;
return page_cnt;
return 0;
}
static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
@ -1007,50 +1144,58 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned int buf_page_shift = 0;
int buf_page_cnt;
int ret;
buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
&buf_page_shift,
udata ? user_addr & ~PAGE_MASK : 0);
if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
buf_page_cnt, buf_page_shift);
return -EINVAL;
}
ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
if (ret) {
ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
return ret;
}
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
if (buf_attr->mtt_only) {
mtr->umem = NULL;
mtr->kmem = NULL;
return 0;
} else {
ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
if (ret) {
ibdev_err(ibdev,
"failed to alloc mtr bufs, ret = %d.\n", ret);
return ret;
}
ret = get_best_page_shift(hr_dev, mtr, buf_attr);
if (ret)
goto err_init_buf;
ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
if (ret)
goto err_init_buf;
}
ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
if (ret)
goto err_init_buf;
ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
if (ret) {
ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
goto err_init_buf;
}
if (buf_attr->mtt_only)
return 0;
/* Write buffer's dma address to MTT */
ret = mtr_map_bufs(hr_dev, mtr);
if (ret) {
ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
goto err_alloc_mtt;
}
/* Write buffer's dma address to MTT */
ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
if (ret)
ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
else
return 0;
return 0;
mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
mtr_free_mtt(hr_dev, mtr);
err_init_buf:
mtr_free_bufs(hr_dev, mtr);
return ret;
}


@ -1004,6 +1004,60 @@ static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
kfree(hr_qp->sq.wrid);
}
static void default_congest_type(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
hr_qp->ibqp.qp_type == IB_QPT_GSI)
hr_qp->cong_type = CONG_TYPE_DCQCN;
else
hr_qp->cong_type = hr_dev->caps.default_cong_type;
}
static int set_congest_type(struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
switch (ucmd->cong_type_flags) {
case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
hr_qp->cong_type = CONG_TYPE_DCQCN;
break;
case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
hr_qp->cong_type = CONG_TYPE_LDCP;
break;
case HNS_ROCE_CREATE_QP_FLAGS_HC3:
hr_qp->cong_type = CONG_TYPE_HC3;
break;
case HNS_ROCE_CREATE_QP_FLAGS_DIP:
hr_qp->cong_type = CONG_TYPE_DIP;
break;
default:
return -EINVAL;
}
if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
return -EOPNOTSUPP;
if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
hr_qp->cong_type != CONG_TYPE_DCQCN)
return -EOPNOTSUPP;
return 0;
}
static int set_congest_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
return set_congest_type(hr_qp, ucmd);
default_congest_type(hr_dev, hr_qp);
return 0;
}
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
@ -1043,6 +1097,10 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
ret = set_congest_param(hr_dev, hr_qp, ucmd);
if (ret)
return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
@ -1051,6 +1109,8 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
default_congest_type(hr_dev, hr_qp);
}
return ret;
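
Per-QP congestion control is selected entirely through the create-QP command buffer handled above. A minimal provider-side sketch, using only the mask and flag names from this diff (the surrounding libibverbs plumbing is assumed, not shown by the patch):

struct hns_roce_ib_create_qp cmd = {};

/* hypothetical request: use LDCP for this QP instead of the device default */
cmd.comp_mask |= HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE;
cmd.cong_type_flags = HNS_ROCE_CREATE_QP_FLAGS_LDCP;
/* cmd then reaches the kernel as create-QP udata, where set_congest_type()
 * rejects algorithms missing from hr_dev->caps.cong_cap and anything other
 * than DCQCN on UD QPs.
 */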


@ -97,16 +97,33 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
struct hns_roce_full_qp_ctx {
struct hns_roce_v2_qp_context qpc;
struct hns_roce_v2_scc_context sccc;
} context = {};
int ret;
if (!hr_dev->hw->query_qpc)
return -EINVAL;
ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
if (ret)
return -EINVAL;
return ret;
/* If SCC is disabled or the query fails, the queried SCCC will
* be all 0.
*/
if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
!hr_dev->hw->query_sccc)
goto out;
ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
if (ret)
ibdev_warn_ratelimited(&hr_dev->ib_dev,
"failed to query SCCC, ret = %d.\n",
ret);
out:
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
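
For consumers of the raw dump, the wrapper struct above fixes the blob layout: the QPC comes first, immediately followed by the SCCC (all zeros when SCC is disabled or the query fails). A minimal parsing sketch under that assumption, with the netlink attribute handling left out:

/* hypothetical split of the RDMA_NLDEV_ATTR_RES_RAW payload for an hns QP */
const char *blob = nla_data(attr);
const struct hns_roce_v2_qp_context *qpc = (const void *)blob;
const struct hns_roce_v2_scc_context *sccc = (const void *)(blob + sizeof(*qpc));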


@ -719,7 +719,6 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
ukinfo->qp_id = iwqp->ibqp.qp_num;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@ -944,7 +943,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;


@ -16,7 +16,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev->gdma_dev->gdma_context;
gc = mdev_to_gc(mdev);
if (udata->inlen < sizeof(ucmd))
return -EINVAL;
@ -48,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@ -57,7 +57,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
ibdev_dbg(ibdev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
"create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
@ -81,7 +81,7 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev->gdma_dev->gdma_context;
gc = mdev_to_gc(mdev);
err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
if (err) {
@ -100,10 +100,29 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return 0;
}
void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
struct mana_ib_cq *cq = ctx;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue *gdma_cq;
/* Create CQ table entry */
WARN_ON(gc->cq_table[cq->id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
gdma_cq->id = cq->id;
gc->cq_table[cq->id] = gdma_cq;
return 0;
}
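
The helper is meant to be called once the hardware has assigned the CQ id; the qp.c hunks later in this series use exactly this pattern:

send_cq->id = cq_spec.queue_index;
err = mana_ib_install_cq_cb(mdev, send_cq);
if (err)
	goto err_destroy_wq_obj;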


@ -8,13 +8,10 @@
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
{
struct gdma_dev *gd = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct net_device *ndev;
struct mana_context *mc;
mc = gd->driver_data;
ndev = mc->ports[port];
ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@ -31,14 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
u32 doorbell_id)
{
struct gdma_dev *mdev = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
int err;
mc = mdev->driver_data;
ndev = mc->ports[port];
ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@ -79,17 +73,17 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_create_pd_req req = {};
enum gdma_pd_flags flags = 0;
struct mana_ib_dev *dev;
struct gdma_dev *mdev;
struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
mdev = dev->gdma_dev;
gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
req.flags = flags;
err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@ -119,17 +113,17 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_destory_pd_resp resp = {};
struct gdma_destroy_pd_req req = {};
struct mana_ib_dev *dev;
struct gdma_dev *mdev;
struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
mdev = dev->gdma_dev;
gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
sizeof(resp));
req.pd_handle = pd->pd_handle;
err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@ -206,13 +200,11 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct ib_device *ibdev = ibcontext->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
struct gdma_dev *dev;
int doorbell_page;
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
dev = mdev->gdma_dev;
gc = dev->gdma_context;
gc = mdev_to_gc(mdev);
/* Allocate a doorbell page index */
ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
@ -238,7 +230,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev->gdma_dev->gdma_context;
gc = mdev_to_gc(mdev);
ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
if (ret)
@ -309,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}
int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region)
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region, unsigned long page_sz)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
size_t num_pages_processed = 0, num_pages_to_handle;
@ -322,23 +314,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
size_t max_pgs_create_cmd;
struct gdma_context *gc;
size_t num_pages_total;
struct gdma_dev *mdev;
unsigned long page_sz;
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
int err;
mdev = dev->gdma_dev;
gc = mdev->gdma_context;
gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
/* Hardware requires dma region to align to chosen page size */
page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
return -ENOMEM;
}
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
max_pgs_create_cmd =
@ -358,7 +341,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
sizeof(struct gdma_create_dma_region_resp));
create_req->length = umem->length;
create_req->offset_in_page = umem->address & (page_sz - 1);
create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->page_count = num_pages_total;
@ -424,12 +407,39 @@ out:
return err;
}
int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region, u64 virt)
{
unsigned long page_sz;
page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
}
return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}
int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region)
{
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
}
return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}
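
/*
 * Illustrative note, not part of the patch: the two wrappers above differ
 * only in how the page size is chosen.  An MR may start at an offset inside
 * its first page, so the page size is picked against the MR's virtual address
 * and offset_in_page can be non-zero; WQ/CQ/SQ objects require a 0 offset, so
 * ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0) only returns page sizes for
 * which the umem starts page-aligned.
 */
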
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
struct gdma_context *gc = mdev_to_gc(dev);
gc = mdev->gdma_context;
ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
return mana_gd_destroy_dma_region(gc, gdma_region);
@ -447,7 +457,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev->gdma_dev->gdma_context;
gc = mdev_to_gc(mdev);
if (vma->vm_pgoff != 0) {
ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
@ -531,7 +541,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = dev->gdma_dev->dev_id;
err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
&req, sizeof(resp), &resp);
if (err) {


@ -142,8 +142,29 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size;
}; /* HW Data */
int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region);
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
return mdev->gdma_dev->gdma_context;
}
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_context *mc = gc->mana.driver_data;
if (port < 1 || port > mc->num_ports)
return NULL;
return mc->ports[port - 1];
}
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region);
int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region, u64 virt);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
@ -210,6 +231,4 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq);
#endif
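
A minimal usage sketch of the two inline helpers above (caller context assumed); mana_ib_get_netdev takes the 1-based IB port number and returns NULL when it is out of range:

struct gdma_context *gc = mdev_to_gc(mdev);
struct net_device *ndev = mana_ib_get_netdev(&mdev->ib_dev, port);

if (!ndev)
	return -EINVAL;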


@ -30,12 +30,9 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
{
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
struct gdma_context *gc = mdev_to_gc(dev);
int err;
gc = mdev->gdma_context;
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
req.pd_handle = mr_params->pd_handle;
@ -77,12 +74,9 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
struct gdma_destroy_mr_response resp = {};
struct gdma_destroy_mr_request req = {};
struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
struct gdma_context *gc = mdev_to_gc(dev);
int err;
gc = mdev->gdma_context;
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
sizeof(resp));
@ -133,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}
err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@ -141,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
"mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
"create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
@ -164,8 +158,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
return &mr->ibmr;
err_dma_region:
mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
dma_region_handle);
mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
err_umem:
ib_umem_release(mr->umem);


@ -17,12 +17,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_cfg_rx_steer_resp resp = {};
mana_handle_t *req_indir_tab;
struct gdma_context *gc;
struct gdma_dev *mdev;
u32 req_buf_size;
int i, err;
gc = dev->gdma_dev->gdma_context;
mdev = &gc->mana;
gc = mdev_to_gc(dev);
req_buf_size =
sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@ -39,7 +37,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rx_enable = 1;
req->update_default_rxobj = 1;
req->default_rxobj = default_rxobj;
req->hdr.dev_id = mdev->dev_id;
req->hdr.dev_id = gc->mana.dev_id;
/* If there are more than 1 entries in indirection table, enable RSS */
if (log_ind_tbl_size)
@ -99,20 +97,17 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
struct gdma_queue *gdma_cq;
unsigned int ind_tbl_size;
struct mana_context *mc;
struct net_device *ndev;
struct gdma_context *gc;
struct mana_ib_cq *cq;
struct mana_ib_wq *wq;
struct gdma_dev *gd;
struct mana_eq *eq;
struct ib_cq *ibcq;
struct ib_wq *ibwq;
@ -120,10 +115,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
u32 port;
int ret;
gc = mdev->gdma_dev->gdma_context;
gd = &gc->mana;
mc = gd->driver_data;
if (!udata || udata->inlen < sizeof(ucmd))
return -EINVAL;
@ -166,12 +157,12 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* IB ports start with 1, MANA start with 0 */
port = ucmd.port;
if (port < 1 || port > mc->num_ports) {
ndev = mana_ib_get_netdev(pd->device, port);
if (!ndev) {
ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
@ -209,7 +200,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
cq_spec.gdma_region = cq->gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@ -237,19 +228,11 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
mana_ind_table[i] = wq->rx_object;
/* Create CQ table entry */
WARN_ON(gc->cq_table[cq->id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq) {
ret = -ENOMEM;
ret = mana_ib_install_cq_cb(mdev, cq);
if (ret)
goto fail;
}
gdma_cq_allocated[i] = gdma_cq;
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
gdma_cq->id = cq->id;
gc->cq_table[cq->id] = gdma_cq;
gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@ -306,14 +289,13 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {};
struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
struct ib_umem *umem;
struct mana_eq *eq;
@ -321,8 +303,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
u32 port;
int err;
mc = gd->driver_data;
if (!mana_ucontext || udata->inlen < sizeof(ucmd))
return -EINVAL;
@ -333,11 +313,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return err;
}
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(&mdev->ib_dev,
"Requested max_send_wr %d exceeding limit\n",
@ -352,11 +327,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return -EINVAL;
}
ndev = mc->ports[port - 1];
port = ucmd.port;
ndev = mana_ib_get_netdev(ibpd->device, port);
if (!ndev) {
ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
if (err)
return -ENODEV;
@ -376,8 +357,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
qp->sq_umem = umem;
err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
&qp->sq_gdma_region);
err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
&qp->sq_gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
@ -386,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
ibdev_dbg(&mdev->ib_dev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
"create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */
@ -396,8 +377,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
cq_spec.gdma_region = send_cq->gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
eq = &mc->eqs[eq_vec];
eq_vec = send_cq->comp_vector % gc->max_num_queues;
eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
@ -417,18 +398,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
send_cq->id = cq_spec.queue_index;
/* Create CQ table entry */
WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq) {
err = -ENOMEM;
err = mana_ib_install_cq_cb(mdev, send_cq);
if (err)
goto err_destroy_wq_obj;
}
gdma_cq->cq.context = send_cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
gdma_cq->id = send_cq->id;
gd->gdma_context->cq_table[send_cq->id] = gdma_cq;
ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
@ -450,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err_release_gdma_cq:
kfree(gdma_cq);
gd->gdma_context->cq_table[send_cq->id] = NULL;
gc->cq_table[send_cq->id] = NULL;
err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
@ -462,7 +434,7 @@ err_release_umem:
ib_umem_release(umem);
err_free_vport:
mana_ib_uncfg_vport(mdev, pd, port - 1);
mana_ib_uncfg_vport(mdev, pd, port);
return err;
}
@ -500,16 +472,13 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_wq *wq;
struct ib_wq *ibwq;
int i;
mc = gd->driver_data;
ndev = mc->ports[qp->port - 1];
ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
@ -527,15 +496,12 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_pd *pd;
mc = gd->driver_data;
ndev = mc->ports[qp->port - 1];
ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@ -546,7 +512,7 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
ib_umem_release(qp->sq_umem);
}
mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
mana_ib_uncfg_vport(mdev, pd, qp->port);
return 0;
}


@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
}
ibdev_dbg(&mdev->ib_dev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
"create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* WQ ID is returned at wq_create time, doesn't know the value yet */


@ -1377,7 +1377,6 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);


@ -160,8 +160,6 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
}
/* called by ifc layer to create new rxe device.
@ -181,7 +179,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
int err = 0;
if (is_vlan_dev(ndev)) {
rxe_err("rxe creation allowed on top of a real device only");
rxe_err("rxe creation allowed on top of a real device only\n");
err = -EPERM;
goto err;
}
@ -189,7 +187,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
rxe = rxe_get_dev_from_net(ndev);
if (rxe) {
ib_device_put(&rxe->ib_dev);
rxe_err_dev(rxe, "already configured on %s", ndev->name);
rxe_err_dev(rxe, "already configured on %s\n", ndev->name);
err = -EEXIST;
goto err;
}


@ -38,7 +38,7 @@
#define RXE_ROCE_V2_SPORT (0xc000)
#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
@ -58,7 +58,7 @@
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
@ -79,7 +79,7 @@
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)

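Here the hard-coded "\n" is removed from the plain rxe_dbg()/rxe_err()/rxe_info() macros, and the remaining rxe hunks below add an explicit newline to each call site of the whole logging-macro family, for example:

rxe_dbg_mr(mr, "iova/length out of range\n");
rxe_err_qp(qp, "non-flush error status = %d\n", wqe->status);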

@ -433,7 +433,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
} else {
if (wqe->status != IB_WC_WR_FLUSH_ERR)
rxe_err_qp(qp, "non-flush error status = %d",
rxe_err_qp(qp, "non-flush error status = %d\n",
wqe->status);
}
}
@ -582,7 +582,7 @@ static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
err = rxe_cq_post(qp->scq, &cqe, 0);
if (err)
rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
return err;
}


@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)",
rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
cqe, count);
goto err1;
}
@ -96,7 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
rxe_err_cq(cq, "queue full");
rxe_err_cq(cq, "queue full\n");
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;


@ -59,7 +59,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);


@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_MEM_REG:
if (iova < mr->ibmr.iova ||
iova + length > mr->ibmr.iova + mr->ibmr.length) {
rxe_dbg_mr(mr, "iova/length out of range");
rxe_dbg_mr(mr, "iova/length out of range\n");
return -EINVAL;
}
return 0;
@ -126,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
return xas_error(&xas);
}
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr)
{
struct ib_umem *umem;
@ -319,7 +319,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
err = mr_check_range(mr, iova, length);
if (unlikely(err)) {
rxe_dbg_mr(mr, "iova out of range");
rxe_dbg_mr(mr, "iova out of range\n");
return err;
}
@ -477,7 +477,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
u64 *va;
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
rxe_dbg_mr(mr, "mr not in valid state");
rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@ -490,7 +490,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
err = mr_check_range(mr, iova, sizeof(value));
if (err) {
rxe_dbg_mr(mr, "iova out of range");
rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@ -501,7 +501,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
}
if (unlikely(page_offset & 0x7)) {
rxe_dbg_mr(mr, "iova not aligned");
rxe_dbg_mr(mr, "iova not aligned\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
@ -534,7 +534,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
rxe_dbg_mr(mr, "mr not in valid state");
rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@ -548,7 +548,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
err = mr_check_range(mr, iova, sizeof(value));
if (unlikely(err)) {
rxe_dbg_mr(mr, "iova out of range");
rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@ -560,7 +560,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA A19.4.2 */
if (unlikely(page_offset & 0x7)) {
rxe_dbg_mr(mr, "misaligned address");
rxe_dbg_mr(mr, "misaligned address\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}


@ -198,7 +198,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
if (access & ~RXE_ACCESS_SUPPORTED_MW) {
rxe_err_mw(mw, "access %#x not supported", access);
rxe_err_mw(mw, "access %#x not supported\n", access);
ret = -EOPNOTSUPP;
goto err_drop_mr;
}


@ -201,7 +201,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->sq.queue) {
rxe_err_qp(qp, "Unable to allocate send queue");
rxe_err_qp(qp, "Unable to allocate send queue\n");
err = -ENOMEM;
goto err_out;
}
@ -211,7 +211,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);
if (err) {
rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
@ -292,7 +292,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->rq.queue) {
rxe_err_qp(qp, "Unable to allocate recv queue");
rxe_err_qp(qp, "Unable to allocate recv queue\n");
err = -ENOMEM;
goto err_out;
}
@ -302,7 +302,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}


@ -362,18 +362,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
if ((pkt->mask & RXE_START_MASK) &&
(pkt->mask & RXE_END_MASK)) {
if (unlikely(payload > mtu)) {
rxe_dbg_qp(qp, "only packet too long");
rxe_dbg_qp(qp, "only packet too long\n");
return RESPST_ERR_LENGTH;
}
} else if ((pkt->mask & RXE_START_MASK) ||
(pkt->mask & RXE_MIDDLE_MASK)) {
if (unlikely(payload != mtu)) {
rxe_dbg_qp(qp, "first or middle packet not mtu");
rxe_dbg_qp(qp, "first or middle packet not mtu\n");
return RESPST_ERR_LENGTH;
}
} else if (pkt->mask & RXE_END_MASK) {
if (unlikely((payload == 0) || (payload > mtu))) {
rxe_dbg_qp(qp, "last packet zero or too long");
rxe_dbg_qp(qp, "last packet zero or too long\n");
return RESPST_ERR_LENGTH;
}
}
@ -382,7 +382,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/* See IBA C9-94 */
if (pkt->mask & RXE_RETH_MASK) {
if (reth_len(pkt) > (1U << 31)) {
rxe_dbg_qp(qp, "dma length too long");
rxe_dbg_qp(qp, "dma length too long\n");
return RESPST_ERR_LENGTH;
}
}
@ -1133,7 +1133,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
rxe_err_qp(qp, "non-flush error status = %d",
rxe_err_qp(qp, "non-flush error status = %d\n",
wc->status);
}
@ -1442,7 +1442,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
return err;
}


@ -156,7 +156,7 @@ static void do_task(struct rxe_task *task)
default:
WARN_ON(1);
rxe_dbg_qp(task->qp, "unexpected task state = %d",
rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
task->state);
task->state = TASK_STATE_IDLE;
}
@ -167,7 +167,7 @@ exit:
if (WARN_ON(task->num_done != task->num_sched))
rxe_dbg_qp(
task->qp,
"%ld tasks scheduled, %ld tasks done",
"%ld tasks scheduled, %ld tasks done\n",
task->num_sched, task->num_done);
}
spin_unlock_irqrestore(&task->lock, flags);


@ -23,7 +23,7 @@ static int rxe_query_device(struct ib_device *ibdev,
int err;
if (udata->inlen || udata->outlen) {
rxe_dbg_dev(rxe, "malformed udata");
rxe_dbg_dev(rxe, "malformed udata\n");
err = -EINVAL;
goto err_out;
}
@ -33,7 +33,7 @@ static int rxe_query_device(struct ib_device *ibdev,
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -45,7 +45,7 @@ static int rxe_query_port(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@ -67,7 +67,7 @@ static int rxe_query_port(struct ib_device *ibdev,
return ret;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -79,7 +79,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
if (index != 0) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad pkey index = %d", index);
rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
goto err_out;
}
@ -87,7 +87,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -100,7 +100,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@ -115,7 +115,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -128,14 +128,14 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
//TODO is shutdown useful
if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@ -149,7 +149,7 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -161,14 +161,14 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
return IB_LINK_LAYER_ETHERNET;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -181,7 +181,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@ -197,7 +197,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -210,7 +210,7 @@ static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->uc_pool, uc);
if (err)
rxe_err_dev(rxe, "unable to create uc");
rxe_err_dev(rxe, "unable to create uc\n");
return err;
}
@ -222,7 +222,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
err = rxe_cleanup(uc);
if (err)
rxe_err_uc(uc, "cleanup failed, err = %d", err);
rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}
/* pd */
@ -234,14 +234,14 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->pd_pool, pd);
if (err) {
rxe_dbg_dev(rxe, "unable to alloc pd");
rxe_dbg_dev(rxe, "unable to alloc pd\n");
goto err_out;
}
return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -252,7 +252,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_cleanup(pd);
if (err)
rxe_err_pd(pd, "cleanup failed, err = %d", err);
rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
return 0;
}
@ -279,7 +279,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err) {
rxe_dbg_dev(rxe, "unable to create ah");
rxe_dbg_dev(rxe, "unable to create ah\n");
goto err_out;
}
@ -288,7 +288,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
if (err) {
rxe_dbg_ah(ah, "bad attr");
rxe_dbg_ah(ah, "bad attr\n");
goto err_cleanup;
}
@ -298,7 +298,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
sizeof(uresp->ah_num));
if (err) {
err = -EFAULT;
rxe_dbg_ah(ah, "unable to copy to user");
rxe_dbg_ah(ah, "unable to copy to user\n");
goto err_cleanup;
}
} else if (ah->is_user) {
@ -314,9 +314,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
err_cleanup:
cleanup_err = rxe_cleanup(ah);
if (cleanup_err)
rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
err_out:
rxe_err_ah(ah, "returned err = %d", err);
rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@ -327,7 +327,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
err = rxe_ah_chk_attr(ah, attr);
if (err) {
rxe_dbg_ah(ah, "bad attr");
rxe_dbg_ah(ah, "bad attr\n");
goto err_out;
}
@ -336,7 +336,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
err_out:
rxe_err_ah(ah, "returned err = %d", err);
rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@ -358,7 +358,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
if (err)
rxe_err_ah(ah, "cleanup failed, err = %d", err);
rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
return 0;
}
@ -376,7 +376,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
rxe_err_dev(rxe, "malformed udata");
rxe_err_dev(rxe, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@ -384,20 +384,20 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (init->srq_type != IB_SRQT_BASIC) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "srq type = %d, not supported",
rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
init->srq_type);
goto err_out;
}
err = rxe_srq_chk_init(rxe, init);
if (err) {
rxe_dbg_dev(rxe, "invalid init attributes");
rxe_dbg_dev(rxe, "invalid init attributes\n");
goto err_out;
}
err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err) {
rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
goto err_out;
}
@ -406,7 +406,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err) {
rxe_dbg_srq(srq, "create srq failed, err = %d", err);
rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
goto err_cleanup;
}
@ -415,9 +415,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(srq);
if (cleanup_err)
rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -433,34 +433,34 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (udata) {
if (udata->inlen < sizeof(cmd)) {
err = -EINVAL;
rxe_dbg_srq(srq, "malformed udata");
rxe_dbg_srq(srq, "malformed udata\n");
goto err_out;
}
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
err = -EFAULT;
rxe_dbg_srq(srq, "unable to read udata");
rxe_dbg_srq(srq, "unable to read udata\n");
goto err_out;
}
}
err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err) {
rxe_dbg_srq(srq, "bad init attributes");
rxe_dbg_srq(srq, "bad init attributes\n");
goto err_out;
}
err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
if (err) {
rxe_dbg_srq(srq, "bad attr");
rxe_dbg_srq(srq, "bad attr\n");
goto err_out;
}
return 0;
err_out:
rxe_err_srq(srq, "returned err = %d", err);
rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@ -471,7 +471,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
if (srq->error) {
err = -EINVAL;
rxe_dbg_srq(srq, "srq in error state");
rxe_dbg_srq(srq, "srq in error state\n");
goto err_out;
}
@ -481,7 +481,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
err_out:
rxe_err_srq(srq, "returned err = %d", err);
rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@ -505,7 +505,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (err) {
*bad_wr = wr;
rxe_err_srq(srq, "returned err = %d", err);
rxe_err_srq(srq, "returned err = %d\n", err);
}
return err;
@ -518,7 +518,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
err = rxe_cleanup(srq);
if (err)
rxe_err_srq(srq, "cleanup failed, err = %d", err);
rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
return 0;
}
@ -536,13 +536,13 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
@ -554,25 +554,25 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (init->create_flags) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
goto err_out;
}
err = rxe_qp_chk_init(rxe, init);
if (err) {
rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err) {
rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err) {
rxe_dbg_qp(qp, "create qp failed, err = %d", err);
rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
goto err_cleanup;
}
@ -582,9 +582,9 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(qp);
if (cleanup_err)
rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -597,20 +597,20 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
err = -EOPNOTSUPP;
rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
mask, err);
goto err_out;
}
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err) {
rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_attr(qp, attr, mask, udata);
if (err) {
rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
goto err_out;
}
@ -622,7 +622,7 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
err_out:
rxe_err_qp(qp, "returned err = %d", err);
rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@ -644,18 +644,18 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
err = rxe_qp_chk_destroy(qp);
if (err) {
rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
goto err_out;
}
err = rxe_cleanup(qp);
if (err)
rxe_err_qp(qp, "cleanup failed, err = %d", err);
rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
return 0;
err_out:
rxe_err_qp(qp, "returned err = %d", err);
rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@ -675,12 +675,12 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
do {
mask = wr_opcode_mask(ibwr->opcode, qp);
if (!mask) {
rxe_err_qp(qp, "bad wr opcode for qp type");
rxe_err_qp(qp, "bad wr opcode for qp type\n");
break;
}
if (num_sge > sq->max_sge) {
rxe_err_qp(qp, "num_sge > max_sge");
rxe_err_qp(qp, "num_sge > max_sge\n");
break;
}
@ -689,27 +689,27 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
length += ibwr->sg_list[i].length;
if (length > (1UL << 31)) {
rxe_err_qp(qp, "message length too long");
rxe_err_qp(qp, "message length too long\n");
break;
}
if (mask & WR_ATOMIC_MASK) {
if (length != 8) {
rxe_err_qp(qp, "atomic length != 8");
rxe_err_qp(qp, "atomic length != 8\n");
break;
}
if (atomic_wr(ibwr)->remote_addr & 0x7) {
rxe_err_qp(qp, "misaligned atomic address");
rxe_err_qp(qp, "misaligned atomic address\n");
break;
}
}
if (ibwr->send_flags & IB_SEND_INLINE) {
if (!(mask & WR_INLINE_MASK)) {
rxe_err_qp(qp, "opcode doesn't support inline data");
rxe_err_qp(qp, "opcode doesn't support inline data\n");
break;
}
if (length > sq->max_inline) {
rxe_err_qp(qp, "inline length too big");
rxe_err_qp(qp, "inline length too big\n");
break;
}
}
@ -747,7 +747,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_SEND:
break;
default:
rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP",
rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
wr->opcode);
return -EINVAL;
}
@ -795,7 +795,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_ATOMIC_WRITE:
break;
default:
rxe_err_qp(qp, "unsupported wr opcode %d",
rxe_err_qp(qp, "unsupported wr opcode %d\n",
wr->opcode);
return -EINVAL;
}
@ -870,7 +870,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
rxe_err_qp(qp, "send queue full");
rxe_err_qp(qp, "send queue full\n");
return -ENOMEM;
}
@ -922,14 +922,14 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_err_qp(qp, "qp has been destroyed");
rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
rxe_err_qp(qp, "qp not ready to send");
rxe_err_qp(qp, "qp not ready to send\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
@ -959,13 +959,13 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
err = -ENOMEM;
rxe_dbg("queue full");
rxe_dbg("queue full\n");
goto err_out;
}
if (unlikely(num_sge > rq->max_sge)) {
err = -EINVAL;
rxe_dbg("bad num_sge > max_sge");
rxe_dbg("bad num_sge > max_sge\n");
goto err_out;
}
@ -976,7 +976,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
err = -EINVAL;
rxe_dbg("message length too long");
rxe_dbg("message length too long\n");
goto err_out;
}
@ -996,7 +996,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
return 0;
err_out:
rxe_dbg("returned err = %d", err);
rxe_dbg("returned err = %d\n", err);
return err;
}
@ -1012,7 +1012,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_err_qp(qp, "qp has been destroyed");
rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
@ -1020,14 +1020,14 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
rxe_dbg_qp(qp, "qp not ready to post recv");
rxe_dbg_qp(qp, "qp not ready to post recv\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(qp->srq)) {
*bad_wr = wr;
rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead");
rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
return -EINVAL;
}
@ -1065,7 +1065,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
uresp = udata->outbuf;
@ -1073,26 +1073,26 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (attr->flags) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
goto err_out;
}
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err) {
rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
goto err_out;
}
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err) {
rxe_dbg_cq(cq, "create cq failed, err = %d", err);
rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
goto err_cleanup;
}
@ -1101,9 +1101,9 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_cleanup:
cleanup_err = rxe_cleanup(cq);
if (cleanup_err)
rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@ -1117,7 +1117,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
rxe_dbg_cq(cq, "malformed udata");
rxe_dbg_cq(cq, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@ -1125,20 +1125,20 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
if (err) {
rxe_dbg_cq(cq, "bad attr, err = %d", err);
rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
goto err_out;
}
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
goto err_out;
}
return 0;
err_out:
rxe_err_cq(cq, "returned err = %d", err);
rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@ -1202,18 +1202,18 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
*/
if (atomic_read(&cq->num_wq)) {
err = -EINVAL;
rxe_dbg_cq(cq, "still in use");
rxe_dbg_cq(cq, "still in use\n");
goto err_out;
}
err = rxe_cleanup(cq);
if (err)
rxe_err_cq(cq, "cleanup failed, err = %d", err);
rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
return 0;
err_out:
rxe_err_cq(cq, "returned err = %d", err);
rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@ -1231,7 +1231,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
rxe_dbg_dev(rxe, "unable to create mr");
rxe_dbg_dev(rxe, "unable to create mr\n");
goto err_free;
}
@ -1245,7 +1245,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err_free:
kfree(mr);
rxe_err_pd(pd, "returned err = %d", err);
rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@ -1259,7 +1259,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
int err, cleanup_err;
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
rxe_err_pd(pd, "access = %#x not supported (%#x)", access,
rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
RXE_ACCESS_SUPPORTED_MR);
return ERR_PTR(-EOPNOTSUPP);
}
@ -1270,7 +1270,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
rxe_dbg_pd(pd, "unable to create mr");
rxe_dbg_pd(pd, "unable to create mr\n");
goto err_free;
}
@ -1278,9 +1278,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device;
err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
err = rxe_mr_init_user(rxe, start, length, access, mr);
if (err) {
rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1290,10 +1290,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1310,7 +1310,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
* rereg_pd and rereg_access
*/
if (flags & ~RXE_MR_REREG_SUPPORTED) {
- rxe_err_mr(mr, "flags = %#x not supported", flags);
+ rxe_err_mr(mr, "flags = %#x not supported\n", flags);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1322,7 +1322,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
if (flags & IB_MR_REREG_ACCESS) {
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_mr(mr, "access = %#x not supported", access);
+ rxe_err_mr(mr, "access = %#x not supported\n", access);
return ERR_PTR(-EOPNOTSUPP);
}
mr->access = access;
@@ -1341,7 +1341,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (mr_type != IB_MR_TYPE_MEM_REG) {
err = -EINVAL;
- rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
mr_type, err);
goto err_out;
}
@@ -1360,7 +1360,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err = rxe_mr_init_fast(max_num_sg, mr);
if (err) {
- rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1370,11 +1370,11 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", err);
err_free:
kfree(mr);
err_out:
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1386,19 +1386,19 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
/* See IBA 10.6.7.2.6 */
if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL;
- rxe_dbg_mr(mr, "mr has mw's bound");
+ rxe_dbg_mr(mr, "mr has mw's bound\n");
goto err_out;
}
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
kfree_rcu_mightsleep(mr);
return 0;
err_out:
- rxe_err_mr(mr, "returned err = %d", err);
+ rxe_err_mr(mr, "returned err = %d\n", err);
return err;
}


@@ -287,8 +287,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
ah = ipoib_create_ah(dev, priv->pd, &av);
if (IS_ERR(ah)) {
- ipoib_warn(priv, "ib_address_create failed %ld\n",
-            -PTR_ERR(ah));
+ ipoib_warn(priv, "ib_address_create failed %pe\n", ah);
/* use original error */
return PTR_ERR(ah);
}


@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
/* distinguish "mi" and "min-latency" with length */
len = strnlen(buf, NAME_MAX);
- if (buf[len - 1] == '\n')
+ if (len && buf[len - 1] == '\n')
len--;
if (!strncasecmp(buf, "round-robin", 11) ||


@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
                      srpt_event_handler);
- ib_register_event_handler(&sdev->event_handler);
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
}
}
+ ib_register_event_handler(&sdev->event_handler);
spin_lock(&srpt_dev_lock);
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
err_port:
srpt_unregister_mad_agent(sdev, i);
- ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);


@@ -1910,8 +1910,6 @@ struct ib_flow_eth_filter {
u8 src_mac[6];
__be16 ether_type;
__be16 vlan_tag;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_eth {
@@ -1924,8 +1922,6 @@ struct ib_flow_spec_eth {
struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_ib {
@@ -1949,8 +1945,6 @@ struct ib_flow_ipv4_filter {
u8 tos;
u8 ttl;
u8 flags;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_ipv4 {
@@ -1967,9 +1961,7 @@ struct ib_flow_ipv6_filter {
u8 next_hdr;
u8 traffic_class;
u8 hop_limit;
- /* Must be last */
- u8 real_sz[];
-};
+} __packed;
struct ib_flow_spec_ipv6 {
u32 type;
@@ -1981,8 +1973,6 @@ struct ib_flow_spec_ipv6 {
struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_tcp_udp {
@@ -1994,7 +1984,6 @@ struct ib_flow_spec_tcp_udp {
struct ib_flow_tunnel_filter {
__be32 tunnel_id;
- u8 real_sz[];
};
/* ib_flow_spec_tunnel describes the Vxlan tunnel
@@ -2010,8 +1999,6 @@ struct ib_flow_spec_tunnel {
struct ib_flow_esp_filter {
__be32 spi;
__be32 seq;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_esp {
@@ -2025,8 +2012,6 @@ struct ib_flow_gre_filter {
__be16 c_ks_res0_ver;
__be16 protocol;
__be32 key;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_gre {
@@ -2038,8 +2023,6 @@ struct ib_flow_spec_gre {
struct ib_flow_mpls_filter {
__be32 tag;
- /* Must be last */
- u8 real_sz[];
};
struct ib_flow_spec_mpls {


@@ -629,12 +629,14 @@ struct uverbs_attr {
};
struct uverbs_attr_bundle {
- struct ib_udata driver_udata;
- struct ib_udata ucore;
- struct ib_uverbs_file *ufile;
- struct ib_ucontext *context;
- struct ib_uobject *uobject;
- DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
+ struct_group_tagged(uverbs_attr_bundle_hdr, hdr,
+ struct ib_udata driver_udata;
+ struct ib_udata ucore;
+ struct ib_uverbs_file *ufile;
+ struct ib_ucontext *context;
+ struct ib_uobject *uobject;
+ DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
+ );
struct uverbs_attr attrs[];
};


@@ -73,6 +73,17 @@ struct hns_roce_ib_create_srq_resp {
__u32 cap_flags; /* Use enum hns_roce_srq_cap_flags */
};
+ enum hns_roce_congest_type_flags {
+ HNS_ROCE_CREATE_QP_FLAGS_DCQCN,
+ HNS_ROCE_CREATE_QP_FLAGS_LDCP,
+ HNS_ROCE_CREATE_QP_FLAGS_HC3,
+ HNS_ROCE_CREATE_QP_FLAGS_DIP,
+ };
+ enum hns_roce_create_qp_comp_mask {
+ HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE = 1 << 0,
+ };
struct hns_roce_ib_create_qp {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
@@ -81,6 +92,9 @@ struct hns_roce_ib_create_qp {
__u8 sq_no_prefetch;
__u8 reserved[5];
__aligned_u64 sdb_addr;
+ __aligned_u64 comp_mask; /* Use enum hns_roce_create_qp_comp_mask */
+ __aligned_u64 create_flags;
+ __aligned_u64 cong_type_flags;
};
enum hns_roce_qp_cap_flags {
@@ -114,6 +128,8 @@ struct hns_roce_ib_alloc_ucontext_resp {
__u32 reserved;
__u32 config;
__u32 max_inline_data;
+ __u8 congest_type;
+ __u8 reserved0[7];
};
struct hns_roce_ib_alloc_ucontext {