linux-stable/drivers/infiniband/hw/efa/efa_com.h
Gal Pressman 2a152512a1 RDMA/efa: CQ notifications
This patch adds support for CQ notifications through the standard verbs
api.

In order to achieve that, a new event queue (EQ) object is introduced,
which is in charge of reporting completion events to the driver.  On
driver load, EQs are allocated and their affinity is set to a single
cpu. When a user app creates a CQ with a completion channel, the
completion vector number is converted to an EQ number, which is in charge
of reporting the CQ events.

In addition, the CQ creation admin command now returns an offset for the
CQ doorbell, which is mapped to the userspace provider and is used to arm
the CQ when requested by the user.

The EQs use a single doorbell (located on the registers BAR), which
encodes the EQ number and arm as part of the doorbell value.  The EQs are
polled by the driver on each new EQE, and are re-armed when the poll is
completed.

Link: https://lore.kernel.org/r/20211003105605.29222-1-galpress@amazon.com
Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
2021-10-06 19:47:18 -03:00

180 lines
4.3 KiB
C

/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
#define _EFA_COM_H_
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include "efa_common_defs.h"
#include "efa_admin_defs.h"
#include "efa_admin_cmds_defs.h"
#include "efa_regs_defs.h"
#define EFA_MAX_HANDLERS 256
struct efa_com_admin_cq {
struct efa_admin_acq_entry *entries;
dma_addr_t dma_addr;
spinlock_t lock; /* Protects ACQ */
u16 cc; /* consumer counter */
u8 phase;
};
struct efa_com_admin_sq {
struct efa_admin_aq_entry *entries;
dma_addr_t dma_addr;
spinlock_t lock; /* Protects ASQ */
u32 __iomem *db_addr;
u16 cc; /* consumer counter */
u16 pc; /* producer counter */
u8 phase;
};
/* Don't use anything other than atomic64 */
struct efa_com_stats_admin {
atomic64_t submitted_cmd;
atomic64_t completed_cmd;
atomic64_t cmd_err;
atomic64_t no_completion;
};
enum {
EFA_AQ_STATE_RUNNING_BIT = 0,
EFA_AQ_STATE_POLLING_BIT = 1,
};
struct efa_com_admin_queue {
void *dmadev;
void *efa_dev;
struct efa_comp_ctx *comp_ctx;
u32 completion_timeout; /* usecs */
u16 poll_interval; /* msecs */
u16 depth;
struct efa_com_admin_cq cq;
struct efa_com_admin_sq sq;
u16 msix_vector_idx;
unsigned long state;
/* Count the number of available admin commands */
struct semaphore avail_cmds;
struct efa_com_stats_admin stats;
spinlock_t comp_ctx_lock; /* Protects completion context pool */
u32 *comp_ctx_pool;
u16 comp_ctx_pool_next;
};
struct efa_aenq_handlers;
struct efa_com_eq;
typedef void (*efa_eqe_handler)(struct efa_com_eq *eeq,
struct efa_admin_eqe *eqe);
struct efa_com_aenq {
struct efa_admin_aenq_entry *entries;
struct efa_aenq_handlers *aenq_handlers;
dma_addr_t dma_addr;
u32 cc; /* consumer counter */
u16 msix_vector_idx;
u16 depth;
u8 phase;
};
struct efa_com_mmio_read {
struct efa_admin_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
u16 seq_num;
u16 mmio_read_timeout; /* usecs */
/* serializes mmio reads */
spinlock_t lock;
};
struct efa_com_dev {
struct efa_com_admin_queue aq;
struct efa_com_aenq aenq;
u8 __iomem *reg_bar;
void *dmadev;
void *efa_dev;
u32 supported_features;
u32 dma_addr_bits;
struct efa_com_mmio_read mmio_read;
};
struct efa_com_eq {
struct efa_com_dev *edev;
struct efa_admin_eqe *eqes;
dma_addr_t dma_addr;
u32 cc; /* Consumer counter */
u16 eqn;
u16 depth;
u8 phase;
efa_eqe_handler cb;
};
struct efa_com_create_eq_params {
dma_addr_t dma_addr;
u32 event_bitmask;
u16 depth;
u8 entry_size_in_bytes;
u8 msix_vec;
};
struct efa_com_create_eq_result {
u16 eqn;
};
struct efa_com_destroy_eq_params {
u16 eqn;
};
typedef void (*efa_aenq_handler)(void *data,
struct efa_admin_aenq_entry *aenq_e);
/* Holds aenq handlers. Indexed by AENQ event group */
struct efa_aenq_handlers {
efa_aenq_handler handlers[EFA_MAX_HANDLERS];
efa_aenq_handler unimplemented_handler;
};
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_admin_init(struct efa_com_dev *edev,
struct efa_aenq_handlers *aenq_handlers);
void efa_com_admin_destroy(struct efa_com_dev *edev);
int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
efa_eqe_handler cb, u16 depth, u8 msix_vec);
void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq);
int efa_com_dev_reset(struct efa_com_dev *edev,
enum efa_regs_reset_reason_types reset_reason);
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling);
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev);
int efa_com_mmio_reg_read_init(struct efa_com_dev *edev);
void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev);
int efa_com_validate_version(struct efa_com_dev *edev);
int efa_com_get_dma_width(struct efa_com_dev *edev);
int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
struct efa_admin_aq_entry *cmd,
size_t cmd_size,
struct efa_admin_acq_entry *comp,
size_t comp_size);
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data);
void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
struct efa_com_eq *eeq);
#endif /* _EFA_COM_H_ */