misc: bcm-vk: add VK messaging support

Add message support in order to be able to communicate
to VK card via message queues.

This info is used for debug purposes: logs are collected via direct
read of BAR space and via sysfs access (in a follow-on commit).

Co-developed-by: Desmond Yan <desmond.yan@broadcom.com>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Desmond Yan <desmond.yan@broadcom.com>
Signed-off-by: Scott Branden <scott.branden@broadcom.com>
Link: https://lore.kernel.org/r/20210120175827.14820-10-scott.branden@broadcom.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Scott Branden 2021-01-20 09:58:23 -08:00 committed by Greg Kroah-Hartman
parent ff428d052b
commit 111d746bb4
7 changed files with 2087 additions and 3 deletions

View file

@ -6,5 +6,6 @@
obj-$(CONFIG_BCM_VK) += bcm_vk.o
bcm_vk-objs := \
bcm_vk_dev.o \
bcm_vk_msg.o
bcm_vk_msg.o \
bcm_vk_sg.o

View file

@ -6,11 +6,13 @@
#ifndef BCM_VK_H
#define BCM_VK_H
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <uapi/linux/misc/bcm_vk.h>
@ -93,14 +95,53 @@
#define MAJOR_SOC_REV(_chip_id) (((_chip_id) >> 20) & 0xf)
#define BAR_CARD_TEMPERATURE 0x45c
/* defines for all temperature sensor */
#define BCM_VK_TEMP_FIELD_MASK 0xff
#define BCM_VK_CPU_TEMP_SHIFT 0
#define BCM_VK_DDR0_TEMP_SHIFT 8
#define BCM_VK_DDR1_TEMP_SHIFT 16
#define BAR_CARD_VOLTAGE 0x460
/* defines for voltage rail conversion */
#define BCM_VK_VOLT_RAIL_MASK 0xffff
#define BCM_VK_3P3_VOLT_REG_SHIFT 16
#define BAR_CARD_ERR_LOG 0x464
/* Error log register bit definition - register for error alerts */
#define ERR_LOG_UECC BIT(0)
#define ERR_LOG_SSIM_BUSY BIT(1)
#define ERR_LOG_AFBC_BUSY BIT(2)
#define ERR_LOG_HIGH_TEMP_ERR BIT(3)
#define ERR_LOG_WDOG_TIMEOUT BIT(4)
#define ERR_LOG_SYS_FAULT BIT(5)
#define ERR_LOG_RAMDUMP BIT(6)
#define ERR_LOG_COP_WDOG_TIMEOUT BIT(7)
/* warnings */
#define ERR_LOG_MEM_ALLOC_FAIL BIT(8)
#define ERR_LOG_LOW_TEMP_WARN BIT(9)
#define ERR_LOG_ECC BIT(10)
#define ERR_LOG_IPC_DWN BIT(11)
/* Alert bit definitions detected on host */
#define ERR_LOG_HOST_INTF_V_FAIL BIT(13)
#define ERR_LOG_HOST_HB_FAIL BIT(14)
#define ERR_LOG_HOST_PCIE_DWN BIT(15)
#define BAR_CARD_ERR_MEM 0x468
/* defines for mem err, all fields have same width */
#define BCM_VK_MEM_ERR_FIELD_MASK 0xff
#define BCM_VK_ECC_MEM_ERR_SHIFT 0
#define BCM_VK_UECC_MEM_ERR_SHIFT 8
/* threshold of event occurrences before logs start to come out */
#define BCM_VK_ECC_THRESHOLD 10
#define BCM_VK_UECC_THRESHOLD 1
#define BAR_CARD_PWR_AND_THRE 0x46c
/* defines for power and temp threshold, all fields have same width */
#define BCM_VK_PWR_AND_THRE_FIELD_MASK 0xff
#define BCM_VK_LOW_TEMP_THRE_SHIFT 0
#define BCM_VK_HIGH_TEMP_THRE_SHIFT 8
#define BCM_VK_PWR_STATE_SHIFT 16
#define BAR_CARD_STATIC_INFO 0x470
@ -143,6 +184,11 @@
#define BAR_FIRMWARE_TAG_SIZE 50
#define FIRMWARE_STATUS_PRE_INIT_DONE 0x1f
/* VK MSG_ID defines */
#define VK_MSG_ID_BITMAP_SIZE 4096
#define VK_MSG_ID_BITMAP_MASK (VK_MSG_ID_BITMAP_SIZE - 1)
#define VK_MSG_ID_OVERFLOW 0xffff
/*
* BAR1
*/
@ -197,6 +243,10 @@
/* VK device supports a maximum of 3 bars */
#define MAX_BAR 3
/* default number of msg blk for inband SGL */
#define BCM_VK_DEF_IB_SGL_BLK_LEN 16
#define BCM_VK_IB_SGL_BLK_MAX 24
enum pci_barno {
BAR_0 = 0,
BAR_1,
@ -267,9 +317,27 @@ struct bcm_vk_proc_mon_info {
struct bcm_vk_proc_mon_entry_t entries[BCM_VK_PROC_MON_MAX];
};
struct bcm_vk_hb_ctrl {
struct timer_list timer;
u32 last_uptime;
u32 lost_cnt;
};
struct bcm_vk_alert {
u16 flags;
u16 notfs;
};
/* some alert counters that the driver will keep track of */
struct bcm_vk_alert_cnts {
u16 ecc;
u16 uecc;
};
struct bcm_vk {
struct pci_dev *pdev;
void __iomem *bar[MAX_BAR];
int num_irqs;
struct bcm_vk_card_info card_info;
struct bcm_vk_proc_mon_info proc_mon_info;
@ -283,9 +351,17 @@ struct bcm_vk {
/* Reference-counting to handle file operations */
struct kref kref;
spinlock_t msg_id_lock; /* Spinlock for msg_id */
u16 msg_id;
DECLARE_BITMAP(bmap, VK_MSG_ID_BITMAP_SIZE);
spinlock_t ctx_lock; /* Spinlock for component context */
struct bcm_vk_ctx ctx[VK_CMPT_CTX_MAX];
struct bcm_vk_ht_entry pid_ht[VK_PID_HT_SZ];
pid_t reset_pid; /* process that issue reset */
atomic_t msgq_inited; /* indicate if info has been synced with vk */
struct bcm_vk_msg_chan to_v_msg_chan;
struct bcm_vk_msg_chan to_h_msg_chan;
struct workqueue_struct *wq_thread;
struct work_struct wq_work; /* work queue for deferred job */
@ -294,6 +370,15 @@ struct bcm_vk {
dma_addr_t tdma_addr; /* test dma segment bus addr */
struct notifier_block panic_nb;
u32 ib_sgl_size; /* size allocated for inband sgl insertion */
/* heart beat mechanism control structure */
struct bcm_vk_hb_ctrl hb_ctrl;
/* house-keeping variable of error logs */
spinlock_t host_alert_lock; /* protection to access host_alert struct */
struct bcm_vk_alert host_alert;
struct bcm_vk_alert peer_alert; /* bits set by the card */
struct bcm_vk_alert_cnts alert_cnts;
/* offset of the peer log control in BAR2 */
u32 peerlog_off;
@ -306,8 +391,26 @@ struct bcm_vk {
enum bcm_vk_wq_offload_flags {
BCM_VK_WQ_DWNLD_PEND = 0,
BCM_VK_WQ_DWNLD_AUTO = 1,
BCM_VK_WQ_NOTF_PEND = 2,
};
/* a macro to get an individual field with mask and shift */
#define BCM_VK_EXTRACT_FIELD(_field, _reg, _mask, _shift) \
(_field = (((_reg) >> (_shift)) & (_mask)))
struct bcm_vk_entry {
const u32 mask;
const u32 exp_val;
const char *str;
};
/* alerts that could be generated from peer */
#define BCM_VK_PEER_ERR_NUM 12
extern struct bcm_vk_entry const bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM];
/* alerts detected by the host */
#define BCM_VK_HOST_ERR_NUM 3
extern struct bcm_vk_entry const bcm_vk_host_err[BCM_VK_HOST_ERR_NUM];
/*
* check if PCIe interface is down on read. Use it when it is
* certain that _val should never be all ones.
@ -354,8 +457,28 @@ static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk)
}
int bcm_vk_open(struct inode *inode, struct file *p_file);
ssize_t bcm_vk_read(struct file *p_file, char __user *buf, size_t count,
loff_t *f_pos);
ssize_t bcm_vk_write(struct file *p_file, const char __user *buf,
size_t count, loff_t *f_pos);
__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait);
int bcm_vk_release(struct inode *inode, struct file *p_file);
void bcm_vk_release_data(struct kref *kref);
irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id);
irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id);
int bcm_vk_msg_init(struct bcm_vk *vk);
void bcm_vk_msg_remove(struct bcm_vk *vk);
int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync);
void bcm_vk_blk_drv_access(struct bcm_vk *vk);
s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk);
int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
const pid_t pid, const u32 q_num);
void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val);
int bcm_vk_auto_load_all_images(struct bcm_vk *vk);
void bcm_vk_hb_init(struct bcm_vk *vk);
void bcm_vk_hb_deinit(struct bcm_vk *vk);
void bcm_vk_handle_notf(struct bcm_vk *vk);
bool bcm_vk_drv_access_ok(struct bcm_vk *vk);
void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask);
#endif

View file

@ -8,6 +8,7 @@
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
@ -102,6 +103,54 @@ static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
module_param(nr_scratch_pages, uint, 0444);
MODULE_PARM_DESC(nr_scratch_pages,
"Number of pre allocated DMAable coherent pages.\n");
static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN;
module_param(nr_ib_sgl_blk, uint, 0444);
MODULE_PARM_DESC(nr_ib_sgl_blk,
"Number of in-band msg blks for short SGL.\n");
/*
* alerts that could be generated from peer
*/
const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = {
	/* each entry: {bit mask, expected value when raised, name used in logs} */
	{ERR_LOG_UECC, ERR_LOG_UECC, "uecc"},
	{ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"},
	{ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"},
	{ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"},
	{ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"},
	{ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"},
	{ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"},
	{ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT,
	 "cop_wdog_timeout"},
	/* entries below are warnings rather than hard errors */
	{ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"},
	{ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"},
	{ERR_LOG_ECC, ERR_LOG_ECC, "ecc"},
	{ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"},
};
/* alerts detected by the host */
const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = {
	/* each entry: {bit mask, expected value when raised, name used in logs} */
	{ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"},
	{ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"},
	{ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"},
};
/*
 * IRQ handler for the notification vector from the VK card.  Defers the
 * real work to the driver workqueue; drops the event with an error log
 * if the message queues have not been initialized yet.
 */
irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
{
	struct bcm_vk *vk = dev_id;

	if (bcm_vk_drv_access_ok(vk)) {
		/* schedule deferred handling unless a notification is already pending */
		if (!test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload))
			queue_work(vk->wq_thread, &vk->wq_work);
	} else {
		dev_err(&vk->pdev->dev,
			"Interrupt %d received when msgq not inited\n", irq);
	}

	return IRQ_HANDLED;
}
static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
{
@ -126,6 +175,7 @@ static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
dev_err(dev,
"Intf major.minor=%d.%d rejected - drv %d.%d\n",
major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
ret = -EPFNOSUPPORT;
} else {
dev_dbg(dev,
@ -135,6 +185,154 @@ static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
return ret;
}
/*
 * Walk an alert table and log each alert reflected in @alert.  ECC/UECC
 * entries are compared against counters read from the card's BAR0
 * error-memory register and only logged when the count changed and is
 * at/above its threshold; every other entry is logged on a raise/clear
 * transition (current state differs from previously latched state).
 */
static void bcm_vk_log_notf(struct bcm_vk *vk,
			    struct bcm_vk_alert *alert,
			    struct bcm_vk_entry const *entry_tab,
			    const u32 table_size)
{
	u32 i;
	u32 masked_val, latched_val;
	struct bcm_vk_entry const *entry;
	u32 reg;
	u16 ecc_mem_err, uecc_mem_err;
	struct device *dev = &vk->pdev->dev;

	for (i = 0; i < table_size; i++) {
		entry = &entry_tab[i];
		masked_val = entry->mask & alert->notfs;	/* current state */
		latched_val = entry->mask & alert->flags;	/* last reported state */

		if (masked_val == ERR_LOG_UECC) {
			/*
			 * if there is difference between stored cnt and it
			 * is greater than threshold, log it.
			 */
			reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
			BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg,
					     BCM_VK_MEM_ERR_FIELD_MASK,
					     BCM_VK_UECC_MEM_ERR_SHIFT);
			if ((uecc_mem_err != vk->alert_cnts.uecc) &&
			    (uecc_mem_err >= BCM_VK_UECC_THRESHOLD))
				dev_info(dev,
					 "ALERT! %s.%d uecc RAISED - ErrCnt %d\n",
					 DRV_MODULE_NAME, vk->devid,
					 uecc_mem_err);
			/* remember last count so the same value is not re-logged */
			vk->alert_cnts.uecc = uecc_mem_err;
		} else if (masked_val == ERR_LOG_ECC) {
			reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
			BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg,
					     BCM_VK_MEM_ERR_FIELD_MASK,
					     BCM_VK_ECC_MEM_ERR_SHIFT);
			if ((ecc_mem_err != vk->alert_cnts.ecc) &&
			    (ecc_mem_err >= BCM_VK_ECC_THRESHOLD))
				dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n",
					 DRV_MODULE_NAME, vk->devid,
					 ecc_mem_err);
			vk->alert_cnts.ecc = ecc_mem_err;
		} else if (masked_val != latched_val) {
			/* state changed since last report - print a log as info */
			dev_info(dev, "ALERT! %s.%d %s %s\n",
				 DRV_MODULE_NAME, vk->devid, entry->str,
				 masked_val ? "RAISED" : "CLEARED");
		}
	}
}
/*
 * Dump the peer's circular log buffer (located in BAR2) to the kernel
 * log, one NUL-terminated line at a time.  The control block read from
 * the card is validated against the copy captured at sync time before
 * any data is consumed; the read index is written back at the end.
 */
static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
{
	struct bcm_vk_peer_log log;
	struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
	char loc_buf[BCM_VK_PEER_LOG_LINE_MAX];
	int cnt;
	struct device *dev = &vk->pdev->dev;
	unsigned int data_offset;

	/* snapshot the peer log control block from BAR2 */
	memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));

	dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
		log.buf_size, log.mask, log.rd_idx, log.wr_idx);

	if (!log_info->buf_size) {
		dev_err(dev, "Peer log dump disabled - skipped!\n");
		return;
	}

	/* perform range checking for rd/wr idx */
	if ((log.rd_idx > log_info->mask) ||
	    (log.wr_idx > log_info->mask) ||
	    (log.buf_size != log_info->buf_size) ||
	    (log.mask != log_info->mask)) {
		dev_err(dev,
			"Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n",
			log_info->buf_size, log.buf_size,
			log_info->mask, log.mask,
			log.rd_idx, log.wr_idx);
		return;
	}

	cnt = 0;
	data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
	/* guarantee termination when a line fills the whole local buffer */
	loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0';
	while (log.rd_idx != log.wr_idx) {
		loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);

		/* flush on NUL terminator or when the local buffer is full */
		if ((loc_buf[cnt] == '\0') ||
		    (cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) {
			dev_err(dev, "%s", loc_buf);
			cnt = 0;
		} else {
			cnt++;
		}
		log.rd_idx = (log.rd_idx + 1) & log.mask;
	}
	/*
	 * NOTE(review): a trailing partial line (bytes after the last NUL up
	 * to wr_idx) is consumed but never printed - confirm this is intended.
	 */
	/* update rd idx at the end */
	vkwrite32(vk, log.rd_idx, BAR_2,
		  vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
}
/*
 * Deferred (workqueue) handler for alert notifications: logs peer
 * alerts read from the BAR0 error-log register, then host-detected
 * alerts, and finally dumps the peer log when a system fault or
 * heartbeat failure has been flagged.
 */
void bcm_vk_handle_notf(struct bcm_vk *vk)
{
	u32 reg;
	struct bcm_vk_alert alert;
	bool intf_down;
	unsigned long flags;

	/* handle peer alerts and then locally detected ones */
	reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
	intf_down = BCM_VK_INTF_IS_DOWN(reg);
	if (!intf_down) {
		vk->peer_alert.notfs = reg;
		bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
				ARRAY_SIZE(bcm_vk_peer_err));
		/* latch: remember what has been reported */
		vk->peer_alert.flags = vk->peer_alert.notfs;
	} else {
		/* turn off access */
		bcm_vk_blk_drv_access(vk);
	}

	/* check and make copy of alert with lock and then free lock */
	spin_lock_irqsave(&vk->host_alert_lock, flags);
	if (intf_down)
		vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;

	alert = vk->host_alert;
	vk->host_alert.flags = vk->host_alert.notfs;
	spin_unlock_irqrestore(&vk->host_alert_lock, flags);

	/* call display with copy */
	bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
			ARRAY_SIZE(bcm_vk_host_err));

	/*
	 * If it is a sys fault or heartbeat timeout, we would like extract
	 * log msg from the card so that we would know what is the last fault
	 */
	if (!intf_down &&
	    ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
	     (vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
		bcm_vk_dump_peer_log(vk);
}
static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
u64 offset, u32 mask, u32 value,
unsigned long timeout_ms)
@ -301,6 +499,31 @@ static int bcm_vk_sync_card_info(struct bcm_vk *vk)
return 0;
}
/*
 * Block further driver access after the card goes down: mark the msg
 * queues uninitialized so subsequent rd/wr fail fast, then SIGKILL
 * every process that still has an open context in the pid hash table.
 */
void bcm_vk_blk_drv_access(struct bcm_vk *vk)
{
	int i;

	/*
	 * kill all the apps
	 */
	spin_lock(&vk->ctx_lock);

	/* set msgq_inited to 0 so that all rd/wr will be blocked */
	atomic_set(&vk->msgq_inited, 0);

	for (i = 0; i < VK_PID_HT_SZ; i++) {
		struct bcm_vk_ctx *ctx;

		/*
		 * NOTE(review): this also kills the process that issued a
		 * reset (vk->reset_pid) if it holds a context - confirm
		 * that is intended.
		 */
		list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
			dev_dbg(&vk->pdev->dev,
				"Send kill signal to pid %d\n",
				ctx->pid);
			kill_pid(find_vpid(ctx->pid), SIGKILL, 1);
		}
	}
	spin_unlock(&vk->ctx_lock);
}
static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
dma_addr_t host_buf_addr, u32 buf_size)
{
@ -518,6 +741,17 @@ static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
goto err_firmware_out;
}
/*
* Next, initialize Message Q if we are loading boot2.
* Do a force sync
*/
ret = bcm_vk_sync_msgq(vk, true);
if (ret) {
dev_err(dev, "Boot2 Error reading comm msg Q info\n");
ret = -EIO;
goto err_firmware_out;
}
/* sync & channel other info */
ret = bcm_vk_sync_card_info(vk);
if (ret) {
@ -668,12 +902,20 @@ static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
}
/*
* deferred work queue for auto download.
* deferred work queue for draining and auto download.
*/
static void bcm_vk_wq_handler(struct work_struct *work)
{
struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
struct device *dev = &vk->pdev->dev;
s32 ret;
/* check wq offload bit map to perform various operations */
if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
/* clear bit right the way for notification */
clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
bcm_vk_handle_notf(vk);
}
if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
bcm_vk_auto_load_all_images(vk);
@ -684,6 +926,14 @@ static void bcm_vk_wq_handler(struct work_struct *work)
clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
}
/* next, try to drain */
ret = bcm_to_h_msg_dequeue(vk);
if (ret == 0)
dev_dbg(dev, "Spurious trigger for workqueue\n");
else if (ret < 0)
bcm_vk_blk_drv_access(vk);
}
static long bcm_vk_load_image(struct bcm_vk *vk,
@ -837,6 +1087,9 @@ static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations bcm_vk_fops = {
.owner = THIS_MODULE,
.open = bcm_vk_open,
.read = bcm_vk_read,
.write = bcm_vk_write,
.poll = bcm_vk_poll,
.release = bcm_vk_release,
.unlocked_ioctl = bcm_vk_ioctl,
};
@ -869,6 +1122,12 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
kref_init(&vk->kref);
if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) {
dev_warn(dev, "Inband SGL blk %d limited to max %d\n",
nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX);
nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX;
}
vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
mutex_init(&vk->mutex);
err = pci_enable_device(pdev);
@ -932,11 +1191,34 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
for (vk->num_irqs = 0;
vk->num_irqs < VK_MSIX_MSGQ_MAX;
vk->num_irqs++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_msgq_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk);
if (err) {
dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1);
goto err_irq;
}
}
/* one irq for notification from VK */
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_notf_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk);
if (err) {
dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1);
goto err_irq;
}
vk->num_irqs++;
id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "unable to get id\n");
goto err_iounmap;
goto err_irq;
}
vk->devid = id;
@ -966,6 +1248,12 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_misc_deregister;
}
err = bcm_vk_msg_init(vk);
if (err) {
dev_err(dev, "failed to init msg queue info\n");
goto err_destroy_workqueue;
}
/* sync other info */
bcm_vk_sync_card_info(vk);
@ -994,6 +1282,9 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
/* enable hb */
bcm_vk_hb_init(vk);
dev_dbg(dev, "BCM-VK:%u created\n", id);
return 0;
@ -1015,6 +1306,13 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ida_remove:
ida_simple_remove(&bcm_vk_ida, id);
err_irq:
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(dev, pci_irq_vector(pdev, i), vk);
pci_disable_msix(pdev);
pci_disable_msi(pdev);
err_iounmap:
for (i = 0; i < MAX_BAR; i++) {
if (vk->bar[i])
@ -1053,6 +1351,8 @@ static void bcm_vk_remove(struct pci_dev *pdev)
struct bcm_vk *vk = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &vk->miscdev;
bcm_vk_hb_deinit(vk);
/*
* Trigger a reset to card and wait enough time for UCODE to rerun,
* which re-initialize the card into its default state.
@ -1076,6 +1376,11 @@ static void bcm_vk_remove(struct pci_dev *pdev)
kfree(misc_device->name);
ida_simple_remove(&bcm_vk_ida, vk->devid);
}
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
pci_disable_msix(pdev);
pci_disable_msi(pdev);
cancel_work_sync(&vk->wq_work);
destroy_workqueue(vk->wq_thread);

File diff suppressed because it is too large Load diff

View file

@ -6,6 +6,78 @@
#ifndef BCM_VK_MSG_H
#define BCM_VK_MSG_H
#include <uapi/linux/misc/bcm_vk.h>
#include "bcm_vk_sg.h"
/*
 * Single message queue control structure.  This layout lives in BAR
 * memory and is shared between the host driver and the VK card, so
 * field sizes and order must not change.
 */
struct bcm_vk_msgq {
	u16 type;	/* queue type */
	u16 num;	/* queue number */
	u32 start;	/* offset in BAR1 where the queue memory starts */
	u32 rd_idx;	/* read idx */
	u32 wr_idx;	/* write idx */
	u32 size;	/*
			 * size, which is in number of 16byte blocks,
			 * to align with the message data structure.
			 */
	u32 nxt;	/*
			 * nxt offset to the next msg queue struct.
			 * This is to provide flexibility for alignment purposes.
			 */
/* Least significant 16 bits in below field hold doorbell register offset */
#define DB_SHIFT 16
	u32 db_offset;	/* queue doorbell register offset in BAR0 */
	u32 rsvd;	/* reserved/padding */
};
/*
* Structure to record static info from the msgq sync. We keep local copy
* for some of these variables for both performance + checking purpose.
*/
struct bcm_vk_sync_qinfo {
	void __iomem *q_start;	/* host mapping of the queue's start in BAR */
	u32 q_size;		/* queue size - presumably in msg blocks, as bcm_vk_msgq.size; confirm */
	u32 q_mask;		/* index wrap mask */
	u32 q_low;		/* low watermark - TODO confirm exact semantics */
	u32 q_db_offset;	/* doorbell register offset in BAR0 */
};
#define VK_MSGQ_MAX_NR 4 /* Maximum number of message queues */
/*
* message block - basic unit in the message where a message's size is always
* N x sizeof(basic_block)
*/
struct vk_msg_blk {
	u8 function_id;		/* opcode - one of the VK_FID_* values below */
#define VK_FID_TRANS_BUF 5
#define VK_FID_SHUTDOWN 8
#define VK_FID_INIT 9
	u8 size;	/* size of the message in number of vk_msg_blk's */
	u16 trans_id;	/* transport id, queue & msg_id */
	u32 context_id;	/* session context; VK_NEW_CTX requests a new one */
#define VK_NEW_CTX 0
	u32 cmd;	/* command word - VK_CMD_* bits plus plane count */
#define VK_CMD_PLANES_MASK 0x000f /* number of planes to up/download */
#define VK_CMD_UPLOAD 0x0400 /* memory transfer to vk */
#define VK_CMD_DOWNLOAD 0x0500 /* memory transfer from vk */
#define VK_CMD_MASK 0x0f00 /* command mask */
	u32 arg;	/* command-specific argument */
};
/* vk_msg_blk is 16 bytes fixed */
#define VK_MSGQ_BLK_SIZE (sizeof(struct vk_msg_blk))
/* shift for fast division of basic msg blk size */
#define VK_MSGQ_BLK_SZ_SHIFT 4
/* use msg_id 0 for any simplex host2vk communication */
#define VK_SIMPLEX_MSG_ID 0
/* context per session opening of sysfs */
struct bcm_vk_ctx {
struct list_head node; /* use for linkage in Hash Table */
@ -13,7 +85,11 @@ struct bcm_vk_ctx {
bool in_use;
pid_t pid;
u32 hash_idx;
u32 q_num; /* queue number used by the stream */
struct miscdevice *miscdev;
atomic_t pend_cnt; /* number of items pending to be read from host */
atomic_t dma_cnt; /* any dma transaction outstanding */
wait_queue_head_t rd_wq;
};
/* pid hash table entry */
@ -21,6 +97,55 @@ struct bcm_vk_ht_entry {
struct list_head head;
};
#define VK_DMA_MAX_ADDRS 4 /* Max 4 DMA Addresses */
/*
 * Structure for housekeeping a single work entry.  Allocated with
 * extra trailing space so the to_v message blocks can be appended
 * directly after the struct.
 */
struct bcm_vk_wkent {
	struct list_head node;	/* for linking purpose */
	struct bcm_vk_ctx *ctx;

	/* Store up to 4 dma pointers */
	struct bcm_vk_dma dma[VK_DMA_MAX_ADDRS];

	u32 to_h_blks;		/* response */
	struct vk_msg_blk *to_h_msg;

	/*
	 * put the to_v_msg at the end so that we could simply append to_v msg
	 * to the end of the allocated block
	 */
	u32 usr_msg_id;
	u32 to_v_blks;
	u32 seq_num;
	/* C99 flexible array member replaces the deprecated [0] GNU extension */
	struct vk_msg_blk to_v_msg[];
};
/* queue stats counters */
struct bcm_vk_qs_cnts {
	u32 cnt;	/* general counter, used to limit output */
	u32 acc_sum;	/* accumulated occupancy sum - presumably for averaging; confirm with users */
	u32 max_occ;	/* max during a sampling period */
	u32 max_abs;	/* the abs max since reset */
};
/* control channel structure for either to_v or to_h communication */
struct bcm_vk_msg_chan {
	u32 q_nr;	/* number of queues in use on this channel - TODO confirm */
	/* Mutex to access msgq */
	struct mutex msgq_mutex;
	/* pointing to BAR locations */
	struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR];
	/* Spinlock to access pending queue */
	spinlock_t pendq_lock;
	/* for temporary storing pending items, one for each queue */
	struct list_head pendq[VK_MSGQ_MAX_NR];
	/* static queue info from the sync */
	struct bcm_vk_sync_qinfo sync_qinfo[VK_MSGQ_MAX_NR];
};
/* total number of message queues allowed by the driver */
#define VK_MSGQ_PER_CHAN_MAX 3
#define VK_MSGQ_NUM_DEFAULT (VK_MSGQ_PER_CHAN_MAX - 1)
/* total number of supported ctx, 32 ctx each for 5 components */
#define VK_CMPT_CTX_MAX (32 * 5)
@ -28,4 +153,11 @@ struct bcm_vk_ht_entry {
#define VK_PID_HT_SHIFT_BIT 7 /* 128 */
#define VK_PID_HT_SZ BIT(VK_PID_HT_SHIFT_BIT)
/* The following are offsets of DDR info provided by the vk card */
#define VK_BAR0_SEG_SIZE (4 * SZ_1K) /* segment size for BAR0 */
/* shutdown types supported */
#define VK_SHUTDOWN_PID 1
#define VK_SHUTDOWN_GRACEFUL 2
#endif

View file

@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018-2020 Broadcom.
*/
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <uapi/linux/misc/bcm_vk.h>
#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"
/*
* Valkyrie has a hardware limitation of 16M transfer size.
* So limit the SGL chunks to 16M.
*/
#define BCM_VK_MAX_SGL_CHUNK SZ_16M
static int bcm_vk_dma_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int dir,
struct _vk_data *vkdata);
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);
/* Uncomment to dump SGLIST */
/* #define BCM_VK_DUMP_SGLIST */
/*
 * Pin a user buffer, DMA-map its pages and build the card-facing SG
 * list in a coherent buffer.  On success @vkdata is rewritten in place
 * (address = coherent dma handle, size = SG list size in bytes) so it
 * can be handed to the card; bcm_vk_dma_free() undoes everything.
 *
 * Fixes over the previous version: every error path now fully cleans
 * up after itself (the caller's rollback skips the failing entry), and
 * pages pinned by get_user_pages_fast() are released with put_page()
 * instead of the incorrect __free_page().
 *
 * Returns 0 on success or a negative errno; on failure no pages remain
 * pinned and dma->sglist is left NULL.
 */
static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int direction,
			    struct _vk_data *vkdata)
{
	dma_addr_t addr, sg_addr;
	int err;
	int i;
	int offset;
	u32 size;
	u32 remaining_size;
	u32 transfer_size;
	u64 data;
	unsigned long first, last;
	struct _vk_data *sgdata;

	/* Get 64-bit user address */
	data = get_unaligned(&vkdata->address);

	/* offset into first page */
	offset = offset_in_page(data);

	/* Calculate number of pages spanned by [data, data + size) */
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->nr_pages = last - first + 1;

	/* Allocate page-pointer array */
	dma->pages = kmalloc_array(dma->nr_pages,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!dma->pages)
		return -ENOMEM;

	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
		data, vkdata->size, dma->nr_pages);

	dma->direction = direction;

	/* Pin user pages into memory */
	err = get_user_pages_fast(data & PAGE_MASK,
				  dma->nr_pages,
				  direction == DMA_FROM_DEVICE,
				  dma->pages);
	if (err != dma->nr_pages) {
		/* release the pages that did get pinned instead of leaking */
		dma->nr_pages = (err >= 0) ? err : 0;
		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
			err, dma->nr_pages);
		err = (err < 0) ? err : -EINVAL;
		goto fail_put_pages;
	}

	/* Max size of sg list is 1 per mapped page + fields at start */
	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
		     (sizeof(u32) * SGLIST_VKDATA_START);

	/* Allocate sglist */
	dma->sglist = dma_alloc_coherent(dev,
					 dma->sglen,
					 &dma->handle,
					 GFP_KERNEL);
	if (!dma->sglist) {
		err = -ENOMEM;
		goto fail_put_pages;
	}

	dma->sglist[SGLIST_NUM_SG] = 0;
	dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
	remaining_size = vkdata->size;
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];

	/* Map first page, which may start at an interior page offset */
	size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
	remaining_size -= size;
	sg_addr = dma_map_page(dev,
			       dma->pages[0],
			       offset,
			       size,
			       dma->direction);
	if (unlikely(dma_mapping_error(dev, sg_addr))) {
		err = -EIO;
		goto fail_free_sglist;
	}
	transfer_size = size;

	for (i = 1; i < dma->nr_pages; i++) {
		size = min_t(size_t, PAGE_SIZE, remaining_size);
		remaining_size -= size;
		addr = dma_map_page(dev,
				    dma->pages[i],
				    0,
				    size,
				    dma->direction);
		if (unlikely(dma_mapping_error(dev, addr))) {
			err = -EIO;
			goto fail_unmap;
		}

		/*
		 * Compress SG list entry when pages are contiguous
		 * and transfer size less or equal to BCM_VK_MAX_SGL_CHUNK
		 */
		if ((addr == (sg_addr + transfer_size)) &&
		    ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
			/* pages are contiguous, add to same sg entry */
			transfer_size += size;
		} else {
			/* pages are not contiguous, write sg entry */
			sgdata->size = transfer_size;
			put_unaligned(sg_addr, (u64 *)&sgdata->address);
			dma->sglist[SGLIST_NUM_SG]++;

			/* start new sg entry */
			sgdata++;
			sg_addr = addr;
			transfer_size = size;
		}
	}
	/* Write last sg list entry */
	sgdata->size = transfer_size;
	put_unaligned(sg_addr, (u64 *)&sgdata->address);
	dma->sglist[SGLIST_NUM_SG]++;

	/* Update pointers and size field to point to sglist */
	put_unaligned((u64)dma->handle, &vkdata->address);
	vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
		       (sizeof(u32) * SGLIST_VKDATA_START);

#ifdef BCM_VK_DUMP_SGLIST
	dev_dbg(dev,
		"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
		(u64)dma->sglist,
		dma->handle,
		dma->sglen,
		vkdata->size);
	for (i = 0; i < vkdata->size / sizeof(u32); i++)
		dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
#endif

	return 0;

fail_unmap:
	/* unmap the completed sg entries, then the still-open run */
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < dma->sglist[SGLIST_NUM_SG]; i++, sgdata++) {
		addr = get_unaligned(&sgdata->address);
		dma_unmap_page(dev, addr, sgdata->size, dma->direction);
	}
	dma_unmap_page(dev, sg_addr, transfer_size, dma->direction);
fail_free_sglist:
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);
	dma->sglist = NULL;
fail_put_pages:
	/* pinned user pages must be released with put_page, not __free_page */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);
	kfree(dma->pages);
	dma->pages = NULL;
	return err;
}
int bcm_vk_sg_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int dir,
struct _vk_data *vkdata,
int num)
{
int i;
int rc = -EINVAL;
/* Convert user addresses to DMA SG List */
for (i = 0; i < num; i++) {
if (vkdata[i].size && vkdata[i].address) {
/*
* If both size and address are non-zero
* then DMA alloc.
*/
rc = bcm_vk_dma_alloc(dev,
&dma[i],
dir,
&vkdata[i]);
} else if (vkdata[i].size ||
vkdata[i].address) {
/*
* If one of size and address are zero
* there is a problem.
*/
dev_err(dev,
"Invalid vkdata %x 0x%x 0x%llx\n",
i, vkdata[i].size, vkdata[i].address);
rc = -EINVAL;
} else {
/*
* If size and address are both zero
* don't convert, but return success.
*/
rc = 0;
}
if (rc)
goto fail_alloc;
}
return rc;
fail_alloc:
while (i > 0) {
i--;
if (dma[i].sglist)
bcm_vk_dma_free(dev, &dma[i]);
}
return rc;
}
/*
 * Undo bcm_vk_dma_alloc for one entry: unmap every SG entry recorded
 * in the sglist, free the coherent sglist buffer, drop the page
 * references taken by get_user_pages_fast() and free the page array.
 * Always returns 0.
 */
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
	dma_addr_t addr;
	int i;
	int num_sg;
	u32 size;
	struct _vk_data *vkdata;

	dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);

	/* Unmap all pages in the sglist */
	num_sg = dma->sglist[SGLIST_NUM_SG];
	vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < num_sg; i++) {
		size = vkdata[i].size;
		addr = get_unaligned(&vkdata[i].address);
		dma_unmap_page(dev, addr, size, dma->direction);
	}

	/* Free allocated sglist */
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);

	/* Release lock on all pages */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);

	/* Free allocated dma pages */
	kfree(dma->pages);
	/* NULL marks the entry freed; callers test dma->sglist to find live entries */
	dma->sglist = NULL;

	return 0;
}
/*
 * Free all live DMA entries in @dma[0..num).  An entry is live when its
 * sglist pointer is set.  *@proc_cnt receives the number of entries
 * actually freed.  Always returns 0.
 */
int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
		   int *proc_cnt)
{
	int idx;
	int freed = 0;

	/* Unmap and free all pages and sglists */
	for (idx = 0; idx < num; idx++) {
		if (dma[idx].sglist) {
			bcm_vk_dma_free(dev, &dma[idx]);
			freed++;
		}
	}
	*proc_cnt = freed;

	return 0;
}

View file

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018-2020 Broadcom.
*/
#ifndef BCM_VK_SG_H
#define BCM_VK_SG_H
#include <linux/dma-mapping.h>
/*
 * Bookkeeping for one user buffer that has been pinned and DMA-mapped
 * (see bcm_vk_sg_alloc/bcm_vk_sg_free).  A NULL sglist means the entry
 * is unused/freed.
 */
struct bcm_vk_dma {
	/* for userland buffer */
	struct page **pages;	/* pages pinned via get_user_pages_fast */
	int nr_pages;		/* number of entries in pages[] */

	/* common */
	dma_addr_t handle;	/* coherent dma handle of sglist */
	/*
	 * sglist is of the following LE format
	 * [U32] num_sg  = number of sg addresses (N)
	 * [U32] totalsize = totalsize of data being transferred in sglist
	 * [U32] size[0] = size of data in address0
	 * [U32] addr_l[0] = lower 32-bits of address0
	 * [U32] addr_h[0] = higher 32-bits of address0
	 * ..
	 * [U32] size[N-1] = size of data in addressN-1
	 * [U32] addr_l[N-1] = lower 32-bits of addressN-1
	 * [U32] addr_h[N-1] = higher 32-bits of addressN-1
	 */
	u32 *sglist;
#define SGLIST_NUM_SG		0
#define SGLIST_TOTALSIZE	1
#define SGLIST_VKDATA_START	2

	int sglen;	/* Length (bytes) of sglist */
	int direction;	/* DMA direction (DMA_TO_DEVICE/DMA_FROM_DEVICE) */
};
/*
 * One buffer descriptor as exchanged with the card: packed, and
 * accessed via get/put_unaligned in bcm_vk_sg.c since the address
 * field is not naturally aligned.
 */
struct _vk_data {
	u32 size;	/* data size in bytes */
	u64 address;	/* Pointer to data */
} __packed;
/*
* Scatter-gather DMA buffer API.
*
* These functions provide a simple way to create a page list and a
* scatter-gather list from userspace address and map the memory
* for DMA operation.
*/
int bcm_vk_sg_alloc(struct device *dev,
struct bcm_vk_dma *dma,
int dir,
struct _vk_data *vkdata,
int num);
int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
int *proc_cnt);
#endif