net: wwan: t7xx: Add data path interface

The Data Path Modem AP Interface (DPMAIF) HIF layer provides methods
for initialization, ISR, control, and event handling of the TX/RX flows.

DPMAIF TX
Exposes the 't7xx_dpmaif_tx_send_skb' function, which the network device
can use to transmit packets.
The uplink data management uses a Descriptor Ring Buffer (DRB).
The first DRB entry is a message type that is followed by one or more
normal DRB entries. The message type DRB holds the skb information and
each normal DRB entry holds a pointer to the skb payload.
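
As an illustration (mirroring the DRB accounting in the new
t7xx_hif_dpmaif_tx.c), one skb consumes a message DRB plus one payload
DRB for the linear data and one per page fragment:

    /* 1 msg DRB + 1 payload DRB for skb->data + nr_frags payload DRBs */
    drb_cnt = skb_shinfo(skb)->nr_frags + 2;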

DPMAIF RX
The downlink buffer management uses Buffer Address Table (BAT) and
Packet Information Table (PIT) rings.
The BAT ring holds the addresses of the skb data buffers for the HW to
use, while the PIT contains metadata about a whole network packet,
including a reference to the BAT entry holding the data buffer address.
The driver reads the PIT and BAT entries written by the modem; when a
threshold is reached, the driver reloads the PIT and BAT rings.
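
For illustration (field names from the new t7xx_hif_dpmaif_rx.h, the
surrounding lookup code is only sketched here), a payload PIT entry
references its BAT slot and data length as:

    bat_idx  = FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit->header));
    data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pit->header));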

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Haijun Liu <haijun.liu@mediatek.com>
Date: 2022-05-06 11:13:05 -07:00
Committed by: David S. Miller
commit d642b012df (parent 33f78ab5a3)
7 changed files with 2758 additions and 0 deletions

@@ -13,3 +13,7 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_port_proxy.o \
t7xx_port_ctrl_msg.o \
t7xx_port_wwan.o \
t7xx_hif_dpmaif.o \
t7xx_hif_dpmaif_tx.o \
t7xx_hif_dpmaif_rx.o \
t7xx_dpmaif.o \

@@ -0,0 +1,484 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
* Haijun Liu <haijun.liu@mediatek.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_state_monitor.h"
unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
{
buf_idx++;
return buf_idx < buf_len ? buf_idx : 0;
}
unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
{
int pkt_cnt;
if (rd_wr == DPMAIF_READ)
pkt_cnt = wr_idx - rd_idx;
else
pkt_cnt = rd_idx - wr_idx - 1;
if (pkt_cnt < 0)
pkt_cnt += total_cnt;
return (unsigned int)pkt_cnt;
}
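/* Worked example (illustration only, not part of the driver logic):
 * with an 8-entry ring, rd_idx = 6 and wr_idx = 2, DPMAIF_READ yields
 * 4 entries pending consumption ((2 - 6) + 8) while DPMAIF_WRITE yields
 * 3 free slots (6 - 2 - 1); one slot is always kept unused so that a
 * full ring can be told apart from an empty one.
 */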
static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
int i;
for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
}
}
static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
int i;
for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
}
}
static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
{
struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
struct dpmaif_hw_intr_st_para intr_status;
struct device *dev = dpmaif_ctrl->dev;
struct dpmaif_hw_info *hw_info;
int i;
memset(&intr_status, 0, sizeof(intr_status));
hw_info = &dpmaif_ctrl->hw_info;
if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
dev_err(dev, "Failed to get HW interrupt count\n");
return;
}
t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
for (i = 0; i < intr_status.intr_cnt; i++) {
switch (intr_status.intr_types[i]) {
case DPF_INTR_UL_DONE:
t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
break;
case DPF_INTR_UL_DRB_EMPTY:
case DPF_INTR_UL_MD_NOTREADY:
case DPF_INTR_UL_MD_PWR_NOTREADY:
/* No need to log an error for these */
break;
case DPF_INTR_DL_BATCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
break;
case DPF_INTR_DL_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
break;
case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
break;
case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
break;
case DPF_INTR_DL_DONE:
case DPF_INTR_DL_Q0_DONE:
case DPF_INTR_DL_Q1_DONE:
t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
break;
default:
dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
intr_status.intr_types[i]);
}
}
}
static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
{
struct dpmaif_isr_para *isr_para = data;
struct dpmaif_ctrl *dpmaif_ctrl;
dpmaif_ctrl = isr_para->dpmaif_ctrl;
if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
return IRQ_HANDLED;
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
}
static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
unsigned char i;
dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
isr_para->dpmaif_ctrl = dpmaif_ctrl;
isr_para->dlq_id = i;
isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
}
}
static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
struct dpmaif_isr_para *isr_para;
enum t7xx_int int_type;
int i;
t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
int_type = isr_para->pcie_int;
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
t7xx_dev->intr_thread[int_type] = NULL;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
t7xx_pcie_mac_set_int(t7xx_dev, int_type);
}
}
static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rx_q;
struct dpmaif_tx_queue *tx_q;
int ret, rx_idx, tx_idx, i;
ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
return ret;
}
ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
goto err_free_normal_bat;
}
for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
rx_q = &dpmaif_ctrl->rxq[rx_idx];
rx_q->index = rx_idx;
rx_q->dpmaif_ctrl = dpmaif_ctrl;
ret = t7xx_dpmaif_rxq_init(rx_q);
if (ret)
goto err_free_rxq;
}
for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
tx_q = &dpmaif_ctrl->txq[tx_idx];
tx_q->index = tx_idx;
tx_q->dpmaif_ctrl = dpmaif_ctrl;
ret = t7xx_dpmaif_txq_init(tx_q);
if (ret)
goto err_free_txq;
}
ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
goto err_free_txq;
}
ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
if (ret)
goto err_thread_rel;
return 0;
err_thread_rel:
t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
err_free_txq:
for (i = 0; i < tx_idx; i++) {
tx_q = &dpmaif_ctrl->txq[i];
t7xx_dpmaif_txq_free(tx_q);
}
err_free_rxq:
for (i = 0; i < rx_idx; i++) {
rx_q = &dpmaif_ctrl->rxq[i];
t7xx_dpmaif_rxq_free(rx_q);
}
t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);
err_free_normal_bat:
t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
return ret;
}
static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rx_q;
struct dpmaif_tx_queue *tx_q;
int i;
t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
tx_q = &dpmaif_ctrl->txq[i];
t7xx_dpmaif_txq_free(tx_q);
}
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
rx_q = &dpmaif_ctrl->rxq[i];
t7xx_dpmaif_rxq_free(rx_q);
}
}
static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
struct dpmaif_hw_params hw_init_para;
struct dpmaif_rx_queue *rxq;
struct dpmaif_tx_queue *txq;
unsigned int buf_cnt;
int i, ret = 0;
if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
return -EFAULT;
memset(&hw_init_para, 0, sizeof(hw_init_para));
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
rxq = &dpmaif_ctrl->rxq[i];
rxq->que_started = true;
rxq->index = i;
rxq->budget = rxq->bat_req->bat_size_cnt - 1;
hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
}
bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
return ret;
}
buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
goto err_free_normal_bat;
}
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
txq = &dpmaif_ctrl->txq[i];
txq->que_started = true;
hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
}
ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
goto err_free_frag_bat;
}
ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
if (ret)
goto err_free_frag_bat;
ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
if (ret)
goto err_free_frag_bat;
t7xx_dpmaif_ul_clr_all_intr(hw_info);
t7xx_dpmaif_dl_clr_all_intr(hw_info);
dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
t7xx_dpmaif_enable_irq(dpmaif_ctrl);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
err_free_frag_bat:
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
err_free_normal_bat:
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
return ret;
}
static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
{
t7xx_dpmaif_tx_stop(dpmaif_ctrl);
t7xx_dpmaif_rx_stop(dpmaif_ctrl);
}
static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
{
t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
}
static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (!dpmaif_ctrl->dpmaif_sw_init_done) {
dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
return -EFAULT;
}
if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
return -EFAULT;
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
t7xx_dpmaif_stop_sw(dpmaif_ctrl);
t7xx_dpmaif_tx_clear(dpmaif_ctrl);
t7xx_dpmaif_rx_clear(dpmaif_ctrl);
return 0;
}
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
{
int ret = 0;
switch (state) {
case MD_STATE_WAITING_FOR_HS1:
ret = t7xx_dpmaif_start(dpmaif_ctrl);
break;
case MD_STATE_EXCEPTION:
ret = t7xx_dpmaif_stop(dpmaif_ctrl);
break;
case MD_STATE_STOPPED:
ret = t7xx_dpmaif_stop(dpmaif_ctrl);
break;
case MD_STATE_WAITING_TO_STOP:
t7xx_dpmaif_stop_hw(dpmaif_ctrl);
break;
default:
break;
}
return ret;
}
/**
* t7xx_dpmaif_hif_init() - Initialize data path.
* @t7xx_dev: MTK context structure.
* @callbacks: Callbacks implemented by the network layer to handle RX skb and
* event notifications.
*
* Allocate and initialize datapath control block.
* Register datapath ISR, TX and RX resources.
*
* Return:
* * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
* * NULL - In case of error.
*/
struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
struct dpmaif_callbacks *callbacks)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct dpmaif_ctrl *dpmaif_ctrl;
int ret;
if (!callbacks)
return NULL;
dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
if (!dpmaif_ctrl)
return NULL;
dpmaif_ctrl->t7xx_dev = t7xx_dev;
dpmaif_ctrl->callbacks = callbacks;
dpmaif_ctrl->dev = dev;
dpmaif_ctrl->dpmaif_sw_init_done = false;
dpmaif_ctrl->hw_info.dev = dev;
dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
if (ret) {
dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
return NULL;
}
dpmaif_ctrl->dpmaif_sw_init_done = true;
return dpmaif_ctrl;
}
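/* Typical usage (hypothetical caller, for illustration only): the WWAN
 * network layer implements both callbacks and keeps the returned context
 * for later TX calls.
 *
 *	static struct dpmaif_callbacks my_cb = {
 *		.state_notify	= my_state_notify,
 *		.recv_skb	= my_recv_skb,
 *	};
 *
 *	dpmaif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &my_cb);
 *	if (!dpmaif_ctrl)
 *		return -ENOMEM;
 */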
void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (dpmaif_ctrl->dpmaif_sw_init_done) {
t7xx_dpmaif_stop(dpmaif_ctrl);
t7xx_dpmaif_sw_release(dpmaif_ctrl);
dpmaif_ctrl->dpmaif_sw_init_done = false;
}
}

@@ -0,0 +1,205 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
* Haijun Liu <haijun.liu@mediatek.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#ifndef __T7XX_HIF_DPMAIF_H__
#define __T7XX_HIF_DPMAIF_H__
#include <linux/bitmap.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_pci.h"
#include "t7xx_state_monitor.h"
/* SKB control buffer */
struct t7xx_skb_cb {
u8 netif_idx;
u8 txq_number;
u8 rx_pkt_type;
};
#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb)
enum dpmaif_rdwr {
DPMAIF_READ,
DPMAIF_WRITE,
};
/* Structure of DL BAT */
struct dpmaif_cur_rx_skb_info {
bool msg_pit_received;
struct sk_buff *cur_skb;
unsigned int cur_chn_idx;
unsigned int check_sum;
unsigned int pit_dp;
unsigned int pkt_type;
int err_payload;
};
struct dpmaif_bat {
unsigned int p_buffer_addr;
unsigned int buffer_addr_ext;
};
struct dpmaif_bat_skb {
struct sk_buff *skb;
dma_addr_t data_bus_addr;
unsigned int data_len;
};
struct dpmaif_bat_page {
struct page *page;
dma_addr_t data_bus_addr;
unsigned int offset;
unsigned int data_len;
};
enum bat_type {
BAT_TYPE_NORMAL,
BAT_TYPE_FRAG,
};
struct dpmaif_bat_request {
void *bat_base;
dma_addr_t bat_bus_addr;
unsigned int bat_size_cnt;
unsigned int bat_wr_idx;
unsigned int bat_release_rd_idx;
void *bat_skb;
unsigned int pkt_buf_sz;
unsigned long *bat_bitmap;
atomic_t refcnt;
spinlock_t mask_lock; /* Protects BAT mask */
enum bat_type type;
};
struct dpmaif_rx_queue {
unsigned int index;
bool que_started;
unsigned int budget;
void *pit_base;
dma_addr_t pit_bus_addr;
unsigned int pit_size_cnt;
unsigned int pit_rd_idx;
unsigned int pit_wr_idx;
unsigned int pit_release_rd_idx;
struct dpmaif_bat_request *bat_req;
struct dpmaif_bat_request *bat_frag;
wait_queue_head_t rx_wq;
struct task_struct *rx_thread;
struct sk_buff_head skb_list;
unsigned int skb_list_max_len;
struct workqueue_struct *worker;
struct work_struct dpmaif_rxq_work;
atomic_t rx_processing;
struct dpmaif_ctrl *dpmaif_ctrl;
unsigned int expect_pit_seq;
unsigned int pit_remain_release_cnt;
struct dpmaif_cur_rx_skb_info rx_data_info;
};
struct dpmaif_tx_queue {
unsigned int index;
bool que_started;
atomic_t tx_budget;
void *drb_base;
dma_addr_t drb_bus_addr;
unsigned int drb_size_cnt;
unsigned int drb_wr_idx;
unsigned int drb_rd_idx;
unsigned int drb_release_rd_idx;
void *drb_skb_base;
wait_queue_head_t req_wq;
struct workqueue_struct *worker;
struct work_struct dpmaif_tx_work;
spinlock_t tx_lock; /* Protects txq DRB */
atomic_t tx_processing;
struct dpmaif_ctrl *dpmaif_ctrl;
struct sk_buff_head tx_skb_head;
};
struct dpmaif_isr_para {
struct dpmaif_ctrl *dpmaif_ctrl;
unsigned char pcie_int;
unsigned char dlq_id;
};
enum dpmaif_state {
DPMAIF_STATE_MIN,
DPMAIF_STATE_PWROFF,
DPMAIF_STATE_PWRON,
DPMAIF_STATE_EXCEPTION,
DPMAIF_STATE_MAX
};
enum dpmaif_txq_state {
DMPAIF_TXQ_STATE_IRQ,
DMPAIF_TXQ_STATE_FULL,
};
struct dpmaif_callbacks {
void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
enum dpmaif_txq_state state, int txq_number);
void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb);
};
struct dpmaif_ctrl {
struct device *dev;
struct t7xx_pci_dev *t7xx_dev;
enum dpmaif_state state;
bool dpmaif_sw_init_done;
struct dpmaif_hw_info hw_info;
struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM];
struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM];
unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM];
struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM];
struct dpmaif_bat_request bat_req;
struct dpmaif_bat_request bat_frag;
struct workqueue_struct *bat_release_wq;
struct work_struct bat_release_work;
wait_queue_head_t tx_wq;
struct task_struct *tx_thread;
struct dpmaif_callbacks *callbacks;
};
struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
struct dpmaif_callbacks *callbacks);
void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl);
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state);
unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx);
unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
unsigned int wr_idx, enum dpmaif_rdwr);
#endif /* __T7XX_HIF_DPMAIF_H__ */

File diff suppressed because it is too large.

@@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Eliot Lee <eliot.lee@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Moises Veleta <moises.veleta@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#ifndef __T7XX_HIF_DPMA_RX_H__
#define __T7XX_HIF_DPMA_RX_H__
#include <linux/bits.h>
#include <linux/types.h>
#include "t7xx_hif_dpmaif.h"
#define NETIF_MASK GENMASK(4, 0)
#define PKT_TYPE_IP4 0
#define PKT_TYPE_IP6 1
/* Structure of DL PIT */
struct dpmaif_pit {
__le32 header;
union {
struct {
__le32 data_addr_l;
__le32 data_addr_h;
__le32 footer;
} pd;
struct {
__le32 params_1;
__le32 params_2;
__le32 params_3;
} msg;
};
};
/* PIT header fields */
#define PD_PIT_DATA_LEN GENMASK(31, 16)
#define PD_PIT_BUFFER_ID GENMASK(15, 3)
#define PD_PIT_BUFFER_TYPE BIT(2)
#define PD_PIT_CONT BIT(1)
#define PD_PIT_PACKET_TYPE BIT(0)
/* PIT footer fields */
#define PD_PIT_DLQ_DONE GENMASK(31, 30)
#define PD_PIT_ULQ_DONE GENMASK(29, 24)
#define PD_PIT_HEADER_OFFSET GENMASK(23, 19)
#define PD_PIT_BI_F GENMASK(18, 17)
#define PD_PIT_IG BIT(16)
#define PD_PIT_RES GENMASK(15, 11)
#define PD_PIT_H_BID GENMASK(10, 8)
#define PD_PIT_PIT_SEQ GENMASK(7, 0)
#define MSG_PIT_DP BIT(31)
#define MSG_PIT_RES GENMASK(30, 27)
#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24)
#define MSG_PIT_CHANNEL_ID GENMASK(23, 16)
#define MSG_PIT_RES2 GENMASK(15, 12)
#define MSG_PIT_HPC_IDX GENMASK(11, 8)
#define MSG_PIT_SRC_QID GENMASK(7, 5)
#define MSG_PIT_ERROR_BIT BIT(4)
#define MSG_PIT_CHECKSUM GENMASK(3, 2)
#define MSG_PIT_CONT BIT(1)
#define MSG_PIT_PACKET_TYPE BIT(0)
#define MSG_PIT_HP_IDX GENMASK(31, 27)
#define MSG_PIT_CMD GENMASK(26, 24)
#define MSG_PIT_RES3 GENMASK(23, 21)
#define MSG_PIT_FLOW GENMASK(20, 16)
#define MSG_PIT_COUNT GENMASK(15, 0)
#define MSG_PIT_HASH GENMASK(31, 24)
#define MSG_PIT_RES4 GENMASK(23, 18)
#define MSG_PIT_PRO GENMASK(17, 16)
#define MSG_PIT_VBID GENMASK(15, 3)
#define MSG_PIT_RES5 GENMASK(2, 0)
#define MSG_PIT_DLQ_DONE GENMASK(31, 30)
#define MSG_PIT_ULQ_DONE GENMASK(29, 24)
#define MSG_PIT_IP BIT(23)
#define MSG_PIT_RES6 BIT(22)
#define MSG_PIT_MR GENMASK(21, 20)
#define MSG_PIT_RES7 GENMASK(19, 17)
#define MSG_PIT_IG BIT(16)
#define MSG_PIT_RES8 GENMASK(15, 11)
#define MSG_PIT_H_BID GENMASK(10, 8)
#define MSG_PIT_PIT_SEQ GENMASK(7, 0)
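/* Illustrative decode (an assumption based on the suppressed RX code;
 * helper names here are hypothetical): bit 0 of the PIT header selects
 * the layout, payload entries use the pd union member and reference a
 * BAT slot, message entries use msg.
 *
 *	u32 header = le32_to_cpu(pit->header);
 *
 *	if (FIELD_GET(PD_PIT_PACKET_TYPE, header))
 *		parse_msg_pit(&pit->msg);
 *	else
 *		bat_idx = FIELD_GET(PD_PIT_BUFFER_ID, header);
 */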
int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue);
void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl);
int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
const struct dpmaif_bat_request *bat_req,
const unsigned int q_num, const unsigned int buf_cnt,
const bool initial);
int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
const unsigned int buf_cnt, const bool first_time);
void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask);
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue);
void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl);
int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
const enum bat_type buf_type);
void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
struct dpmaif_bat_request *bat_req);
#endif /* __T7XX_HIF_DPMA_RX_H__ */

@@ -0,0 +1,651 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
* Haijun Liu <haijun.liu@mediatek.com>
* Eliot Lee <eliot.lee@intel.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#define DPMAIF_SKB_TX_BURST_CNT 5
#define DPMAIF_DRB_LIST_LEN 6144
/* DRB dtype */
#define DES_DTYP_PD 0
#define DES_DTYP_MSG 1
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
unsigned long flags;
if (!txq->que_started)
return 0;
old_sw_rd_idx = txq->drb_rd_idx;
new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
return 0;
}
if (old_sw_rd_idx <= new_hw_rd_idx)
drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
else
drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;
spin_lock_irqsave(&txq->tx_lock, flags);
txq->drb_rd_idx = new_hw_rd_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
return drb_cnt;
}
static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num, unsigned int release_cnt)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
struct dpmaif_drb *cur_drb, *drb_base;
unsigned int drb_cnt, i, cur_idx;
unsigned long flags;
drb_skb_base = txq->drb_skb_base;
drb_base = txq->drb_base;
spin_lock_irqsave(&txq->tx_lock, flags);
drb_cnt = txq->drb_size_cnt;
cur_idx = txq->drb_release_rd_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
for (i = 0; i < release_cnt; i++) {
cur_drb = drb_base + cur_idx;
if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
cur_drb_skb = drb_skb_base + cur_idx;
if (!cur_drb_skb->is_msg)
dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
cur_drb_skb->data_len, DMA_TO_DEVICE);
if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
if (!cur_drb_skb->skb) {
dev_err(dpmaif_ctrl->dev,
"txq%u: DRB check fail, invalid skb\n", q_num);
continue;
}
dev_kfree_skb_any(cur_drb_skb->skb);
}
cur_drb_skb->skb = NULL;
}
spin_lock_irqsave(&txq->tx_lock, flags);
cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
txq->drb_release_rd_idx = cur_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
}
if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);
return i;
}
static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num, unsigned int budget)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
unsigned int rel_cnt, real_rel_cnt;
/* Update read index from HW */
t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);
rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
txq->drb_rd_idx, DPMAIF_READ);
real_rel_cnt = min_not_zero(budget, rel_cnt);
if (real_rel_cnt)
real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);
return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}
static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}
static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
struct dpmaif_hw_info *hw_info;
int ret;
hw_info = &dpmaif_ctrl->hw_info;
ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
if (ret == -EAGAIN ||
(t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
t7xx_dpmaif_drb_ring_not_empty(txq))) {
queue_work(dpmaif_ctrl->txq[txq->index].worker,
&dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
/* Give the device time to enter the low power state */
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
} else {
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
}
}
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
unsigned int channel_id)
{
struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
struct dpmaif_drb *drb = drb_base + cur_idx;
drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
FIELD_PREP(DRB_HDR_CONT, 1) |
FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
FIELD_PREP(DRB_MSG_L4_CHK, 1));
}
static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, dma_addr_t data_addr,
unsigned int pkt_size, bool last_one)
{
struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
struct dpmaif_drb *drb = drb_base + cur_idx;
u32 header;
header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
if (!last_one)
header |= FIELD_PREP(DRB_HDR_CONT, 1);
drb->header = cpu_to_le32(header);
drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}
static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
bool is_frag, bool is_last_one, dma_addr_t bus_addr,
unsigned int data_len)
{
struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;
drb_skb->skb = skb;
drb_skb->bus_addr = bus_addr;
drb_skb->data_len = data_len;
drb_skb->index = cur_idx;
drb_skb->is_msg = is_msg;
drb_skb->is_frag = is_frag;
drb_skb->is_last = is_last_one;
}
static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
unsigned int wr_cnt, send_cnt, payload_cnt;
unsigned int cur_idx, drb_wr_idx_backup;
struct skb_shared_info *shinfo;
struct dpmaif_tx_queue *txq;
struct t7xx_skb_cb *skb_cb;
unsigned long flags;
skb_cb = T7XX_SKB_CB(skb);
txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
return -ENODEV;
atomic_set(&txq->tx_processing, 1);
/* Ensure tx_processing is changed to 1 before actually begin TX flow */
smp_mb();
shinfo = skb_shinfo(skb);
if (shinfo->frag_list)
dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");
payload_cnt = shinfo->nr_frags + 1;
/* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */
send_cnt = payload_cnt + 1;
spin_lock_irqsave(&txq->tx_lock, flags);
cur_idx = txq->drb_wr_idx;
drb_wr_idx_backup = cur_idx;
txq->drb_wr_idx += send_cnt;
if (txq->drb_wr_idx >= txq->drb_size_cnt)
txq->drb_wr_idx -= txq->drb_size_cnt;
t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
spin_unlock_irqrestore(&txq->tx_lock, flags);
for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
unsigned int data_len;
dma_addr_t bus_addr;
void *data_addr;
if (!wr_cnt) {
data_len = skb_headlen(skb);
data_addr = skb->data;
is_frag = false;
} else {
skb_frag_t *frag = shinfo->frags + wr_cnt - 1;
data_len = skb_frag_size(frag);
data_addr = skb_frag_address(frag);
is_frag = true;
}
bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
goto unmap_buffers;
cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);
spin_lock_irqsave(&txq->tx_lock, flags);
t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
is_last_one);
t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
is_last_one, bus_addr, data_len);
spin_unlock_irqrestore(&txq->tx_lock, flags);
}
if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);
atomic_set(&txq->tx_processing, 0);
return 0;
unmap_buffers:
while (wr_cnt--) {
struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;
cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
drb_skb += cur_idx;
dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
drb_skb->data_len, DMA_TO_DEVICE);
}
txq->drb_wr_idx = drb_wr_idx_backup;
atomic_set(&txq->tx_processing, 0);
return -ENOMEM;
}
static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
return false;
}
return true;
}
/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_tx_queue *txq;
txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
if (!txq->que_started)
return NULL;
return txq;
}
static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
txq->drb_wr_idx, DPMAIF_WRITE);
}
static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
/* Normal DRB (frags data + skb linear data) + msg DRB */
return skb_shinfo(skb)->nr_frags + 2;
}
static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
unsigned int drb_remain_cnt, i;
unsigned int send_drb_cnt;
int drb_cnt = 0;
int ret = 0;
drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
struct sk_buff *skb;
skb = skb_peek(&txq->tx_skb_head);
if (!skb)
break;
send_drb_cnt = t7xx_skb_drb_cnt(skb);
if (drb_remain_cnt < send_drb_cnt) {
drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
continue;
}
drb_remain_cnt -= send_drb_cnt;
ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
if (ret < 0) {
dev_err(txq->dpmaif_ctrl->dev,
"Failed to add skb to device's ring: %d\n", ret);
break;
}
drb_cnt += send_drb_cnt;
skb_unlink(skb, &txq->tx_skb_head);
}
if (drb_cnt > 0)
return drb_cnt;
return ret;
}
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
do {
struct dpmaif_tx_queue *txq;
int drb_send_cnt;
txq = t7xx_select_tx_queue(dpmaif_ctrl);
if (!txq)
return;
drb_send_cnt = t7xx_txq_burst_send_skb(txq);
if (drb_send_cnt <= 0) {
usleep_range(10, 20);
cond_resched();
continue;
}
t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
cond_resched();
} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
(dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}
static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
struct dpmaif_ctrl *dpmaif_ctrl = arg;
while (!kthread_should_stop()) {
if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
(!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
kthread_should_stop()))
continue;
if (kthread_should_stop())
break;
}
t7xx_do_tx_hw_push(dpmaif_ctrl);
}
return 0;
}
int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
init_waitqueue_head(&dpmaif_ctrl->tx_wq);
dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
dpmaif_ctrl, "dpmaif_tx_hw_push");
return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}
void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (dpmaif_ctrl->tx_thread)
kthread_stop(dpmaif_ctrl->tx_thread);
}
/**
* t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
* @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
* @txq_number: Queue number to xmit on.
* @skb: Pointer to the skb to transmit.
*
* Add the skb to the queue of skbs to be transmitted.
* Wake up the thread that pushes the skbs from the queue to the HW.
*
* Return:
* * 0 - Success.
* * -EBUSY - Tx budget exhausted.
* In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
* state to prevent this error condition.
*/
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
struct sk_buff *skb)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
struct t7xx_skb_cb *skb_cb;
if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
return -EBUSY;
}
skb_cb = T7XX_SKB_CB(skb);
skb_cb->txq_number = txq_number;
skb_queue_tail(&txq->tx_skb_head, skb);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
}
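/* Illustrative call from the network layer (hypothetical caller, not part
 * of this patch):
 *
 *	T7XX_SKB_CB(skb)->netif_idx = netif_id;
 *	ret = t7xx_dpmaif_tx_send_skb(dpmaif_ctrl, DPMAIF_TX_DEFAULT_QUEUE, skb);
 *
 * On -EBUSY the caller is expected to stop queuing until the
 * DMPAIF_TXQ_STATE_IRQ state_notify callback reports that budget is
 * available again.
 */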
void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
if (que_mask & BIT(i))
queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
}
}
static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
size_t brb_skb_size, brb_pd_size;
brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
/* For HW && AP SW */
txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
&txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
if (!txq->drb_base)
return -ENOMEM;
/* For AP SW to record the skb information */
txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
if (!txq->drb_skb_base) {
dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
txq->drb_base, txq->drb_bus_addr);
return -ENOMEM;
}
return 0;
}
static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
unsigned int i;
if (!drb_skb_base)
return;
for (i = 0; i < txq->drb_size_cnt; i++) {
drb_skb = drb_skb_base + i;
if (!drb_skb->skb)
continue;
if (!drb_skb->is_msg)
dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
drb_skb->data_len, DMA_TO_DEVICE);
if (drb_skb->is_last) {
dev_kfree_skb(drb_skb->skb);
drb_skb->skb = NULL;
}
}
}
static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
if (txq->drb_base)
dma_free_coherent(txq->dpmaif_ctrl->dev,
txq->drb_size_cnt * sizeof(struct dpmaif_drb),
txq->drb_base, txq->drb_bus_addr);
t7xx_dpmaif_tx_free_drb_skb(txq);
}
/**
* t7xx_dpmaif_txq_init() - Initialize TX queue.
* @txq: Pointer to struct dpmaif_tx_queue.
*
* Initialize the TX queue data structure and allocate memory for it to use.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code from failure sub-initializations.
*/
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
int ret;
skb_queue_head_init(&txq->tx_skb_head);
init_waitqueue_head(&txq->req_wq);
atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);
ret = t7xx_dpmaif_tx_drb_buf_init(txq);
if (ret) {
dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
return ret;
}
txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
(txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
if (!txq->worker)
return -ENOMEM;
INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
spin_lock_init(&txq->tx_lock);
return 0;
}
void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
if (txq->worker)
destroy_workqueue(txq->worker);
skb_queue_purge(&txq->tx_skb_head);
t7xx_dpmaif_tx_drb_buf_rel(txq);
}
void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
struct dpmaif_tx_queue *txq;
int count = 0;
txq = &dpmaif_ctrl->txq[i];
txq->que_started = false;
/* Make sure TXQ is disabled */
smp_mb();
/* Wait for active Tx to be done */
while (atomic_read(&txq->tx_processing)) {
if (++count >= DPMAIF_MAX_CHECK_COUNT) {
dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
break;
}
}
}
}
static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
txq->que_started = false;
cancel_work_sync(&txq->dpmaif_tx_work);
flush_work(&txq->dpmaif_tx_work);
t7xx_dpmaif_tx_free_drb_skb(txq);
txq->drb_rd_idx = 0;
txq->drb_wr_idx = 0;
txq->drb_release_rd_idx = 0;
}
void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++)
t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}

@@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Eliot Lee <eliot.lee@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Moises Veleta <moises.veleta@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#ifndef __T7XX_HIF_DPMA_TX_H__
#define __T7XX_HIF_DPMA_TX_H__
#include <linux/bits.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include "t7xx_hif_dpmaif.h"
#define DPMAIF_TX_DEFAULT_QUEUE 0
struct dpmaif_drb {
__le32 header;
union {
struct {
__le32 data_addr_l;
__le32 data_addr_h;
} pd;
struct {
__le32 msg_hdr;
__le32 reserved1;
} msg;
};
__le32 reserved2;
};
/* Header fields */
#define DRB_HDR_DATA_LEN GENMASK(31, 16)
#define DRB_HDR_RESERVED GENMASK(15, 3)
#define DRB_HDR_CONT BIT(2)
#define DRB_HDR_DTYP GENMASK(1, 0)
#define DRB_MSG_DW2_RES GENMASK(31, 30)
#define DRB_MSG_L4_CHK BIT(29)
#define DRB_MSG_IP_CHK BIT(28)
#define DRB_MSG_RESERVED BIT(27)
#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24)
#define DRB_MSG_CHANNEL_ID GENMASK(23, 16)
#define DRB_MSG_COUNT_L GENMASK(15, 0)
struct dpmaif_drb_skb {
struct sk_buff *skb;
dma_addr_t bus_addr;
unsigned int data_len;
u16 index:13;
u16 is_msg:1;
u16 is_frag:1;
u16 is_last:1;
};
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
struct sk_buff *skb);
void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl);
int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl);
void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq);
void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask);
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq);
void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl);
void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl);
#endif /* __T7XX_HIF_DPMA_TX_H__ */