Merge tag 'mt76-for-kvalo-2022-05-12' of https://github.com/nbd168/wireless

mt76 patches for 5.19

- tx locking improvements
- wireless ethernet dispatch support for flow offload
- non-standard VHT MCS10-11 support
- fixes
- runtime PM improvements
- mt7921 AP mode support
- mt7921 ipv6 NS offload support

Committed by Kalle Valo on 2022-05-16 13:11:58 +03:00 (commit e99a2d6bcd).
41 changed files with 1600 additions and 417 deletions.
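
The bulk of the hunks below implement the "wireless ethernet dispatch" (WED) item from the list above: hardware TX rings can now be handed to the MediaTek WED block for flow offload. As a quick orientation, the sketch below restates the queue-flag encoding introduced by the mt76.h hunk in this diff; it is illustrative only, and the definitions in the diff itself are authoritative.

    #define MT_QFLAG_WED_RING  GENMASK(1, 0)  /* ring index within the WED block */
    #define MT_QFLAG_WED_TYPE  GENMASK(3, 2)  /* MT76_WED_Q_TX or MT76_WED_Q_TXFREE */
    #define MT_QFLAG_WED       BIT(4)         /* queue registers are accessed via WED */

    /* MT_WED_Q_TX(n) ORs the three fields together and is passed as the new
     * "flags" argument of mt76_init_queue()/mt76_init_tx_queue(); queues
     * created with flags == 0 keep the old direct-MMIO register access.
     */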


@ -162,15 +162,15 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (!sta)
return;
if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
mt76_rx_aggr_check_ctl(skb, frames);
if (!status->aggr) {
if (!(status->flag & RX_FLAG_8023))
mt76_rx_aggr_check_ctl(skb, frames);
return;
}
/* not part of a BA session */
ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
tid = rcu_dereference(wcid->aggr[tidno]);


@ -7,6 +7,37 @@
#include "mt76.h"
#include "dma.h"
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
#define Q_READ(_dev, _q, _field) ({ \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \
u32 _val; \
if ((_q)->flags & MT_QFLAG_WED) \
_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
((_q)->wed_regs + \
_offset)); \
else \
_val = readl(&(_q)->regs->_field); \
_val; \
})
#define Q_WRITE(_dev, _q, _field, _val) do { \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \
if ((_q)->flags & MT_QFLAG_WED) \
mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
((_q)->wed_regs + _offset), \
_val); \
else \
writel(_val, &(_q)->regs->_field); \
} while (0)
#else
#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
#endif
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
@ -16,11 +47,11 @@ mt76_alloc_txwi(struct mt76_dev *dev)
int size;
size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
txwi = kzalloc(size, GFP_ATOMIC);
if (!txwi)
return NULL;
addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
@ -73,18 +104,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
struct mt76_txwi_cache *t;
local_bh_disable();
while ((t = __mt76_get_txwi(dev)) != NULL)
dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
while ((t = __mt76_get_txwi(dev)) != NULL) {
dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
kfree(mt76_get_txwi_ptr(dev, t));
}
local_bh_enable();
}
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
writel(q->desc_dma, &q->regs->desc_base);
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
Q_WRITE(dev, q, desc_base, q->desc_dma);
Q_WRITE(dev, q, ring_size, q->ndesc);
q->head = Q_READ(dev, q, dma_idx);
q->tail = q->head;
}
@ -100,41 +133,11 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
writel(0, &q->regs->cpu_idx);
writel(0, &q->regs->dma_idx);
Q_WRITE(dev, q, cpu_idx, 0);
Q_WRITE(dev, q, dma_idx, 0);
mt76_dma_sync_idx(dev, q);
}
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
u32 ring_base)
{
int size;
spin_lock_init(&q->lock);
spin_lock_init(&q->cleanup_lock);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
mt76_dma_queue_reset(dev, q);
return 0;
}
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
@ -203,11 +206,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *e = &q->entry[idx];
if (!e->skip_buf0)
dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
DMA_TO_DEVICE);
if (!e->skip_buf1)
dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
DMA_TO_DEVICE);
if (e->txwi == DMA_DUMMY_DATA)
@ -224,7 +227,7 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
writel(q->head, &q->regs->cpu_idx);
Q_WRITE(dev, q, cpu_idx, q->head);
}
static void
@ -240,7 +243,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
if (flush)
last = -1;
else
last = readl(&q->regs->dma_idx);
last = Q_READ(dev, q, dma_idx);
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@ -252,8 +255,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
last = Q_READ(dev, q, dma_idx);
}
spin_unlock_bh(&q->cleanup_lock);
@ -288,7 +290,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);
dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
return buf;
@ -325,9 +327,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + 1 >= q->ndesc - 1)
goto error;
addr = dma_map_single(dev->dev, skb->data, skb->len,
addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto error;
buf.addr = addr;
@ -374,8 +376,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto free;
tx_info.buf[n].addr = t->dma_addr;
@ -387,9 +389,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto unmap;
tx_info.buf[n].addr = addr;
@ -402,10 +404,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
goto unmap;
}
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
goto unmap;
@ -415,7 +417,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
unmap:
for (n--; n > 0; n--)
dma_unmap_single(dev->dev, tx_info.buf[n].addr,
dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
@ -460,8 +462,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
if (!buf)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr))) {
addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
skb_free_frag(buf);
break;
}
@ -481,6 +483,85 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
return frames;
}
static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mmio.wed;
int ret, type, ring;
u8 flags = q->flags;
if (!mtk_wed_device_active(wed))
q->flags &= ~MT_QFLAG_WED;
if (!(q->flags & MT_QFLAG_WED))
return 0;
type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
switch (type) {
case MT76_WED_Q_TX:
ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
if (!ret)
q->wed_regs = wed->tx_ring[ring].reg_base;
break;
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q);
q->flags = flags;
ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
if (!ret)
q->wed_regs = wed->txfree_ring.reg_base;
break;
default:
ret = -EINVAL;
}
return ret;
#else
return 0;
#endif
}
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
u32 ring_base)
{
int ret, size;
spin_lock_init(&q->lock);
spin_lock_init(&q->cleanup_lock);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
ret = mt76_dma_wed_setup(dev, q);
if (ret)
return ret;
if (q->flags != MT_WED_Q_TXFREE)
mt76_dma_queue_reset(dev, q);
return 0;
}
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
@ -562,14 +643,29 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
int len, data_len, done = 0;
int len, data_len, done = 0, dma_idx;
struct sk_buff *skb;
unsigned char *data;
bool check_ddone = false;
bool more;
if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
q->flags == MT_WED_Q_TXFREE) {
dma_idx = Q_READ(dev, q, dma_idx);
check_ddone = true;
}
while (done < budget) {
u32 info;
if (check_ddone) {
if (q->tail == dma_idx)
dma_idx = Q_READ(dev, q, dma_idx);
if (q->tail == dma_idx)
break;
}
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
if (!data)
break;
@ -710,5 +806,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
}
mt76_free_pending_txwi(dev);
if (mtk_wed_device_active(&dev->mmio.wed))
mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);


@ -248,6 +248,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
for (i = 0; i < 8; i++) {
if (i < nstream)
@ -323,8 +325,6 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
@ -545,6 +545,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->hw = hw;
dev->dev = pdev;
dev->drv = drv_ops;
dev->dma_dev = pdev;
phy = &dev->phy;
phy->dev = dev;
@ -579,6 +580,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->txwi_cache);
dev->token_size = dev->drv->token_size;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@ -1303,7 +1305,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
continue;
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
mtxq->wcid = wcid;
mtxq->wcid = wcid->idx;
}
ewma_signal_init(&wcid->rssi);
@ -1381,7 +1383,9 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
mutex_lock(&dev->mutex);
spin_lock_bh(&dev->status_lock);
rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
spin_unlock_bh(&dev->status_lock);
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
@ -1578,7 +1582,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base)
int ring_base, u32 flags)
{
struct mt76_queue *hwq;
int err;
@ -1587,6 +1591,8 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
if (!hwq)
return ERR_PTR(-ENOMEM);
hwq->flags = flags;
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0)
return ERR_PTR(err);


@ -6,14 +6,14 @@
#include "mt76.h"
struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len)
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len, gfp_t gfp)
{
const struct mt76_mcu_ops *ops = dev->mcu_ops;
int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
skb = alloc_skb(length, GFP_KERNEL);
skb = alloc_skb(length, gfp);
if (!skb)
return NULL;
@ -25,7 +25,7 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
return skb;
}
EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
EXPORT_SYMBOL_GPL(__mt76_mcu_msg_alloc);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
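
The mcu.c change above splits message allocation so callers can choose their own GFP flags; mt76_mcu_msg_alloc() itself becomes a static inline GFP_KERNEL wrapper around __mt76_mcu_msg_alloc() (see the mt76.h hunk further down). A minimal, hypothetical caller that needs the atomic variant would look like this:

    /* Hypothetical caller sketch: "req" stands in for a driver-defined MCU
     * request structure; only the gfp argument differs from the old helper.
     */
    skb = __mt76_mcu_msg_alloc(dev, &req, sizeof(req), GFP_ATOMIC);
    if (!skb)
            return -ENOMEM;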


@ -73,8 +73,13 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
dev->mmio.irqmask &= ~clear;
dev->mmio.irqmask |= set;
if (addr)
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
if (addr) {
if (mtk_wed_device_active(&dev->mmio.wed))
mtk_wed_device_irq_set_mask(&dev->mmio.wed,
dev->mmio.irqmask);
else
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
}
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);


@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"
@ -26,6 +27,16 @@
#define MT76_TOKEN_FREE_THR 64
#define MT_QFLAG_WED_RING GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
#define MT_QFLAG_WED BIT(4)
#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
@ -42,6 +53,11 @@ enum mt76_bus_type {
MT76_BUS_SDIO,
};
enum mt76_wed_type {
MT76_WED_Q_TX,
MT76_WED_Q_TXFREE,
};
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@ -170,6 +186,9 @@ struct mt76_queue {
u8 buf_offset;
u8 hw_idx;
u8 qid;
u8 flags;
u32 wed_regs;
dma_addr_t desc_dma;
struct sk_buff *rx_head;
@ -275,7 +294,7 @@ struct mt76_wcid {
};
struct mt76_txq {
struct mt76_wcid *wcid;
u16 wcid;
u16 agg_ssn;
bool send_bar;
@ -537,6 +556,8 @@ struct mt76_mmio {
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
struct mtk_wed_device wed;
};
struct mt76_rx_status {
@ -698,6 +719,7 @@ struct mt76_dev {
const struct mt76_driver_ops *drv;
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
struct device *dma_dev;
struct mt76_mcu mcu;
@ -718,7 +740,9 @@ struct mt76_dev {
spinlock_t token_lock;
struct idr token;
int token_count;
u16 wed_token_count;
u16 token_count;
u16 token_size;
wait_queue_head_t tx_wait;
/* spinclock used to protect wcid pktid linked list */
@ -727,7 +751,7 @@ struct mt76_dev {
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 vif_mask;
u64 vif_mask;
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
@ -942,14 +966,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base);
int ring_base, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base)
int n_desc, int ring_base, u32 flags)
{
struct mt76_queue *q;
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
if (IS_ERR(q))
return PTR_ERR(q);
@ -964,7 +988,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
{
struct mt76_queue *q;
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
if (IS_ERR(q))
return PTR_ERR(q);
@ -1321,8 +1345,15 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int len);
struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
int data_len);
int data_len)
{
return __mt76_mcu_msg_alloc(dev, data, data_len, GFP_KERNEL);
}
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
@ -1380,8 +1411,7 @@ mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
int token;
spin_lock_bh(&dev->token_lock);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
GFP_ATOMIC);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
return token;


@ -82,12 +82,12 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
__skb_queue_head_init(&data.q);
q = dev->mphy.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
spin_lock(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7603_update_beacon_iter, dev);
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
spin_unlock(&q->lock);
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
@ -117,7 +117,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_skb_set_moredata(data.tail[i], false);
}
spin_lock_bh(&q->lock);
spin_lock(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@ -126,7 +126,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
spin_unlock(&q->lock);
for (i = 0; i < ARRAY_SIZE(data.count); i++)
mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),


@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;


@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
mvif->idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->mt76.vif_mask |= BIT(mvif->idx);
dev->mt76.vif_mask |= BIT_ULL(mvif->idx);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@ -75,7 +75,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
mtxq->wcid = idx;
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@ -106,7 +106,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&dev->sta_poll_lock);
mutex_lock(&dev->mt76.mutex);
dev->mt76.vif_mask &= ~BIT(mvif->idx);
dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);


@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2,
MT_TX_RING_BASE);
MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE,
MT_TX_RING_BASE);
MT_TX_RING_BASE, 0);
if (ret)
return ret;
@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return mt7622_init_tx_queues_multi(dev);
ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
MT_TX_RING_BASE);
MT_TX_RING_BASE, 0);
if (ret)
return ret;


@ -194,7 +194,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -212,7 +212,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (ext_phy)
mvif->mt76.wmm_idx += 2;
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
@ -234,7 +234,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
mtxq->wcid = idx;
}
ret = mt7615_mcu_add_dev_info(phy, vif, true);
@ -268,7 +268,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);


@ -145,7 +145,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
return;
dev->reset_state = mcu_int;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}


@ -2185,11 +2185,8 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++) {
u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
for (i = 0; i < len; i++)
skb_put_data(skb, &info->arp_addr_list[i], sizeof(__be32));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
}


@ -974,7 +974,6 @@ enum {
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,


@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE,
MT_TX_RING_BASE);
MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;


@ -292,7 +292,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mt76_packet_id_init(&mvif->group_wcid);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->group_wcid;
rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
mtxq->wcid = MT_VIF_WCID(idx);
}
int
@ -327,11 +328,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx += 8;
/* vif is already set or idx is 8 for AP/Mesh/... */
if (dev->mt76.vif_mask & BIT(idx) ||
if (dev->mt76.vif_mask & BIT_ULL(idx) ||
(vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
dev->mt76.vif_mask |= BIT(idx);
dev->mt76.vif_mask |= BIT_ULL(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
@ -344,7 +345,8 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
dev->mt76.vif_mask &= ~BIT(mvif->idx);
dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL);
mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);


@ -44,35 +44,113 @@ mt7915_implicit_txbf_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
mt7915_implicit_txbf_set, "%lld\n");
/* test knob of system layer 1/2 error recovery */
static int mt7915_ser_trigger_set(void *data, u64 val)
/* test knob of system error recovery */
static ssize_t
mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
enum {
SER_SET_RECOVER_L1 = 1,
SER_SET_RECOVER_L2,
SER_ENABLE = 2,
SER_RECOVER
};
struct mt7915_dev *dev = data;
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
char buf[16];
int ret = 0;
u16 val;
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (count && buf[count - 1] == '\n')
buf[count - 1] = '\0';
else
buf[count] = '\0';
if (kstrtou16(buf, 0, &val))
return -EINVAL;
switch (val) {
case SER_QUERY:
/* grab firmware SER stats */
ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
break;
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
case SER_SET_RECOVER_L3_RX_ABORT:
case SER_SET_RECOVER_L3_TX_ABORT:
case SER_SET_RECOVER_L3_TX_DISABLE:
case SER_SET_RECOVER_L3_BF:
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
if (ret)
return ret;
return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0);
ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
break;
default:
break;
}
return ret ? ret : count;
}
static ssize_t
mt7915_fw_ser_get(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
char *buff;
int desc = 0;
ssize_t ret;
static const size_t bufsz = 400;
buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_STATUS = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_SER_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_PLE_ERR = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_PLE_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_PLE_ERR_1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_PLE1_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_PLE_ERR_AMSDU = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_PLE_AMSDU_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_PSE_ERR = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_PSE_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_PSE_ERR_1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_PSE1_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR6_B0 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN0_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR6_B1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN1_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR7_B0 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN0_STATS));
desc += scnprintf(buff + desc, bufsz - desc,
"::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL,
mt7915_ser_trigger_set, "%lld\n");
static const struct file_operations mt7915_fw_ser_ops = {
.write = mt7915_fw_ser_set,
.read = mt7915_fw_ser_get,
.open = simple_open,
.llseek = default_llseek,
};
static int
mt7915_radar_trigger(void *data, u64 val)
@ -95,7 +173,7 @@ mt7915_muru_debug_set(void *data, u64 val)
struct mt7915_dev *dev = data;
dev->muru_debug = val;
mt7915_mcu_muru_debug_set(dev, data);
mt7915_mcu_muru_debug_set(dev, dev->muru_debug);
return 0;
}
@ -369,20 +447,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
bool tx, rx, en;
int ret;
dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
dev->fw.debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
if (dev->fw_debug_bin)
if (dev->fw.debug_bin)
val = 16;
else
val = dev->fw_debug_wm;
val = dev->fw.debug_wm;
tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
tx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(1));
rx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(2));
en = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(0));
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
return ret;
goto out;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
if (debug == DEBUG_RPT_RX)
@ -392,16 +470,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
return ret;
goto out;
}
/* WM CPU info record control */
mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm);
mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
return 0;
out:
if (ret)
dev->fw.debug_wm = 0;
return ret;
}
static int
@ -409,7 +491,7 @@ mt7915_fw_debug_wm_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
*val = dev->fw_debug_wm;
*val = dev->fw.debug_wm;
return 0;
}
@ -423,14 +505,19 @@ mt7915_fw_debug_wa_set(void *data, u64 val)
struct mt7915_dev *dev = data;
int ret;
dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
dev->fw.debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa);
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw.debug_wa);
if (ret)
return ret;
goto out;
return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX,
!!dev->fw_debug_wa, 0);
ret = mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
MCU_WA_PARAM_PDMA_RX, !!dev->fw.debug_wa, 0);
out:
if (ret)
dev->fw.debug_wa = 0;
return ret;
}
static int
@ -438,7 +525,7 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
*val = dev->fw_debug_wa;
*val = dev->fw.debug_wa;
return 0;
}
@ -485,11 +572,11 @@ mt7915_fw_debug_bin_set(void *data, u64 val)
if (!dev->relay_fwlog)
return -ENOMEM;
dev->fw_debug_bin = val;
dev->fw.debug_bin = val;
relay_reset(dev->relay_fwlog);
return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
return mt7915_fw_debug_wm_set(dev, dev->fw.debug_wm);
}
static int
@ -497,7 +584,7 @@ mt7915_fw_debug_bin_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
*val = dev->fw_debug_bin;
*val = dev->fw.debug_bin;
return 0;
}
@ -510,7 +597,13 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
if (dev->fw_debug_wm) {
seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
seq_printf(file, "Exception state: 0x%x\n",
is_mt7915(&dev->mt76) ?
(u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) :
(u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0)));
if (dev->fw.debug_wm) {
seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT),
mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT));
@ -529,7 +622,9 @@ mt7915_fw_util_wa_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
if (dev->fw_debug_wa)
seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WA_MCU_PC));
if (dev->fw.debug_wa)
return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
MCU_WA_PARAM_CPU_UTIL, 0, 0);
@ -867,6 +962,36 @@ mt7915_twt_stats(struct seq_file *s, void *data)
return 0;
}
/* The index of RF registers use the generic regidx, combined with two parts:
* WF selection [31:28] and offset [27:0].
*/
static int
mt7915_rf_regval_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
u32 regval;
int ret;
ret = mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &regval, false);
if (ret)
return ret;
*val = le32_to_cpu(regval);
return 0;
}
static int
mt7915_rf_regval_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get,
mt7915_rf_regval_set, "0x%08llx\n");
int mt7915_init_debugfs(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@ -884,6 +1009,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("xmit-queues", 0400, dir, phy,
&mt7915_xmit_queues_fops);
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
@ -897,7 +1023,8 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
&mt7915_rate_txpower_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
if (!dev->dbdc_support || phy->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
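
The comment above the new rf_regval handlers gives the regidx layout: WF selection in bits 31:28 and the register offset in bits 27:0. A worked example of composing such an index, with illustrative values only:

    /* RF register on WF selection 1 at offset 0x300 (example values) */
    u32 regidx = FIELD_PREP(GENMASK(31, 28), 1) |   /* WF selection */
                 FIELD_PREP(GENMASK(27, 0), 0x300); /* offset       */
    /* regidx == 0x10000300; this value goes into the generic mt76 "regidx"
     * debugfs entry before reading or writing the new "rf_regval" file.
     */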


@ -5,11 +5,19 @@
#include "../dma.h"
#include "mac.h"
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
struct mt7915_dev *dev = phy->dev;
int i, err;
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
ring_base = MT_WED_TX_RING_BASE;
idx -= MT_TXQ_ID(0);
}
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
MT_WED_Q_TX(idx));
if (err < 0)
return err;
@ -318,14 +326,23 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
if (dev->dbdc_support || dev->phy.band_idx)
irq_mask |= MT_INT_BAND1_RX_DONE;
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
u32 wed_irq_mask = irq_mask;
wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
}
mt7915_irq_enable(dev, irq_mask);
return 0;
}
int mt7915_dma_init(struct mt7915_dev *dev)
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
struct mt76_dev *mdev = &dev->mt76;
u32 wa_rx_base, wa_rx_idx;
u32 hif1_ofs = 0;
int ret;
@ -338,6 +355,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_dma_disable(dev, true);
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
} else {
mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
}
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy,
MT_TXQ_ID(dev->phy.band_idx),
@ -346,6 +374,15 @@ int mt7915_dma_init(struct mt7915_dev *dev)
if (ret)
return ret;
if (phy2) {
ret = mt7915_init_tx_queues(phy2,
MT_TXQ_ID(phy2->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
return ret;
}
/* command to WM */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
MT_MCUQ_ID(MT_MCUQ_WM),
@ -380,11 +417,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
/* event from WA */
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
} else {
wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
}
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, wa_rx_base);
if (ret)
return ret;


@ -152,6 +152,8 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
phy->mt76->cap.has_2ghz = true;
return;
}
} else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
val = phy->band_idx ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
}
switch (val) {


@ -351,6 +351,8 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
if (!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
@ -450,6 +452,9 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
if (!is_mt7915(&dev->mt76))
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
@ -484,21 +489,18 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
static int mt7915_register_ext_phy(struct mt7915_dev *dev)
static struct mt7915_phy *
mt7915_alloc_ext_phy(struct mt7915_dev *dev)
{
struct mt7915_phy *phy = mt7915_ext_phy(dev);
struct mt7915_phy *phy;
struct mt76_phy *mphy;
int ret;
if (!dev->dbdc_support)
return 0;
if (phy)
return 0;
return NULL;
mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
if (!mphy)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
phy = mphy->priv;
phy->dev = dev;
@ -507,6 +509,15 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
/* Bind main phy to band0 and ext_phy to band1 for dbdc case */
phy->band_idx = 1;
return phy;
}
static int
mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
int ret;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_hw_cap(dev, phy);
@ -526,29 +537,22 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(mphy->hw);
ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
goto error;
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
goto error;
return ret;
ret = mt7915_thermal_init(phy);
if (ret)
goto error;
goto unreg;
ret = mt7915_init_debugfs(phy);
if (ret)
goto error;
mt7915_init_debugfs(phy);
return 0;
error:
ieee80211_free_hw(mphy->hw);
unreg:
mt76_unregister_phy(mphy);
return ret;
}
@ -565,7 +569,7 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_txbf_init(dev);
}
static void mt7915_wfsys_reset(struct mt7915_dev *dev)
void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
@ -645,36 +649,25 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
return ret;
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
static int
mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
int ret, idx;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
dev->dbdc_support = mt7915_band_config(dev);
/* If MCU was already running, it is likely in a bad state */
if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
FW_STATE_FW_DOWNLOAD)
mt7915_wfsys_reset(dev);
ret = mt7915_dma_init(dev);
ret = mt7915_dma_init(dev, phy2);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
ret = mt7915_mcu_init(dev);
if (ret) {
/* Reset and try again */
mt7915_wfsys_reset(dev);
ret = mt7915_mcu_init(dev);
if (ret)
return ret;
}
if (ret)
return ret;
ret = mt7915_eeprom_init(dev);
if (ret < 0)
@ -814,7 +807,7 @@ static void
mt7915_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@ -1048,9 +1041,22 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
ieee80211_free_hw(mphy->hw);
}
static void mt7915_stop_hardware(struct mt7915_dev *dev)
{
mt7915_mcu_exit(dev);
mt7915_tx_token_put(dev);
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
if (is_mt7986(&dev->mt76))
mt7986_wmac_disable(dev);
}
int mt7915_register_device(struct mt7915_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt7915_phy *phy2;
int ret;
dev->phy.dev = dev;
@ -1066,9 +1072,15 @@ int mt7915_register_device(struct mt7915_dev *dev)
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
ret = mt7915_init_hardware(dev);
dev->dbdc_support = mt7915_band_config(dev);
phy2 = mt7915_alloc_ext_phy(dev);
if (IS_ERR(phy2))
return PTR_ERR(phy2);
ret = mt7915_init_hardware(dev, phy2);
if (ret)
return ret;
goto free_phy2;
mt7915_init_wiphy(hw);
@ -1085,19 +1097,34 @@ int mt7915_register_device(struct mt7915_dev *dev)
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
goto stop_hw;
ret = mt7915_thermal_init(&dev->phy);
if (ret)
return ret;
goto unreg_dev;
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
ret = mt7915_register_ext_phy(dev);
if (ret)
return ret;
if (phy2) {
ret = mt7915_register_ext_phy(dev, phy2);
if (ret)
goto unreg_thermal;
}
return mt7915_init_debugfs(&dev->phy);
mt7915_init_debugfs(&dev->phy);
return 0;
unreg_thermal:
mt7915_unregister_thermal(&dev->phy);
unreg_dev:
mt76_unregister_device(&dev->mt76);
stop_hw:
mt7915_stop_hardware(dev);
free_phy2:
if (phy2)
ieee80211_free_hw(phy2->mt76->hw);
return ret;
}
void mt7915_unregister_device(struct mt7915_dev *dev)
@ -1105,13 +1132,7 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_unregister_ext_phy(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
mt7915_tx_token_put(dev);
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
if (is_mt7986(&dev->mt76))
mt7986_wmac_disable(dev);
mt7915_stop_hardware(dev);
mt76_free_device(&dev->mt76);
}


@ -309,7 +309,7 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
}
static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he known = {
@ -474,10 +474,10 @@ static int
mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
struct mt76_rx_status *status,
struct ieee80211_supported_band *sband,
__le32 *rxv)
__le32 *rxv, u8 *mode)
{
u32 v0, v2;
u8 stbc, gi, bw, dcm, mode, nss;
u8 stbc, gi, bw, dcm, nss;
int i, idx;
bool cck = false;
@ -490,18 +490,18 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
if (!is_mt7915(&dev->mt76)) {
stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
*mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
dcm = FIELD_GET(MT_PRXV_DCM, v0);
bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
} else {
stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
*mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
}
switch (mode) {
switch (*mode) {
case MT_PHY_TYPE_CCK:
cck = true;
fallthrough;
@ -521,7 +521,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
status->encoding = RX_ENC_VHT;
if (gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (i > 9)
if (i > 11)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
@ -546,7 +546,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
case IEEE80211_STA_RX_BW_20:
break;
case IEEE80211_STA_RX_BW_40:
if (mode & MT_PHY_TYPE_HE_EXT_SU &&
if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
(idx & MT_PRXV_TX_ER_SU_106T)) {
status->bw = RATE_INFO_BW_HE_RU;
status->he_ru =
@ -566,7 +566,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
}
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
if (mode < MT_PHY_TYPE_HE_SU && gi)
if (*mode < MT_PHY_TYPE_HE_SU && gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
return 0;
@ -581,7 +581,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
u32 mode = 0;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@ -590,10 +589,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
u8 qos_ctl = 0;
__le16 fc = 0;
int idx;
@ -766,7 +765,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv,
&mode);
if (ret < 0)
return ret;
}
@ -837,10 +837,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
/* drop no data frame */
if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
return -EINVAL;
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
@ -864,8 +860,11 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
int i;
band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
if (band_idx && !phy->band_idx)
if (band_idx && !phy->band_idx) {
phy = mt7915_ext_phy(dev);
if (!phy)
goto out;
}
rcpi = le32_to_cpu(rxv[6]);
ib_rssi = le32_to_cpu(rxv[7]);
@ -890,8 +889,8 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
phy->test.last_freq_offset = foe;
phy->test.last_snr = snr;
out:
#endif
dev_kfree_skb(skb);
}
@ -1017,6 +1016,7 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
u16 ethertype;
bool wmm = false;
u32 val;
@ -1030,7 +1030,8 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
ethertype = get_unaligned_be16(&skb->data[12]);
if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
@ -1176,7 +1177,7 @@ mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, bool beacon)
struct ieee80211_key_conf *key, u32 changed)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@ -1187,6 +1188,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
bool mcast = false;
u16 tx_count = 15;
u32 val;
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
if (vif) {
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
@ -1199,7 +1204,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
if (ext_phy && dev->mt76.phy2)
mphy = dev->mt76.phy2;
if (beacon) {
if (inband_disc) {
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_ALTX0;
} else if (beacon) {
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_BCN0;
} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
@ -1307,8 +1315,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
false);
mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, 0);
txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
@ -1347,6 +1354,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
struct mt7915_txp *txp = ptr + MT_TXD_SIZE;
__le32 *txwi = ptr;
u32 val;
memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
txwi[0] = cpu_to_le32(val);
val = MT_TXD1_LONG_FORMAT |
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
txwi[1] = cpu_to_le32(val);
txp->token = cpu_to_le16(token_id);
txp->nbuf = 1;
txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
return MT_TXD_SIZE + sizeof(*txp);
}
static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
@ -1380,7 +1410,7 @@ mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
txp = mt7915_txwi_to_txp(dev, t);
for (i = 0; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
@ -1389,6 +1419,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
struct mt7915_sta *msta;
struct mt76_wcid *wcid;
__le32 *txwi;
u16 wcid_idx;
@ -1401,13 +1432,24 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
if (sta) {
wcid = (struct mt76_wcid *)sta->drv_priv;
wcid_idx = wcid->idx;
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7915_tx_check_aggr(sta, txwi);
} else {
wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
if (wcid && wcid->sta) {
msta = container_of(wcid, struct mt7915_sta, wcid);
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
}
if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7915_tx_check_aggr(sta, txwi);
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
@ -1416,20 +1458,10 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
}
static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_phy *mphy_ext = mdev->phy2;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
bool v3, wake = false;
u16 total, count = 0;
u32 txd = le32_to_cpu(free->txd);
__le32 *cur_info;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@ -1438,6 +1470,42 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
}
}
static void
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
struct list_head *free_list, bool wake)
{
struct sk_buff *skb, *tmp;
mt7915_mac_sta_poll(dev);
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
mt76_worker_schedule(&dev->mt76.tx_worker);
list_for_each_entry_safe(skb, tmp, free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
}
}
static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
LIST_HEAD(free_list);
void *end = data + len;
bool v3, wake = false;
u16 total, count = 0;
u32 txd = le32_to_cpu(free->txd);
__le32 *cur_info;
mt7915_mac_tx_free_prepare(dev);
total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
@ -1491,17 +1559,38 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
}
}
mt7915_mac_sta_poll(dev);
mt7915_mac_tx_free_done(dev, &free_list, wake);
}
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
__le16 *info = (__le16 *)free->info;
void *end = data + len;
LIST_HEAD(free_list);
bool wake = false;
u8 i, count;
mt76_worker_schedule(&dev->mt76.tx_worker);
mt7915_mac_tx_free_prepare(dev);
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
if (WARN_ON_ONCE((void *)&info[count] > end))
return;
for (i = 0; i < count; i++) {
struct mt76_txwi_cache *txwi;
u16 msdu = le16_to_cpu(info[i]);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt7915_txwi_free(dev, txwi, NULL, &free_list);
}
mt7915_mac_tx_free_done(dev, &free_list, wake);
}
static bool
@ -1681,6 +1770,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXRX_NOTIFY_V0:
mt7915_mac_tx_free_v0(dev, data, len);
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
@ -1708,6 +1800,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7915_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
case PKT_TYPE_TXRX_NOTIFY_V0:
mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
case PKT_TYPE_RX_EVENT:
mt7915_mcu_rx_event(dev, skb);
break;
@ -1918,7 +2014,8 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
BSS_CHANGED_BEACON_ENABLED);
break;
default:
break;
@ -2304,6 +2401,32 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
}
}
static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
u32 trb;
if (!phy->omac_mask)
return;
/* In rare cases, TRB pointers might be out of sync leads to RMAC
* stopping Rx, so check status periodically to see if TRB hardware
* requires minimal recovery.
*/
trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));
if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
(FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
trb == phy->trb_ts)
mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
ext_phy);
phy->trb_ts = trb;
}
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
@ -2356,6 +2479,7 @@ void mt7915_mac_work(struct work_struct *work)
mphy->mac_work_count = 0;
mt7915_mac_update_stats(phy);
mt7915_mac_severe_check(phy);
}
mutex_unlock(&mphy->dev->mutex);
@ -2600,6 +2724,34 @@ static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
struct ieee80211_twt_params *twt_agrt)
{
u16 type = le16_to_cpu(twt_agrt->req_type);
u8 exp;
int i;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
struct mt7915_twt_flow *f;
if (!(msta->twt.flowid_mask & BIT(i)))
continue;
f = &msta->twt.flow[i];
if (f->duration == twt_agrt->min_twt_dur &&
f->mantissa == twt_agrt->mantissa &&
f->exp == exp &&
f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
return true;
}
return false;
}
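
The helper above compares an incoming TWT request against already installed flows field by field, including the wake-interval mantissa and the exponent taken from the request type. As a rough sketch of what those two fields encode, the wake interval is usually decoded as mantissa * 2^exponent; the microsecond unit below is an assumption for illustration, not something stated in this patch:

#include <stdint.h>
#include <stdio.h>

/* TWT wake interval decoded as mantissa * 2^exponent
 * (treated here as microseconds; the unit is an assumption)
 */
static uint64_t twt_wake_interval(uint16_t mantissa, uint8_t exp)
{
	return (uint64_t)mantissa << exp;
}

int main(void)
{
	/* 512 * 2^10 = 524288 us, roughly half a second between wakeups */
	printf("%llu us\n",
	       (unsigned long long)twt_wake_interval(512, 10));
	return 0;
}
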
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@ -2625,6 +2777,12 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
setup_cmd = TWT_SETUP_CMD_DICTATE;
twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
goto unlock;
}
flowid = ffs(~msta->twt.flowid_mask) - 1;
le16p_replace_bits(&twt_agrt->req_type, flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
@ -2633,6 +2791,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
if (mt7915_mac_twt_param_equal(msta, twt_agrt))
goto unlock;
flow = &msta->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);


@ -24,6 +24,7 @@ enum rx_pkt_type {
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
PKT_TYPE_RX_FW_MONITOR = 0x0c,
PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
};
/* RXD DW1 */
@ -311,6 +312,7 @@ struct mt7915_tx_free {
#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */


@ -42,10 +42,6 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
ret = mt7915_mcu_set_scs(dev, 0, true);
if (ret)
goto out;
mt7915_mac_enable_nf(dev, 0);
}
@ -58,10 +54,6 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
ret = mt7915_mcu_set_scs(dev, 1, true);
if (ret)
goto out;
mt7915_mac_enable_nf(dev, 1);
}
@ -174,14 +166,14 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0);
mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0);
mvif->bitrate_mask.control[i].he_gi = 0xff;
mvif->bitrate_mask.control[i].he_ltf = 0xff;
mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].ht_mcs));
memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].vht_mcs));
memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].he_mcs));
}
}
@ -204,8 +196,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) {
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= (MT7915_MAX_INTERFACES << dev->dbdc_support)) {
ret = -ENOSPC;
goto out;
}
@ -227,7 +219,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
@ -246,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
mtxq->wcid = idx;
}
if (vif->type != NL80211_IFTYPE_AP &&
@ -290,7 +282,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mutex_unlock(&dev->mt76.mutex);
@ -630,8 +622,10 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_update_bss_color(hw, vif, &info->he_bss_color);
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED))
mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY))
mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
mutex_unlock(&dev->mt76.mutex);
}
@ -644,7 +638,7 @@ mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
mt7915_mcu_add_beacon(hw, vif, true);
mt7915_mcu_add_beacon(hw, vif, true, BSS_CHANGED_BEACON);
mutex_unlock(&dev->mt76.mutex);
}
@ -1381,6 +1375,39 @@ mt7915_set_radar_background(struct ieee80211_hw *hw,
return ret;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static int
mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
if (!mtk_wed_device_active(wed))
return -ENODEV;
if (msta->wcid.idx > 0xff)
return -EIO;
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
path->mtk_wdma.bss = mvif->mt76.idx;
path->mtk_wdma.wcid = msta->wcid.idx;
path->mtk_wdma.queue = phy != &dev->phy;
ctx->dev = NULL;
return 0;
}
#endif
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@ -1428,4 +1455,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
#endif
};
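
Several hunks in this file move vif_mask from 32-bit BIT()/ffs() handling to 64-bit BIT_ULL()/__ffs64() so more interface slots can be tracked, with the limit scaled by dbdc_support. A small standalone sketch of that find-lowest-free-bit allocation pattern (the helper names and the 64-slot cap are illustrative; only the bitmask idea is taken from the diff):

#include <stdint.h>
#include <stdio.h>

/* find the lowest clear bit in a 64-bit mask and claim it;
 * returns the slot index, or -1 when all 64 slots are taken
 */
static int claim_slot(uint64_t *mask)
{
	if (~*mask == 0)
		return -1;

	int idx = __builtin_ctzll(~*mask);	/* like __ffs64(~mask) */
	*mask |= 1ULL << idx;			/* like BIT_ULL(idx) */
	return idx;
}

static void release_slot(uint64_t *mask, int idx)
{
	*mask &= ~(1ULL << idx);
}

int main(void)
{
	uint64_t vif_mask = 0;

	int a = claim_slot(&vif_mask);	/* 0 */
	int b = claim_slot(&vif_mask);	/* 1 */
	release_slot(&vif_mask, a);
	int c = claim_slot(&vif_mask);	/* 0 again, lowest free slot */

	printf("%d %d %d\n", a, b, c);
	return 0;
}
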


@ -1854,7 +1854,8 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
continue;
for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
const u8 *data;
const struct ieee80211_bssid_index *idx;
const u8 *idx_ie;
if (sub_elem->id || sub_elem->datalen < 4)
continue; /* not a valid BSS profile */
@ -1862,14 +1863,19 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
/* Find WLAN_EID_MULTI_BSSID_IDX
* in the merged nontransmitted profile
*/
data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
sub_elem->data,
sub_elem->datalen);
if (!data || data[1] < 1 || !data[2])
idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
sub_elem->data,
sub_elem->datalen);
if (!idx_ie || idx_ie[1] < sizeof(*idx))
continue;
mbss->offset[data[2]] = cpu_to_le16(data - skb->data);
mbss->bitmap |= cpu_to_le32(BIT(data[2]));
idx = (void *)(idx_ie + 2);
if (!idx->bssid_index || idx->bssid_index > 31)
continue;
mbss->offset[idx->bssid_index] =
cpu_to_le16(idx_ie - skb->data);
mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index));
}
}
}
@ -1886,6 +1892,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
u8 *buf;
int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
len = (len & 0x3) ? ((len | 0x3) + 1) : len;
tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
len, &bcn->sub_ntlv, &bcn->len);
@ -1904,7 +1911,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
true);
BSS_CHANGED_BEACON);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
@ -1986,8 +1993,71 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
}
}
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int en)
static void
mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *rskb, struct bss_info_bcn *bcn,
u32 changed)
{
#define OFFLOAD_TX_MODE_SU BIT(0)
#define OFFLOAD_TX_MODE_MU BIT(1)
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct bss_info_inband_discovery *discov;
struct ieee80211_tx_info *info;
struct sk_buff *skb = NULL;
struct tlv *tlv;
bool ext_phy = phy != &dev->phy;
u8 *buf, interval;
int len;
if (changed & BSS_CHANGED_FILS_DISCOVERY &&
vif->bss_conf.fils_discovery.max_interval) {
interval = vif->bss_conf.fils_discovery.max_interval;
skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
} else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
vif->bss_conf.unsol_bcast_probe_resp_interval) {
interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
}
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
info->control.vif = vif;
info->band = band;
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
len = (len & 0x3) ? ((len | 0x3) + 1) : len;
tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
len, &bcn->sub_ntlv, &bcn->len);
discov = (struct bss_info_inband_discovery *)tlv;
discov->tx_mode = OFFLOAD_TX_MODE_SU;
/* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
discov->tx_interval = interval;
discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
discov->enable = true;
buf = (u8 *)tlv + sizeof(*discov);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
changed);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
dev_kfree_skb(skb);
}
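
Both the beacon-content and the inband-discovery sub-TLVs round their length up to a 4-byte boundary with the (len & 0x3) ? ((len | 0x3) + 1) : len expression. A quick self-contained check that this matches the usual round-up-to-multiple-of-4 idiom (the helper name is made up for the example):

#include <assert.h>
#include <stdio.h>

static unsigned int round_up4(unsigned int len)
{
	return (len & 0x3) ? ((len | 0x3) + 1) : len;
}

int main(void)
{
	unsigned int len;

	/* same result as the common (len + 3) & ~3u idiom */
	for (len = 0; len < 64; len++)
		assert(round_up4(len) == ((len + 3) & ~3u));

	printf("17 -> %u, 20 -> %u\n", round_up4(17), round_up4(20)); /* 20, 20 */
	return 0;
}
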
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int en, u32 changed)
{
#define MAX_BEACON_SIZE 512
struct mt7915_dev *dev = mt7915_hw_dev(hw);
@ -2038,6 +2108,11 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
changed & BSS_CHANGED_FILS_DISCOVERY)
mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
bcn, changed);
out:
return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@ -2465,10 +2540,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
/* force firmware operation mode into normal state,
* which should be set before the firmware download stage.
*/
if (is_mt7915(&dev->mt76))
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
else
mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
ret = mt7915_driver_own(dev, 0);
if (ret)
@ -2493,6 +2565,9 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
if (ret)
return ret;
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
ret = mt7915_mcu_set_mwds(dev, 1);
if (ret)
return ret;
@ -2583,22 +2658,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
&req_mac, sizeof(req_mac), true);
}
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
{
struct {
__le32 cmd;
u8 band;
u8 enable;
} __packed req = {
.cmd = cpu_to_le32(SCS_ENABLE),
.band = band,
.enable = enable + 1,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SCS_CTRL), &req,
sizeof(req), false);
}
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
@ -3671,3 +3730,32 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE),
&req, sizeof(req), true);
}
int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
{
struct {
__le32 idx;
__le32 ofs;
__le32 data;
} __packed req = {
.idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))),
.ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))),
.data = set ? cpu_to_le32(*val) : 0,
};
struct sk_buff *skb;
int ret;
if (set)
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
&req, sizeof(req), false);
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
*val = le32_to_cpu(*(__le32 *)(skb->data + 8));
dev_kfree_skb(skb);
return 0;
}
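
The new RF register accessor packs everything into a single u32 regidx: bits 31..28 select the RF index and bits 27..0 carry the register offset, split out with u32_get_bits(). A minimal sketch of that encode/decode using plain bit arithmetic (no driver API involved):

#include <stdint.h>
#include <stdio.h>

#define RF_IDX_MASK 0xf0000000u		/* GENMASK(31, 28) */
#define RF_OFS_MASK 0x0fffffffu		/* GENMASK(27, 0) */

static uint32_t rf_regidx(uint32_t idx, uint32_t ofs)
{
	return ((idx << 28) & RF_IDX_MASK) | (ofs & RF_OFS_MASK);
}

int main(void)
{
	uint32_t regidx = rf_regidx(2, 0x1234);

	printf("idx=%u ofs=0x%x\n",
	       (unsigned int)((regidx & RF_IDX_MASK) >> 28), /* like u32_get_bits(regidx, GENMASK(31, 28)) */
	       (unsigned int)(regidx & RF_OFS_MASK));        /* like u32_get_bits(regidx, GENMASK(27, 0)) */
	return 0;
}
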


@ -304,16 +304,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,
SCS_CONFIG,
SCS_ENABLE,
SCS_SHOW_INFO,
SCS_GET_GLO_ADDR,
SCS_GET_GLO_ADDR_EVENT,
};
struct bss_info_bmc_rate {
__le16 tag;
__le16 len;
@ -414,11 +404,23 @@ struct bss_info_bcn_cont {
__le16 pkt_len;
} __packed __aligned(4);
struct bss_info_inband_discovery {
__le16 tag;
__le16 len;
u8 tx_type;
u8 tx_mode;
u8 tx_interval;
u8 enable;
__le16 rsv;
__le16 prob_rsp_len;
} __packed __aligned(4);
enum {
BSS_INFO_BCN_CSA,
BSS_INFO_BCN_BCC,
BSS_INFO_BCN_MBSSID,
BSS_INFO_BCN_CONTENT,
BSS_INFO_BCN_DISCOV,
BSS_INFO_BCN_MAX
};
@ -473,6 +475,20 @@ enum {
MURU_GET_TXC_TX_STATS = 151,
};
enum {
SER_QUERY,
/* recovery */
SER_SET_RECOVER_L1,
SER_SET_RECOVER_L2,
SER_SET_RECOVER_L3_RX_ABORT,
SER_SET_RECOVER_L3_TX_ABORT,
SER_SET_RECOVER_L3_TX_DISABLE,
SER_SET_RECOVER_L3_BF,
/* action */
SER_ENABLE = 2,
SER_RECOVER
};
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
sizeof(struct bss_info_basic) +\
@ -486,6 +502,7 @@ enum {
#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_bcn_cntdwn) + \
sizeof(struct bss_info_bcn_mbss) + \
sizeof(struct bss_info_bcn_cont))
sizeof(struct bss_info_bcn_cont) + \
sizeof(struct bss_info_inband_discovery))
#endif


@ -22,6 +22,8 @@ static const u32 mt7915_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x77ffffff,
[INFRA_MCU_ADDR_END] = 0x7c3fffff,
[FW_EXCEPTION_ADDR] = 0x219848,
[SWDEF_BASE_ADDR] = 0x41f200,
};
static const u32 mt7916_reg[] = {
@ -36,6 +38,8 @@ static const u32 mt7916_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_EXCEPTION_ADDR] = 0x022050bc,
[SWDEF_BASE_ADDR] = 0x411400,
};
static const u32 mt7986_reg[] = {
@ -50,6 +54,8 @@ static const u32 mt7986_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0x27000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
[FW_EXCEPTION_ADDR] = 0x02204ffc,
[SWDEF_BASE_ADDR] = 0x411400,
};
static const u32 mt7915_offs[] = {
@ -547,15 +553,21 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
static void mt7915_irq_tasklet(struct tasklet_struct *t)
{
struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
u32 intr, intr1, mask;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
}
if (dev->hif2) {
intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
@ -601,7 +613,7 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
mt76_wr(dev, MT_MCU_CMD, val);
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
}
@ -610,10 +622,15 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
}
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
@ -665,8 +682,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
return dev;
error:


@ -66,6 +66,7 @@
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
#define MT7915_MIN_TWT_DUR 64
#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
@ -247,6 +248,8 @@ struct mt7915_phy {
u8 rdd_state;
u32 trb_ts;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@ -309,9 +312,6 @@ struct mt7915_dev {
bool flash_mode;
bool muru_debug;
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
u8 fw_debug_bin;
struct dentry *debugfs_dir;
struct rchan *relay_fwlog;
@ -319,7 +319,13 @@ struct mt7915_dev {
void *cal;
struct {
u8 table_mask;
u8 debug_wm;
u8 debug_wa;
u8 debug_bin;
} fw;
struct {
u16 table_mask;
u8 n_agrt;
} twt;
@ -429,8 +435,11 @@ static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
#endif
struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
void mt7915_wfsys_reset(struct mt7915_dev *dev);
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
@ -440,7 +449,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
int mt7915_dma_init(struct mt7915_dev *dev);
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_mcu_init(struct mt7915_dev *dev);
@ -463,7 +472,7 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int enable);
int enable, u32 changed);
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@ -485,7 +494,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
@ -506,6 +514,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
struct cfg80211_chan_def *chandef);
int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
@ -550,7 +559,7 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, bool beacon);
struct ieee80211_key_conf *key, u32 changed);
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@ -572,7 +581,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);


@ -12,6 +12,9 @@
#include "mac.h"
#include "../trace.h"
static bool wed_enable = false;
module_param(wed_enable, bool, 0644);
static LIST_HEAD(hif_list);
static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
@ -92,12 +95,79 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
return 0;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = wed->wlan.token_start;
spin_unlock_bh(&dev->mt76.token_lock);
ret = wait_event_timeout(dev->mt76.tx_wait,
!dev->mt76.wed_token_count, HZ);
if (!ret)
return -EAGAIN;
return 0;
}
static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
}
#endif
static int
mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
int ret;
if (!wed_enable)
return 0;
wed->wlan.pci_dev = pdev;
wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
MT_WFDMA_EXT_CSR_BASE;
wed->wlan.nbuf = 4096;
wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
wed->wlan.init_buf = mt7915_wed_init_buf;
wed->wlan.offload_enable = mt7915_wed_offload_enable;
wed->wlan.offload_disable = mt7915_wed_offload_disable;
if (mtk_wed_device_attach(wed) != 0)
return 0;
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
return 1;
#else
return 0;
#endif
}
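
mt7915_pci_wed_init() reserves the top of the TX token space for WED by setting token_start = MT7915_TOKEN_SIZE - nbuf: tokens below that point stay with the host, and tokens at or above it are the WED-owned buffers that the tx.c hunks later in this series count in wed_token_count. A small sketch of the split (the MT7915_TOKEN_SIZE value is assumed here; the classifier helper is made up):

#include <stdbool.h>
#include <stdio.h>

#define TOKEN_SIZE	8192	/* assumed value of MT7915_TOKEN_SIZE */
#define WED_NBUF	4096	/* wed->wlan.nbuf in this patch */

static bool token_is_wed(int token, int token_start)
{
	return token >= token_start;
}

int main(void)
{
	int token_start = TOKEN_SIZE - WED_NBUF;	/* 4096 */

	printf("host tokens: 0..%d, WED tokens: %d..%d\n",
	       token_start - 1, token_start, TOKEN_SIZE - 1);
	printf("token 100 -> %s\n", token_is_wed(100, token_start) ? "wed" : "host");
	return 0;
}
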
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mt7915_hif *hif2 = NULL;
struct mt7915_dev *dev;
struct mt76_dev *mdev;
struct mt7915_hif *hif2;
int irq;
int ret;
@ -126,19 +196,27 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
return PTR_ERR(dev);
mdev = &dev->mt76;
mt7915_wfsys_reset(dev);
hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
ret = mt7915_pci_wed_init(dev, pdev, &irq);
if (ret < 0)
goto free_device;
goto free_wed_or_irq_vector;
if (!ret) {
hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
goto free_device;
irq = pdev->irq;
}
irq = pdev->irq;
ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto free_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
goto free_wed_or_irq_vector;
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
@ -173,8 +251,11 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
if (dev->hif2)
put_device(dev->hif2->dev);
devm_free_irq(mdev->dev, irq, dev);
free_irq_vector:
pci_free_irq_vectors(pdev);
free_wed_or_irq_vector:
if (mtk_wed_device_active(&mdev->mmio.wed))
mtk_wed_device_detach(&mdev->mmio.wed);
else
pci_free_irq_vectors(pdev);
free_device:
mt76_free_device(&dev->mt76);


@ -30,6 +30,8 @@ enum reg_rev {
WFDMA_EXT_CSR_ADDR,
CBTOP1_PHY_END,
INFRA_MCU_ADDR_END,
FW_EXCEPTION_ADDR,
SWDEF_BASE_ADDR,
__MT_REG_MAX,
};
@ -158,6 +160,9 @@ enum offs_rev {
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
#define MT_MDP_DCR2 MT_MDP(0x0e8)
#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2)
#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
@ -172,6 +177,14 @@ enum offs_rev {
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
/* TRB: band 0(0x820e1000), band 1(0x820f1000) */
#define MT_WF_TRB_BASE(_band) ((_band) ? 0x820f1000 : 0x820e1000)
#define MT_WF_TRB(_band, ofs) (MT_WF_TRB_BASE(_band) + (ofs))
#define MT_TRB_RXPSR0(_band) MT_WF_TRB(_band, 0x03c)
#define MT_TRB_RXPSR0_RX_WTBL_PTR GENMASK(25, 16)
#define MT_TRB_RXPSR0_RX_RMAC_PTR GENMASK(9, 0)
/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
@ -565,18 +578,31 @@ enum offs_rev {
/* WFDMA CSR */
#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
/* WFDMA0 PCIE1 */
#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
@ -794,6 +820,7 @@ enum offs_rev {
/* ADIE */
#define MT_ADIE_CHIP_ID 0x02c
#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
#define MT_ADIE_IDX0 GENMASK(15, 0)
#define MT_ADIE_IDX1 GENMASK(31, 16)
@ -913,12 +940,27 @@ enum offs_rev {
#define MT_ADIE_TYPE_MASK BIT(1)
/* FW MODE SYNC */
#define MT_SWDEF_MODE 0x41f23c
#define MT_SWDEF_MODE_MT7916 0x41143c
#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
#define MT_SWDEF_MODE MT_SWDEF(0x3c)
#define MT_SWDEF_NORMAL_MODE 0
#define MT_SWDEF_ICAP_MODE 1
#define MT_SWDEF_SPECTRUM_MODE 2
#define MT_SWDEF_SER_STATS MT_SWDEF(0x040)
#define MT_SWDEF_PLE_STATS MT_SWDEF(0x044)
#define MT_SWDEF_PLE1_STATS MT_SWDEF(0x048)
#define MT_SWDEF_PLE_AMSDU_STATS MT_SWDEF(0x04C)
#define MT_SWDEF_PSE_STATS MT_SWDEF(0x050)
#define MT_SWDEF_PSE1_STATS MT_SWDEF(0x054)
#define MT_SWDEF_LAMC_WISR6_BN0_STATS MT_SWDEF(0x058)
#define MT_SWDEF_LAMC_WISR6_BN1_STATS MT_SWDEF(0x05C)
#define MT_SWDEF_LAMC_WISR7_BN0_STATS MT_SWDEF(0x060)
#define MT_SWDEF_LAMC_WISR7_BN1_STATS MT_SWDEF(0x064)
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
#define MT_DIC_CMD_REG_CMD MT_DIC_CMD_REG(0x10)
@ -965,10 +1007,6 @@ enum offs_rev {
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
#define MT_HW_BOUND 0x70010020
#define MT_HW_REV 0x70010204
#define MT_WF_SUBSYS_RST 0x70002600
#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
@ -1030,6 +1068,10 @@ enum offs_rev {
#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
#define MT_HW_BOUND 0x70010020
#define MT_HW_REV 0x70010204
#define MT_WF_SUBSYS_RST 0x70002600
/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
@ -1038,6 +1080,9 @@ enum offs_rev {
#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
#define MT_WM_MCU_PC 0x7c060204
#define MT_WA_MCU_PC 0x7c06020c
/* PP TOP */
#define MT_WF_PP_TOP_BASE 0x820cc000
#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
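
The register additions above follow the existing __REG() indirection: the header names a logical register id (for example SWDEF_BASE_ADDR) and each chip supplies the real address through its mt79xx_reg[] table, which is why the per-chip MT_SWDEF_MODE_MT7916 define can be dropped. A tiny standalone sketch of that lookup scheme (table values are taken from the mmio.c hunk earlier in this series; the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

enum reg_rev { FW_EXCEPTION_ADDR, SWDEF_BASE_ADDR, __REG_MAX };

/* one table per chip, indexed by the logical register id */
static const uint32_t mt7915_reg[__REG_MAX] = {
	[FW_EXCEPTION_ADDR] = 0x219848,
	[SWDEF_BASE_ADDR]   = 0x41f200,
};

static const uint32_t mt7916_reg[__REG_MAX] = {
	[FW_EXCEPTION_ADDR] = 0x022050bc,
	[SWDEF_BASE_ADDR]   = 0x411400,
};

#define REG(tbl, id)	((tbl)[id])		/* like __REG(id) resolving via the chip table */
#define SWDEF_MODE(tbl)	(REG(tbl, SWDEF_BASE_ADDR) + 0x3c)

int main(void)
{
	printf("mt7915 SWDEF_MODE at 0x%x\n", (unsigned int)SWDEF_MODE(mt7915_reg)); /* 0x41f23c */
	printf("mt7916 SWDEF_MODE at 0x%x\n", (unsigned int)SWDEF_MODE(mt7916_reg)); /* 0x41143c */
	return 0;
}
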


@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/reset.h>
#include <linux/of_net.h>
#include <linux/clk.h>
#include "mt7915.h"
@ -210,6 +211,8 @@ static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
if (IS_ERR_OR_NULL(state))
return -EINVAL;
break;
default:
return -EINVAL;
}
ret = pinctrl_select_state(pinctrl, state);
@ -468,17 +471,32 @@ static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
{
u32 id, version, rg_xo_01, rg_xo_03;
int ret;
ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_CHIP_ID, &id);
if (ret)
return ret;
version = FIELD_GET(MT_ADIE_VERSION_MASK, id);
ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
if (ret)
return ret;
ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f);
if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
rg_xo_01 = 0x1d59080f;
rg_xo_03 = 0x34c00fe0;
} else {
rg_xo_01 = 0x1959f80f;
rg_xo_03 = 0x34d00fe0;
}
ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, rg_xo_01);
if (ret)
return ret;
return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0);
return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, rg_xo_03);
}
static int
@ -1115,6 +1133,19 @@ static int mt7986_wmac_init(struct mt7915_dev *dev)
{
struct device *pdev = dev->mt76.dev;
struct platform_device *pfdev = to_platform_device(pdev);
struct clk *mcu_clk, *ap_conn_clk;
mcu_clk = devm_clk_get(pdev, "mcu");
if (IS_ERR(mcu_clk))
dev_err(pdev, "mcu clock not found\n");
else if (clk_prepare_enable(mcu_clk))
dev_err(pdev, "mcu clock configuration failed\n");
ap_conn_clk = devm_clk_get(pdev, "ap2conn");
if (IS_ERR(ap_conn_clk))
dev_err(pdev, "ap2conn clock not found\n");
else if (clk_prepare_enable(ap_conn_clk))
dev_err(pdev, "ap2conn clock configuration failed\n");
dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
if (IS_ERR(dev->dcm))
@ -1128,7 +1159,7 @@ static int mt7986_wmac_init(struct mt7915_dev *dev)
if (IS_ERR(dev->rstc))
return PTR_ERR(dev->rstc);
return mt7986_wmac_enable(dev);
return 0;
}
static int mt7986_wmac_probe(struct platform_device *pdev)
@ -1161,12 +1192,12 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
if (ret)
goto free_device;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
ret = mt7986_wmac_init(dev);
if (ret)
goto free_irq;
mt7915_wfsys_reset(dev);
ret = mt7915_register_device(dev);
if (ret)
goto free_irq;


@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
int i, err;
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
if (err < 0)
return err;


@ -11,6 +11,10 @@ static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7921_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_STATION)
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP)
}
};
@ -64,7 +68,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->iface_combinations = if_comb;
wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
@ -80,6 +85,10 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@ -255,6 +264,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
#if IS_ENABLED(CONFIG_IPV6)
INIT_WORK(&dev->ipv6_ns_work, mt7921_set_ipv6_ns_work);
skb_queue_head_init(&dev->ipv6_ns_list);
#endif
skb_queue_head_init(&dev->phy.scan_event_list);
skb_queue_head_init(&dev->coredump.msg_list);
INIT_LIST_HEAD(&dev->sta_poll_list);


@ -696,7 +696,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_VHT;
if (i > 9)
if (i > 11)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
@ -814,6 +814,7 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
{
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
u16 ethertype;
bool wmm = false;
u32 val;
@ -827,7 +828,8 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
ethertype = get_unaligned_be16(&skb->data[12]);
if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
@ -1361,12 +1363,21 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mvif->phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
mt7921_mcu_set_tx(dev, vif);
if (vif->type == NL80211_IFTYPE_AP) {
mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
true);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
}
}
/* system error recovery */
@ -1715,3 +1726,29 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
return false;
}
EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
#if IS_ENABLED(CONFIG_IPV6)
void mt7921_set_ipv6_ns_work(struct work_struct *work)
{
struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
ipv6_ns_work);
struct sk_buff *skb;
int ret = 0;
do {
skb = skb_dequeue(&dev->ipv6_ns_list);
if (!skb)
break;
mt7921_mutex_acquire(dev);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(OFFLOAD), true);
mt7921_mutex_release(dev);
} while (!ret);
if (ret)
skb_queue_purge(&dev->ipv6_ns_list);
}
#endif
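
mt7921_set_ipv6_ns_work() drains the queued NS-offload messages one at a time and stops at the first MCU failure, purging whatever is still pending. A generic self-contained sketch of that drain-until-error pattern (queue_pop()/submit()/purge() are stand-ins, not driver API):

#include <stdio.h>

struct item { int id; };

static struct item pending[] = { {1}, {2}, {3} };
static unsigned int head;

static struct item *queue_pop(void)
{
	if (head >= sizeof(pending) / sizeof(pending[0]))
		return NULL;
	return &pending[head++];
}

static int submit(struct item *it)
{
	printf("sent item %d\n", it->id);
	return it->id == 2 ? -1 : 0;	/* pretend the second send fails */
}

static void purge(void)
{
	head = sizeof(pending) / sizeof(pending[0]);
	printf("purged remaining items\n");
}

int main(void)
{
	struct item *it;
	int ret = 0;

	/* same shape as the do/while loop above: stop on empty queue or error */
	do {
		it = queue_pop();
		if (!it)
			break;
		ret = submit(it);
	} while (!ret);

	if (ret)
		purge();
	return 0;
}
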


@ -5,6 +5,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include "mt7921.h"
#include "mcu.h"
@ -12,7 +13,7 @@ static void
mt7921_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@ -53,6 +54,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
break;
default:
continue;
@ -86,6 +88,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
switch (i) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[4] |=
IEEE80211_HE_MAC_CAP4_BQR;
he_cap_elem->mac_cap_info[5] |=
IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
he_cap_elem->phy_cap_info[3] |=
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
he_cap_elem->phy_cap_info[6] |=
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@ -294,7 +313,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mt7921_mutex_acquire(dev);
mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@ -310,7 +329,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7921_WTBL_RESERVED - mvif->mt76.idx;
@ -330,7 +349,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
mtxq->wcid = idx;
}
out:
@ -354,7 +373,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt7921_mutex_release(dev);
@ -489,8 +508,8 @@ mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
mt7921_mcu_set_sniffer(dev, vif, monitor);
pm->enable = !monitor;
pm->ds_enable = !monitor;
pm->enable = pm->enable_user && !monitor;
pm->ds_enable = pm->ds_enable_user && !monitor;
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
@ -566,7 +585,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@ -576,23 +594,23 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
phy->rxfilter &= ~(_hw); \
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
dev->mt76.rxfilter &= ~(_hw); \
dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mt7921_mutex_acquire(dev);
phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
MT_WF_RFCR_DROP_FRAME_REPORT |
MT_WF_RFCR_DROP_PROBEREQ |
MT_WF_RFCR_DROP_MCAST_FILTERED |
MT_WF_RFCR_DROP_MCAST |
MT_WF_RFCR_DROP_BCAST |
MT_WF_RFCR_DROP_DUPLICATE |
MT_WF_RFCR_DROP_A2_BSSID |
MT_WF_RFCR_DROP_UNWANTED_CTL |
MT_WF_RFCR_DROP_STBC_MULTI);
dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
MT_WF_RFCR_DROP_FRAME_REPORT |
MT_WF_RFCR_DROP_PROBEREQ |
MT_WF_RFCR_DROP_MCAST_FILTERED |
MT_WF_RFCR_DROP_MCAST |
MT_WF_RFCR_DROP_BCAST |
MT_WF_RFCR_DROP_DUPLICATE |
MT_WF_RFCR_DROP_A2_BSSID |
MT_WF_RFCR_DROP_UNWANTED_CTL |
MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@ -606,7 +624,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
if (*total_flags & FIF_CONTROL)
mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
@ -635,6 +653,20 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
true);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
}
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED))
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif,
info->enable_beacon);
/* ensure that enable txcmd_mode after bss_info */
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7921_mcu_set_tx(dev, vif);
@ -1301,7 +1333,7 @@ static int mt7921_suspend(struct ieee80211_hw *hw,
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76_connac_mcu_set_suspend_iter,
mt7921_mcu_set_suspend_iter,
&dev->mphy);
mt7921_mutex_release(dev);
@ -1376,6 +1408,67 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
MCU_UNI_CMD(STA_REC_UPDATE));
}
#if IS_ENABLED(CONFIG_IPV6)
static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct inet6_dev *idev)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mvif->phy->dev;
struct inet6_ifaddr *ifa;
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
struct sk_buff *skb;
u8 i, idx = 0;
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_arpns_tlv arpns;
} req_hdr = {
.hdr = {
.bss_idx = mvif->mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
.mode = 2, /* update */
.option = 1, /* update only */
},
};
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa->flags & IFA_F_TENTATIVE)
continue;
ns_addrs[idx] = ifa->addr;
if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN)
break;
}
read_unlock_bh(&idev->lock);
if (!idx)
return;
skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) +
idx * sizeof(struct in6_addr), GFP_ATOMIC);
if (!skb)
return;
req_hdr.arpns.ips_num = idx;
req_hdr.arpns.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)
+ idx * sizeof(struct in6_addr));
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < idx; i++)
skb_put_data(skb, &ns_addrs[i].in6_u, sizeof(struct in6_addr));
skb_queue_tail(&dev->ipv6_ns_list, skb);
ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work);
}
#endif
static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
@ -1395,6 +1488,18 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
return err;
}
static void
mt7921_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mt7921_mutex_acquire(dev);
mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@ -1409,10 +1514,14 @@ const struct ieee80211_ops mt7921_ops = {
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7921_set_key,
.sta_set_decap_offload = mt7921_sta_set_decap_offload,
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = mt7921_ipv6_addr_change,
#endif /* CONFIG_IPV6 */
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
.channel_switch_beacon = mt7921_channel_switch_beacon,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,
.get_et_sset_count = mt7921_get_et_sset_count,


@ -224,6 +224,49 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);
#ifdef CONFIG_PM
static int
mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
struct ieee80211_vif *vif, bool suspend)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_arpns_tlv arpns;
} req = {
.hdr = {
.bss_idx = mvif->mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
.mode = suspend,
},
};
return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
true);
}
void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (IS_ENABLED(CONFIG_IPV6)) {
struct mt76_phy *phy = priv;
mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
!test_bit(MT76_STATE_RUNNING,
&phy->state));
}
mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}
#endif /* CONFIG_PM */
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
@ -248,7 +291,8 @@ mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
if (mvif->idx != event->bss_idx)
return;
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
vif->type != NL80211_IFTYPE_STATION)
return;
ieee80211_connection_loss(vif);
@ -1166,3 +1210,79 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
true);
}
int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
bool enable)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct ieee80211_mutable_offsets offs;
struct {
struct req_hdr {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct bcn_content_tlv {
__le16 tag;
__le16 len;
__le16 tim_ie_pos;
__le16 csa_ie_pos;
__le16 bcc_ie_pos;
/* 0: disable beacon offload
* 1: enable beacon offload
* 2: update probe response offload
*/
u8 enable;
/* 0: legacy format (TXD + payload)
* 1: only cap field IE
*/
u8 type;
__le16 pkt_len;
u8 pkt[512];
} __packed beacon_tlv;
} req = {
.hdr = {
.bss_idx = mvif->mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
.enable = enable,
},
};
struct sk_buff *skb;
if (!enable)
goto out;
skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
if (!skb)
return -EINVAL;
if (skb->len > 512 - MT_TXD_SIZE) {
dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
dev_kfree_skb(skb);
return -EINVAL;
}
mt7921_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
wcid, NULL, 0, true);
memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
}
dev_kfree_skb(skb);
out:
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}


@ -155,7 +155,6 @@ struct mt7921_phy {
struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
u32 rxfilter;
u64 omac_mask;
u16 noise;
@ -212,6 +211,10 @@ struct mt7921_dev {
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
const struct mt7921_hif_ops *hif_ops;
struct work_struct ipv6_ns_work;
/* IPv6 addresses for WoWLAN */
struct sk_buff_head ipv6_ns_list;
};
enum {
@ -450,6 +453,10 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
void mt7921_set_runtime_pm(struct mt7921_dev *dev);
void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
void mt7921_set_ipv6_ns_work(struct work_struct *work);
int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
@ -467,7 +474,11 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
int mt7921u_mcu_power_on(struct mt7921_dev *dev);
int mt7921u_wfsys_reset(struct mt7921_dev *dev);
int mt7921u_dma_init(struct mt7921_dev *dev);
int mt7921u_dma_init(struct mt7921_dev *dev, bool resume);
int mt7921u_init_reset(struct mt7921_dev *dev);
int mt7921u_mac_reset(struct mt7921_dev *dev);
int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
bool enable);
#endif


@ -119,7 +119,6 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt7921_mcu_exit(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);
}
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
@ -302,8 +301,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
if (!bus_ops) {
ret = -ENOMEM;
goto err_free_dev;
}
bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
@ -312,7 +313,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
ret = __mt7921e_mcu_drv_pmctrl(dev);
if (ret)
return ret;
goto err_free_dev;
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
@ -354,6 +355,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
mt7921e_unregister_device(dev);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
}


@ -516,4 +516,9 @@
#define MT_TOP_MISC2_FW_PWR_ON BIT(0)
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs))
#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028)
#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6)
#define MT_WF_SW_SER_DONE_SUSPEND BIT(7)
#endif


@ -246,7 +246,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
if (ret)
goto error;
ret = mt7921u_dma_init(dev);
ret = mt7921u_dma_init(dev, false);
if (ret)
return ret;
@ -288,6 +288,61 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf)
mt76_free_device(&dev->mt76);
}
#ifdef CONFIG_PM
static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
int err;
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
if (err)
return err;
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
set_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
return 0;
}
static int mt7921u_resume(struct usb_interface *intf)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
bool reinit = true;
int err, i;
for (i = 0; i < 10; i++) {
u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) {
reinit = false;
break;
}
if (val & MT_WF_SW_SER_DONE_SUSPEND) {
mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0);
break;
}
msleep(20);
}
if (reinit || mt7921_dma_need_reinit(dev)) {
err = mt7921u_dma_init(dev, true);
if (err)
return err;
}
clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
return err;
return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
}
#endif /* CONFIG_PM */
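
The resume path polls a firmware scratch register up to ten times, 20 ms apart: if the SER trigger-suspend bit never appears the full DMA re-init is skipped, and if the done bit is set the flag word is cleared before continuing. A compact sketch of that bounded poll with the register read stubbed out (bit positions follow the MT_WF_SW_SER_* defines added in regs.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SER_TRIGGER_SUSPEND	(1u << 6)	/* MT_WF_SW_SER_TRIGGER_SUSPEND */
#define SER_DONE_SUSPEND	(1u << 7)	/* MT_WF_SW_SER_DONE_SUSPEND */

static uint32_t read_event_reg(int attempt)
{
	/* stub: pretend the firmware reports "done" on the third poll */
	return attempt < 2 ? SER_TRIGGER_SUSPEND
			   : SER_TRIGGER_SUSPEND | SER_DONE_SUSPEND;
}

int main(void)
{
	bool reinit = true;
	int i;

	for (i = 0; i < 10; i++) {
		uint32_t val = read_event_reg(i);

		if (!(val & SER_TRIGGER_SUSPEND)) {
			reinit = false;		/* firmware never entered SER suspend */
			break;
		}
		if (val & SER_DONE_SUSPEND)
			break;			/* SER recovery finished, re-init DMA */
		/* the real code sleeps 20 ms here before retrying */
	}

	printf("reinit DMA: %s after %d polls\n", reinit ? "yes" : "no", i + 1);
	return 0;
}
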
MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
@ -297,6 +352,11 @@ static struct usb_driver mt7921u_driver = {
.id_table = mt7921u_device_table,
.probe = mt7921u_probe,
.disconnect = mt7921u_disconnect,
#ifdef CONFIG_PM
.suspend = mt7921u_suspend,
.resume = mt7921u_resume,
.reset_resume = mt7921u_resume,
#endif /* CONFIG_PM */
.soft_unbind = 1,
.disable_hub_initiated_lpm = 1,
};


@ -121,7 +121,7 @@ static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
}
int mt7921u_dma_init(struct mt7921_dev *dev)
int mt7921u_dma_init(struct mt7921_dev *dev, bool resume)
{
int err;
@ -136,6 +136,9 @@ int mt7921u_dma_init(struct mt7921_dev *dev)
MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
if (resume)
return 0;
err = mt7921u_dma_rx_evt_ep4(dev);
if (err)
return err;
@ -221,7 +224,7 @@ int mt7921u_mac_reset(struct mt7921_dev *dev)
if (err)
goto out;
err = mt7921u_dma_init(dev);
err = mt7921u_dma_init(dev, false);
if (err)
goto out;


@ -119,7 +119,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
memset(cb, 0, sizeof(*cb));
if (!wcid)
if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
return MT_PACKET_ID_NO_ACK;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
@ -435,12 +435,11 @@ mt76_txq_stopped(struct mt76_queue *q)
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
struct mt76_txq *mtxq)
struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1;
@ -462,7 +461,9 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
spin_unlock(&q->lock);
if (idx < 0)
return idx;
@ -482,14 +483,18 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
spin_unlock(&q->lock);
if (idx < 0)
break;
n_frames++;
} while (1);
spin_lock(&q->lock);
dev->queue_ops->kick(dev, q);
spin_unlock(&q->lock);
return n_frames;
}
@ -520,12 +525,10 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
break;
mtxq = (struct mt76_txq *)txq->drv_priv;
wcid = mtxq->wcid;
if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
spin_lock_bh(&q->lock);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@ -534,15 +537,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
u8 tid = txq->tid;
mtxq->send_bar = false;
spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
spin_lock_bh(&q->lock);
}
if (!mt76_txq_stopped(q))
n_frames = mt76_txq_send_burst(phy, q, mtxq);
spin_unlock_bh(&q->lock);
n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);
ieee80211_return_txq(phy->hw, txq, false);
@ -562,6 +561,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
if (qid >= 4)
return;
local_bh_disable();
rcu_read_lock();
do {
@ -571,6 +571,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
} while (len > 0);
rcu_read_unlock();
local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
@ -720,12 +721,17 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
spin_lock_bh(&dev->token_lock);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
GFP_ATOMIC);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
if (token >= 0)
dev->token_count++;
if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
if (mtk_wed_device_active(&dev->mmio.wed) &&
token >= dev->mmio.wed.wlan.token_start)
dev->wed_token_count++;
#endif
if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
__mt76_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
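
mt76_token_consume() now sizes the token pool from dev->token_size, which the WED offload-enable callback shrinks to wed->wlan.token_start, and separately counts WED-owned tokens. The throttling rule itself is unchanged: once outstanding tokens come within MT76_TOKEN_FREE_THR of the pool size, TX is blocked until releases bring the count back down. A toy allocator showing that threshold behaviour (pool size and threshold values are made up; the real code uses an idr, not an array):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE	8
#define FREE_THR	2	/* stand-in for MT76_TOKEN_FREE_THR */

static bool used[POOL_SIZE];
static int count;
static bool tx_blocked;

static int token_get(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (used[i])
			continue;
		used[i] = true;
		if (++count >= POOL_SIZE - FREE_THR)
			tx_blocked = true;	/* like __mt76_set_tx_blocked(dev, true) */
		return i;
	}
	return -1;
}

static void token_put(int token)
{
	used[token] = false;
	count--;
	if (count < POOL_SIZE - FREE_THR && tx_blocked)
		tx_blocked = false;		/* wake the TX path again */
}

int main(void)
{
	int t[POOL_SIZE];

	for (int i = 0; i < POOL_SIZE - FREE_THR; i++)
		t[i] = token_get();
	printf("blocked after %d tokens: %d\n", count, tx_blocked);	/* 1 */

	token_put(t[0]);
	printf("blocked after one release: %d\n", tx_blocked);		/* 0 */
	return 0;
}
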
@ -742,10 +748,18 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
if (txwi)
if (txwi) {
dev->token_count--;
if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
if (mtk_wed_device_active(&dev->mmio.wed) &&
token >= dev->mmio.wed.wlan.token_start &&
--dev->wed_token_count == 0)
wake_up(&dev->tx_wait);
#endif
}
if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
dev->phy.q_tx[0]->blocked)
*wake = true;