Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem

John W. Linville 2014-05-02 13:47:50 -04:00
commit 406a94d7fa
213 changed files with 6165 additions and 3535 deletions


@ -100,6 +100,7 @@
!Finclude/net/cfg80211.h wdev_priv
!Finclude/net/cfg80211.h ieee80211_iface_limit
!Finclude/net/cfg80211.h ieee80211_iface_combination
!Finclude/net/cfg80211.h cfg80211_check_combinations
</chapter>
<chapter>
<title>Actions and configuration</title>


@ -59,6 +59,8 @@ struct btmrvl_device {
};
struct btmrvl_adapter {
void *hw_regs_buf;
u8 *hw_regs;
u32 int_count;
struct sk_buff_head tx_queue;
u8 psmode;
@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);


@ -24,6 +24,7 @@
#include <net/bluetooth/hci_core.h>
#include "btmrvl_drv.h"
#include "btmrvl_sdio.h"
#define VERSION "1.0"
@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
return 0;
}
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
{
int ret;
@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
static void btmrvl_init_adapter(struct btmrvl_private *priv)
{
int buf_size;
skb_queue_head_init(&priv->adapter->tx_queue);
priv->adapter->ps_state = PS_AWAKE;
buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
if (!priv->adapter->hw_regs_buf) {
priv->adapter->hw_regs = NULL;
BT_ERR("Unable to allocate buffer for hw_regs.");
} else {
priv->adapter->hw_regs =
(u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
BTSDIO_DMA_ALIGN);
BT_DBG("hw_regs_buf=%p hw_regs=%p",
priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
}
init_waitqueue_head(&priv->adapter->cmd_wait_q);
}
@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
{
skb_queue_purge(&priv->adapter->tx_queue);
kfree(priv->adapter->hw_regs_buf);
kfree(priv->adapter);
priv->adapter = NULL;
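The hunk above adds a DMA-friendly scratch buffer for interrupt registers: hw_regs_buf is over-allocated by the alignment slack, hw_regs is the aligned pointer derived inside it, and only the original hw_regs_buf is ever freed. A minimal userspace sketch of that over-allocate-and-align pattern; the ALIGN_SZ/ALIGN_ADDR macros below are local stand-ins that mirror the driver's intent, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 64                /* stand-in for SDIO_BLOCK_SIZE */
#define DMA_ALIGN  8                 /* stand-in for BTSDIO_DMA_ALIGN */

/* Local stand-ins mirroring the driver's macros. */
#define ALIGN_SZ(sz, a)  ((sz) + ((a) - 1))
#define ALIGN_ADDR(p, a) (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
        /* Over-allocate so an aligned BLOCK_SIZE window always fits. */
        void *buf = calloc(1, ALIGN_SZ(BLOCK_SIZE, DMA_ALIGN));
        uint8_t *regs;

        if (!buf)
                return 1;

        regs = (uint8_t *)ALIGN_ADDR(buf, DMA_ALIGN);
        printf("buf=%p aligned regs=%p\n", buf, (void *)regs);

        free(buf); /* free the original pointer, never the aligned one */
        return 0;
}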


@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_0 = 0x00,
.io_port_1 = 0x01,
.io_port_2 = 0x02,
.int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_0 = 0x78,
.io_port_1 = 0x79,
.io_port_2 = 0x7a,
.int_read_to_clear = false,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
.io_port_0 = 0xd8,
.io_port_1 = 0xd9,
.io_port_2 = 0xda,
.int_read_to_clear = true,
.host_int_rsr = 0x01,
.card_misc_cfg = 0xcc,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@ -667,6 +672,53 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
return 0;
}
static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
{
struct btmrvl_adapter *adapter = card->priv->adapter;
int ret;
ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
if (ret) {
BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
return ret;
}
*ireg = adapter->hw_regs[card->reg->host_intstatus];
BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
return 0;
}
static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
{
int ret;
*ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
if (ret) {
BT_ERR("sdio_readb: read int status failed: %d", ret);
return ret;
}
if (*ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* Clear the interrupt status register and re-enable the
* interrupt.
*/
BT_DBG("int_status = 0x%x", *ireg);
sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
UP_LD_HOST_INT_STATUS),
card->reg->host_intstatus, &ret);
if (ret) {
BT_ERR("sdio_writeb: clear int status failed: %d", ret);
return ret;
}
}
return 0;
}
static void btmrvl_sdio_interrupt(struct sdio_func *func)
{
struct btmrvl_private *priv;
@ -684,28 +736,13 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
priv = card->priv;
ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
if (ret) {
BT_ERR("sdio_readb: read int status register failed");
if (card->reg->int_read_to_clear)
ret = btmrvl_sdio_read_to_clear(card, &ireg);
else
ret = btmrvl_sdio_write_to_clear(card, &ireg);
if (ret)
return;
}
if (ireg != 0) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* Clear the interrupt status register and re-enable the
* interrupt.
*/
BT_DBG("ireg = 0x%x", ireg);
sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
UP_LD_HOST_INT_STATUS),
card->reg->host_intstatus, &ret);
if (ret) {
BT_ERR("sdio_writeb: clear int status register failed");
return;
}
}
spin_lock_irqsave(&priv->driver_lock, flags);
sdio_ireg |= ireg;
@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
if (card->reg->int_read_to_clear) {
reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
if (ret < 0) {
ret = -EIO;
goto release_irq;
}
sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
if (ret < 0) {
ret = -EIO;
goto release_irq;
}
reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
if (ret < 0) {
ret = -EIO;
goto release_irq;
}
sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
if (ret < 0) {
ret = -EIO;
goto release_irq;
}
}
sdio_set_drvdata(func, card);
sdio_release_host(func);


@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
u8 io_port_0;
u8 io_port_1;
u8 io_port_2;
bool int_read_to_clear;
u8 host_int_rsr;
u8 card_misc_cfg;
};
struct btmrvl_sdio_card {


@ -55,13 +55,6 @@ struct h4_struct {
struct sk_buff_head txq;
};
/* H4 receiver States */
#define H4_W4_PACKET_TYPE 0
#define H4_W4_EVENT_HDR 1
#define H4_W4_ACL_HDR 2
#define H4_W4_SCO_HDR 3
#define H4_W4_DATA 4
/* Initialize protocol */
static int h4_open(struct hci_uart *hu)
{


@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ar5523 *ar = hw->priv;


@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
return 0;
}
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, *param);
address, param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
resplen);
return ret;
return -EIO;
}
*param = __le32_to_cpu(resp.execute.result);
*result = __le32_to_cpu(resp.execute.result);
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
return 0;
}
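The reworked ath10k_bmi_execute() above takes the input parameter by value and returns the firmware's result through a separate pointer, so a failed BMI exchange and a firmware-reported failure can be told apart. A hedged caller-side sketch; the wrapper function is hypothetical and the error codes follow the core.c hunk later in this diff.

/* Hypothetical caller: run firmware code at 'address' and check both the
 * exchange status and the result the firmware reports back. */
static int example_bmi_run(struct ath10k *ar, u32 address)
{
        u32 result;
        int ret;

        ret = ath10k_bmi_execute(ar, address, 0 /* param */, &result);
        if (ret)
                return ret;      /* the BMI exchange itself failed */

        if (result != 0)
                return -EINVAL;  /* firmware signalled an error */

        return 0;
}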


@ -217,7 +217,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
ret; \
})
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,


@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
dma_addr_t base_addr;
char *ptr;
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
nentries = roundup_pow_of_two(nentries);
nentries = roundup_pow_of_two(attr->src_nentries);
if (ce_state->src_ring) {
WARN_ON(ce_state->src_ring->nentries != nentries);
return 0;
}
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
memset(src_ring->per_transfer_context, 0,
nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
src_ring->per_transfer_context = (void **)ptr;
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
nentries = roundup_pow_of_two(attr->dest_nentries);
memset(dest_ring->per_transfer_context, 0,
nentries * sizeof(*dest_ring->per_transfer_context));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce_ring *src_ring;
u32 nentries = attr->src_nentries;
dma_addr_t base_addr;
nentries = roundup_pow_of_two(nentries);
src_ring = kzalloc(sizeof(*src_ring) +
(nentries *
sizeof(*src_ring->per_transfer_context)),
GFP_KERNEL);
if (src_ring == NULL)
return ERR_PTR(-ENOMEM);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
src_ring->base_addr_owner_space_unaligned =
pci_alloc_consistent(ar_pci->pdev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->src_ring);
ce_state->src_ring = NULL;
return -ENOMEM;
kfree(src_ring);
return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
pci_free_consistent(ar_pci->pdev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
ce_state->src_ring = NULL;
return -ENOMEM;
dma_free_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space);
kfree(src_ring);
return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
return src_ring;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
u32 nentries;
dma_addr_t base_addr;
char *ptr;
nentries = roundup_pow_of_two(nentries);
nentries = roundup_pow_of_two(attr->dest_nentries);
if (ce_state->dest_ring) {
WARN_ON(ce_state->dest_ring->nentries != nentries);
return 0;
}
dest_ring = kzalloc(sizeof(*dest_ring) +
(nentries *
sizeof(*dest_ring->per_transfer_context)),
GFP_KERNEL);
if (dest_ring == NULL)
return ERR_PTR(-ENOMEM);
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
dest_ring->per_transfer_context = (void **)ptr;
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
pci_alloc_consistent(ar_pci->pdev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->dest_ring);
ce_state->dest_ring = NULL;
return -ENOMEM;
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ctrl_addr;
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
return ce_state;
return dest_ring;
}
/*
@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce_pipe *ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar);
if (ret)
return NULL;
return ret;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
goto out;
}
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
ce_state = NULL;
goto out;
}
}
if (attr->dest_nentries) {
ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
ce_state = NULL;
goto out;
}
}
out:
ath10k_pci_sleep(ar);
return ce_state;
return ret;
}
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
ath10k_ce_deinit_src_ring(ar, ce_id);
ath10k_ce_deinit_dest_ring(ar, ce_id);
ath10k_pci_sleep(ar);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
ath10k_err("failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
}
}
if (attr->dest_nentries) {
ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
}
}
return 0;
}
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
}
if (ce_state->dest_ring) {
pci_free_consistent(ar_pci->pdev,
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space);
dma_free_coherent(ar->dev,
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
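The hunks above also convert the copy-engine ring allocations from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API, which takes a struct device and an explicit gfp mask instead of implying GFP_ATOMIC. A minimal sketch of the converted calls; the helper functions are hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helpers showing the allocation/free pair used above. */
static void *example_ring_alloc(struct device *dev, size_t size,
                                dma_addr_t *paddr)
{
        /* dma_alloc_coherent() replaces pci_alloc_consistent(pdev, ...),
         * which always allocated with GFP_ATOMIC. */
        return dma_alloc_coherent(dev, size, paddr, GFP_KERNEL);
}

static void example_ring_free(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t paddr)
{
        dma_free_coherent(dev, size, vaddr, paddr);
}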


@ -104,7 +104,8 @@ struct ath10k_ce_ring {
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
void **per_transfer_context;
/* keep last */
void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr);
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
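The declarations above split the old ath10k_ce_init()/ath10k_ce_deinit() pair into an alloc/free and init/deinit lifecycle: alloc/free own the DMA rings, init/deinit only program or quiesce the copy-engine registers. A rough sketch of the intended call order, assuming the CE_COUNT and host_ce_config_wlan names used by the pci.c hunks later in this diff; error unwinding is omitted.

/* Hypothetical walkthrough of the new copy-engine pipe lifecycle. */
static void example_ce_lifecycle(struct ath10k *ar)
{
        int i, ret;

        /* Probe: allocate the DMA rings once. */
        for (i = 0; i < CE_COUNT; i++)
                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);

        /* Power up: program the copy-engine registers. */
        for (i = 0; i < CE_COUNT; i++)
                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);

        /* Power down: quiesce the hardware but keep the allocations. */
        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_deinit_pipe(ar, i);

        /* Remove: release the rings. */
        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_free_pipe(ar, i);

        (void)ret; /* error paths trimmed in this sketch */
}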


@ -249,30 +249,40 @@ static int ath10k_download_board_data(struct ath10k *ar)
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
u32 result, address = ar->hw_params.patch_load_addr;
int ret;
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len)
if (!ar->otp_data || !ar->otp_len) {
ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->otp_data, ar->otp_len);
return 0;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
return ret;
}
exec_param = 0;
ret = ath10k_bmi_execute(ar, address, &exec_param);
ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
goto exit;
return ret;
}
exit:
return ret;
ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (result != 0) {
ath10k_err("otp calibration failed: %d", result);
return -EINVAL;
}
return 0;
}
static int ath10k_download_fw(struct ath10k *ar)
@ -389,8 +399,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
ath10k_err("Could not fetch firmware file '%s': %ld\n",
name, PTR_ERR(ar->firmware));
ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
@ -401,14 +411,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
ath10k_err("firmware image too small to contain magic: %zu\n",
len);
ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
ath10k_err("Invalid firmware magic\n");
ath10k_err("invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
@ -430,7 +440,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr);
if (len < ie_len) {
ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
@ -513,8 +523,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
}
if (!ar->firmware_data || !ar->firmware_len) {
ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
name);
ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
}
@ -531,7 +541,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
ath10k_err("could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err;
}
@ -549,19 +561,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
ar->fw_api = 2;
ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
if (ret == 0) {
ar->fw_api = 2;
goto out;
}
if (ret == 0)
goto success;
ar->fw_api = 1;
ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
ar->fw_api = 1;
out:
success:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
@ -572,16 +586,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
int ret;
ret = ath10k_download_board_data(ar);
if (ret)
if (ret) {
ath10k_err("failed to download board data: %d\n", ret);
return ret;
}
ret = ath10k_download_and_run_otp(ar);
if (ret)
if (ret) {
ath10k_err("failed to run otp: %d\n", ret);
return ret;
}
ret = ath10k_download_fw(ar);
if (ret)
if (ret) {
ath10k_err("failed to download firmware: %d\n", ret);
return ret;
}
return ret;
}
@ -835,9 +855,12 @@ int ath10k_core_start(struct ath10k *ar)
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
ar->hw_params.name, ar->target_version,
ar->hw->wiphy->fw_version, ar->fw_api,
ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);


@ -119,6 +119,7 @@ struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
@ -130,6 +131,12 @@ struct ath10k_target_stats {
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
u32 rts_bad;
u32 rts_good;
u32 fcs_bad;
u32 no_beacons;
u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
@ -260,6 +267,8 @@ struct ath10k_vif {
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
};
struct ath10k_vif_iter {
@ -419,13 +428,18 @@ struct ath10k {
struct cfg80211_chan_def chandef;
int free_vdev_map;
bool promisc;
bool monitor;
int monitor_vdev_id;
bool monitor_enabled;
bool monitor_present;
bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
/* protected by conf_mutex */
bool radar_enabled;
int num_started_vdevs;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;


@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
struct wmi_pdev_stats *ps;
struct wmi_pdev_stats_10x *ps;
int i;
spin_lock_bh(&ar->data_lock);
@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
ps = (struct wmi_pdev_stats *)tmp;
ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
tmp += sizeof(struct wmi_pdev_stats);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
stats->rts_bad = __le32_to_cpu(ps->rts_bad);
stats->rts_good = __le32_to_cpu(ps->rts_good);
stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
stats->no_beacons = __le32_to_cpu(ps->no_beacons);
stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
tmp += sizeof(struct wmi_pdev_stats_10x);
} else {
tmp += sizeof(struct wmi_pdev_stats_old);
}
}
/* 0 or max vdevs */
@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
if (num_peer_stats) {
struct wmi_peer_stats *peer_stats;
struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
peer_stats = (struct wmi_peer_stats *)tmp;
peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i];
WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
s->peer_macaddr);
memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
s->peer_rx_rate =
__le32_to_cpu(peer_stats->peer_rx_rate);
tmp += sizeof(struct wmi_peer_stats_10x);
tmp += sizeof(struct wmi_peer_stats);
} else {
tmp += sizeof(struct wmi_peer_stats_old);
}
}
}
@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf = NULL;
unsigned int len = 0, buf_len = 2500;
unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0;
long left;
int i;
@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS bad count", fw_stats->rts_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS good count", fw_stats->rts_good);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"FCS bad count", fw_stats->fcs_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"No beacon count", fw_stats->no_beacons);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MIB int count", fw_stats->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PEER stats");
len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
"ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate",
fw_stats->peer_stat[i].peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
spin_unlock_bh(&ar->data_lock);
@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char buf[] = "To simulate firmware crash write the keyword"
" `crash` to this file.\nThis will force firmware"
" to report a crash to the host system.\n";
const char buf[] = "To simulate firmware crash write one of the"
" keywords to this file:\n `soft` - this will send"
" WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
" supports that command.\n `hard` - this will send"
" to firmware command with illegal parameters"
" causing firmware crash.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
/* Simulate firmware crash:
* 'soft': Call wmi command causing firmware hang. This firmware hang is
* recoverable by warm firmware reset.
* 'hard': Force firmware crash by setting any vdev parameter for not allowed
* vdev id. This is hard firmware crash because it is recoverable only by cold
* firmware reset.
*/
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32] = {};
char buf[32];
int ret;
mutex_lock(&ar->conf_mutex);
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
ret = -EINVAL;
goto exit;
}
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit;
}
ath10k_info("simulating firmware crash\n");
/* drop the possible '\n' from the end */
if (buf[count - 1] == '\n') {
buf[count - 1] = 0;
count--;
}
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
if (ret)
ath10k_warn("failed to force fw hang (%d)\n", ret);
if (!strcmp(buf, "soft")) {
ath10k_info("simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
} else if (!strcmp(buf, "hard")) {
ath10k_info("simulating hard firmware crash\n");
ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
ar->wmi.vdev_param->rts_threshold, 0);
} else {
ret = -EINVAL;
goto exit;
}
if (ret == 0)
ret = count;
if (ret) {
ath10k_warn("failed to simulate firmware crash: %d\n", ret);
goto exit;
}
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
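The handler above now accepts the keywords 'soft' and 'hard' instead of 'crash'. A small userspace sketch that triggers a soft crash through debugfs; the path assumes the default debugfs mount point and the first wiphy (phy0), so adjust it for the actual system.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path: debugfs at /sys/kernel/debug, device is phy0. */
        const char *path =
                "/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* The handler strips a trailing newline, so "soft\n" is accepted. */
        if (write(fd, "soft\n", 5) != 5)
                perror("write");

        close(fd);
        return 0;
}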


@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
goto err_pull;
}
ep->tx_credits -= credits;
ath10k_dbg(ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
@ -185,6 +188,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
report->eid, report->credits);
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);


@ -21,6 +21,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <net/mac80211.h>
#include "htc.h"
#include "rx_desc.h"
@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
struct htt_rx_info {
struct sk_buff *skb;
enum htt_rx_mpdu_status status;
enum htt_rx_mpdu_encrypt_type encrypt_type;
s8 signal;
struct {
u8 info0;
u32 info1;
u32 info2;
} rate;
u32 tsf;
bool fcs_err;
bool amsdu_more;
bool mic_err;
};
struct ath10k_htt_txbuf {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
@ -1289,6 +1273,9 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
/* rx_status template */
struct ieee80211_rx_status rx_status;
};
#define RX_HTT_HDR_STATUS_LEN 64


@ -297,6 +297,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
}
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu,
@ -310,7 +311,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
return 0;
return -1;
}
msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@ -442,6 +443,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
*tail_msdu = msdu;
if (*head_msdu == NULL)
msdu_chaining = -1;
/*
* Don't refill the ring yet.
*
@ -636,6 +640,190 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
enum ieee80211_band band,
u8 info0, u32 info1, u32 info2,
struct ieee80211_rx_status *status)
{
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 preamble = 0;
/* Check if valid fields */
if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
return;
preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using same rate table registering
HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info1 and info2 */
mcs = info1 & 0x1F;
nss = mcs >> 3;
bw = (info1 >> 7) & 1;
sgi = (info2 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
if (bw)
status->flag |= RX_FLAG_40MHZ;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
case 0:
break;
/* 40MHZ */
case 1:
status->flag |= RX_FLAG_40MHZ;
break;
/* 80MHZ */
case 2:
status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
break;
default:
break;
}
}
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
rx_status->flag &= ~(RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED);
return;
}
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
~IEEE80211_FCTL_PROTECTED);
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
struct ieee80211_channel *ch;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch)
return false;
status->band = ch->band;
status->freq = ch->center_freq;
return true;
}
static void ath10k_process_rx(struct ath10k *ar,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
skb,
skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR));
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@ -643,11 +831,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
}
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb_in)
{
struct htt_rx_desc *rxd;
struct sk_buff *skb = skb_in;
struct sk_buff *first;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
@ -728,24 +917,27 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
break;
}
info->skb = skb;
info->encrypt_type = enctype;
skb_in = skb;
ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
skb = skb->next;
info->skb->next = NULL;
skb_in->next = NULL;
if (skb)
info->amsdu_more = true;
rx_status->flag |= RX_FLAG_AMSDU_MORE;
else
rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
ath10k_process_rx(htt->ar, info);
ath10k_process_rx(htt->ar, rx_status, skb_in);
}
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@ -808,66 +1000,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
break;
}
info->skb = skb;
info->encrypt_type = enctype;
ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
return true;
return false;
}
static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
return true;
return false;
ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@ -952,21 +1087,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
return 0;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
struct sk_buff *head,
enum htt_rx_mpdu_status status,
bool channel_set,
u32 attention)
{
if (head->len == 0) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
return false;
}
if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
return false;
}
if (!channel_set) {
ath10k_warn("no channel configured; ignoring frame!\n");
return false;
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx CAC running\n");
return false;
}
return true;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct htt_rx_info info;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr;
int num_mpdu_ranges;
u32 attention;
int fw_desc_len;
u8 *fw_desc;
bool channel_set;
int i, j;
int ret;
lockdep_assert_held(&htt->rx_ring.lock);
memset(&info, 0, sizeof(info));
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@ -974,106 +1161,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
/* Fill this once, while this is per-ppdu */
if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
memset(rx_status, 0, sizeof(*rx_status));
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
}
if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
/* TSF available only in 32-bit */
rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
rx_status->flag |= RX_FLAG_MACTIME_END;
}
channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
if (channel_set) {
ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
rx->ppdu.info0,
__le32_to_cpu(rx->ppdu.info1),
__le32_to_cpu(rx->ppdu.info2),
rx_status);
}
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) {
info.status = mpdu_ranges[i].mpdu_range_status;
status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_status status;
int msdu_chaining;
msdu_head = NULL;
msdu_tail = NULL;
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
&fw_desc,
&fw_desc_len,
&msdu_head,
&msdu_tail);
ret = ath10k_htt_rx_amsdu_pop(htt,
&fw_desc,
&fw_desc_len,
&msdu_head,
&msdu_tail);
if (!msdu_head) {
ath10k_warn("htt rx no data!\n");
continue;
}
if (msdu_head->len == 0) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
if (ret < 0) {
ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
rxd = container_of((void *)msdu_head->data,
struct htt_rx_desc,
msdu_payload);
attention = __le32_to_cpu(rxd->attention.flags);
if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
status,
channel_set,
attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
status = info.status;
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
ath10k_htt_rx_is_mgmt(msdu_head)) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
if (ret > 0 &&
ath10k_unchain_msdu(msdu_head) < 0) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
else
rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx CAC running\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (msdu_chaining &&
(ath10k_unchain_msdu(msdu_head) < 0)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
if (info.fcs_err)
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx has FCS err\n");
if (info.mic_err)
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx has MIC err\n");
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
info.rate.info0 = rx->ppdu.info0;
info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
info.tsf = __le32_to_cpu(rx->ppdu.tsf);
if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
else
rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
ath10k_htt_rx_amsdu(htt, &info);
ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
else
ath10k_htt_rx_msdu(htt, &info);
ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
}
}
@ -1084,11 +1255,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format fmt;
struct htt_rx_info info = {};
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct ieee80211_hdr *hdr;
int msdu_chaining;
int ret;
bool tkip_mic_err;
bool decrypt_err;
u8 *fw_desc;
@ -1102,19 +1274,15 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock);
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail);
ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (!msdu_head) {
ath10k_warn("htt rx frag no data\n");
return;
}
if (msdu_chaining || msdu_head != msdu_tail) {
ath10k_warn("aggregation with fragmentation?!\n");
if (ret) {
ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
}
@ -1136,57 +1304,54 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
goto end;
}
info.skb = msdu_head;
info.status = HTT_RX_IND_MPDU_STATUS_OK;
info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
if (tkip_mic_err) {
if (tkip_mic_err)
ath10k_warn("tkip mic error\n");
info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
}
if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n");
dev_kfree_skb_any(info.skb);
dev_kfree_skb_any(msdu_head);
goto end;
}
if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
paramlen = ath10k_htt_rx_crypto_param_len(enctype);
/* It is more efficient to move the header than the payload */
memmove((void *)info.skb->data + paramlen,
(void *)info.skb->data,
memmove((void *)msdu_head->data + paramlen,
(void *)msdu_head->data,
hdrlen);
skb_pull(info.skb, paramlen);
hdr = (struct ieee80211_hdr *)info.skb->data;
skb_pull(msdu_head, paramlen);
hdr = (struct ieee80211_hdr *)msdu_head->data;
}
/* remove trailing FCS */
trim = 4;
/* remove crypto trailer */
trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
trim += ath10k_htt_rx_crypto_tail_len(enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8;
if (trim > info.skb->len) {
if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
dev_kfree_skb_any(info.skb);
dev_kfree_skb_any(msdu_head);
goto end;
}
skb_trim(info.skb, info.skb->len - trim);
skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
msdu_head->data, msdu_head->len);
ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {


@ -28,6 +28,7 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234

File diff suppressed because it is too large


@ -39,15 +39,27 @@ enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_MSI = 2,
};
static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
};
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long wait to wait for target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@ -346,9 +358,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
orig_nbytes,
&ce_data_base);
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
orig_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
@ -442,12 +455,12 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
__le32_to_cpu(((__le32 *)data_buf)[i]);
}
} else
ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
__func__, address);
ath10k_warn("failed to read diag value at 0x%x: %d\n",
address, ret);
if (data_buf)
pci_free_consistent(ar_pci->pdev, orig_nbytes,
data_buf, ce_data_base);
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base);
return ret;
}
@ -490,9 +503,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
orig_nbytes,
&ce_data_base);
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
orig_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@ -588,13 +602,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
done:
if (data_buf) {
pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
ce_data_base);
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base);
}
if (ret != 0)
ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
address);
ath10k_warn("failed to write diag value at 0x%x: %d\n",
address, ret);
return ret;
}
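The two hunks above replace the legacy pci_alloc_consistent()/pci_free_consistent() calls with the generic DMA API. A short, self-contained sketch of that allocate/free pairing, with generic helper names that are not the driver's, looks like:

#include <linux/dma-mapping.h>

/* Allocate a coherent buffer for a diagnostic transfer; GFP_ATOMIC matches
 * the converted code above, which may run in atomic context.
 */
static void *demo_diag_buf_alloc(struct device *dev, size_t len,
				 dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(dev, len, bus_addr, GFP_ATOMIC);
}

static void demo_diag_buf_free(struct device *dev, size_t len,
			       void *cpu_addr, dma_addr_t bus_addr)
{
	dma_free_coherent(dev, len, cpu_addr, bus_addr);
}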
@ -803,6 +817,9 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
@ -854,6 +871,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
int resources;
/*
@ -880,7 +899,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
@ -938,6 +957,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
{
int ret = 0;
ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
/* polling for received messages not supported */
*dl_is_polled = 0;
@ -997,6 +1018,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
{
int ul_is_polled, dl_is_polled;
ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
ul_pipe,
@ -1098,6 +1121,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret, ret_early;
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
@ -1233,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
int pipe_num;
int i;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
if (pipe_info->ce_hdl) {
ath10k_ce_deinit(pipe_info->ce_hdl);
pipe_info->ce_hdl = NULL;
pipe_info->buf_sz = 0;
}
}
for (i = 0; i < CE_COUNT; i++)
ath10k_ce_deinit_pipe(ar, i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
@ -1252,7 +1269,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
ret = ath10k_ce_disable_interrupts(ar);
if (ret)
@ -1697,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
static int ath10k_pci_alloc_ce(struct ath10k *ar)
{
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err("failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
}
return 0;
}
static void ath10k_pci_free_ce(struct ath10k *ar)
{
int i;
for (i = 0; i < CE_COUNT; i++)
ath10k_ce_free_pipe(ar, i);
}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
int pipe_num, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
if (pipe_info->ce_hdl == NULL) {
ath10k_err("failed to initialize CE for pipe: %d\n",
pipe_num);
/* It is safe to call it here. It checks if ce_hdl is
* valid for each pipe */
ath10k_pci_ce_deinit(ar);
return -1;
ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
if (ret) {
ath10k_err("failed to initialize copy engine pipe %d: %d\n",
pipe_num, ret);
return ret;
}
if (pipe_num == CE_COUNT - 1) {
@ -1741,16 +1777,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_indicator_address, fw_indicator;
u32 fw_indicator;
ath10k_pci_wake(ar);
fw_indicator_address = ar_pci->fw_indicator_address;
fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_indicator & FW_IND_EVENT_PENDING) {
/* ACK: clear Target-side pending event */
ath10k_pci_write32(ar, fw_indicator_address,
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_indicator & ~FW_IND_EVENT_PENDING);
if (ar_pci->started) {
@ -1769,11 +1804,10 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
ret = ath10k_do_pci_wake(ar);
if (ret) {
@ -1801,7 +1835,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
msleep(100);
/* clear fw indicator */
ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
/* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@ -1934,7 +1968,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
irq_mode = "legacy";
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
ath10k_info("pci irq %s\n", irq_mode);
ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
irq_mode, ath10k_pci_irq_mode,
ath10k_pci_reset_mode);
return 0;
@ -1956,6 +1992,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a
@ -1966,9 +2004,14 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/
ret = __ath10k_pci_hif_power_up(ar, false);
if (ret) {
ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
ath10k_warn("failed to power up target using warm reset: %d\n",
ret);
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
return ret;
ath10k_warn("trying cold reset\n");
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
ath10k_err("failed to power up target using cold reset too (%d)\n",
@ -1984,12 +2027,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
@ -2137,7 +2182,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_ind;
int ret;
@ -2148,9 +2192,9 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
return;
}
fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_ind & FW_IND_EVENT_PENDING) {
ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING);
/* Some structures are unavailable during early boot or at
@ -2385,33 +2429,50 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int wait_limit = 300; /* 3 sec */
unsigned long timeout;
int ret;
u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err("failed to wake up target: %d\n", ret);
ath10k_err("failed to wake up target for init: %d\n", ret);
return ret;
}
while (wait_limit-- &&
!(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
FW_IND_INITIALIZED)) {
timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
do {
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
/* target should never return this */
if (val == 0xffffffff)
continue;
if (val & FW_IND_INITIALIZED)
break;
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
iowrite32(PCIE_INTR_FIRMWARE_MASK |
PCIE_INTR_CE_MASK_ALL,
ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
PCIE_INTR_ENABLE_ADDRESS));
mdelay(10);
}
ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK |
PCIE_INTR_CE_MASK_ALL);
if (wait_limit < 0) {
ath10k_err("target stalled\n");
ret = -EIO;
mdelay(10);
} while (time_before(jiffies, timeout));
if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
ath10k_err("failed to receive initialized event from target: %08x\n",
val);
ret = -ETIMEDOUT;
goto out;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
out:
ath10k_pci_sleep(ar);
return ret;
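The rewritten wait loop above moves from a fixed iteration count to a jiffies-based deadline. A generic sketch of that msecs_to_jiffies()/time_before() polling pattern, where the register and ready bit are placeholders rather than ath10k definitions, is:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a memory-mapped status register until a ready bit appears or the
 * deadline passes; all-ones reads are treated as "bus not up yet".
 */
static int demo_poll_ready(void __iomem *reg, u32 ready_bit,
			   unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 val = readl(reg);

		if (val != 0xffffffff && (val & ready_bit))
			return 0;

		mdelay(10);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}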
@ -2422,6 +2483,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
int i, ret;
u32 val;
ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("failed to wake up target: %d\n",
@ -2453,6 +2516,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
}
ath10k_do_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
return 0;
}
@ -2484,7 +2550,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
if (ar_pci == NULL)
@ -2503,7 +2569,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
if (ath10k_target_ps)
if (ath10k_pci_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci);
@ -2516,7 +2582,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
pci_set_drvdata(pdev, ar);
@ -2594,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar);
ret = ath10k_pci_alloc_ce(ar);
if (ret) {
ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
goto err_iomap;
}
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("failed to register driver core: %d\n", ret);
goto err_iomap;
goto err_free_ce;
}
return 0;
err_free_ce:
ath10k_pci_free_ce(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@ -2626,7 +2699,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
@ -2639,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
ath10k_pci_free_ce(ar);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
@ -2680,6 +2754,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

@ -189,9 +189,6 @@ struct ath10k_pci {
struct ath10k_hif_cb msg_callbacks_current;
/* Target address used to signal a pending firmware event */
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;

@ -100,189 +100,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
}
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
enum ieee80211_band band,
struct ieee80211_rx_status *status)
{
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 info0 = info->rate.info0;
u32 info1 = info->rate.info1;
u32 info2 = info->rate.info2;
u8 preamble = 0;
/* Check if valid fields */
if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
return;
preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using same rate table registering
HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info1 and info2 */
mcs = info1 & 0x1F;
nss = mcs >> 3;
bw = (info1 >> 7) & 1;
sgi = (info2 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
if (bw)
status->flag |= RX_FLAG_40MHZ;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
case 0:
break;
/* 40MHZ */
case 1:
status->flag |= RX_FLAG_40MHZ;
break;
/* 80MHZ */
case 2:
status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
break;
default:
break;
}
}
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
struct ieee80211_rx_status *status;
struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
status = IEEE80211_SKB_RXCB(info->skb);
memset(status, 0, sizeof(*status));
if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(
__le16_to_cpu(hdr->frame_control) &
~IEEE80211_FCTL_PROTECTED);
}
if (info->mic_err)
status->flag |= RX_FLAG_MMIC_ERROR;
if (info->fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (info->amsdu_more)
status->flag |= RX_FLAG_AMSDU_MORE;
status->signal = info->signal;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch) {
ath10k_warn("no channel configured; ignoring frame!\n");
dev_kfree_skb_any(info->skb);
return;
}
process_rx_rates(ar, info, ch->band, status);
status->band = ch->band;
status->freq = ch->center_freq;
if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
/* TSF available only in 32-bit */
status->mactime = info->tsf & 0xffffffff;
status->flag |= RX_FLAG_MACTIME_END;
}
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
info->skb,
info->skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
status->band, status->flag, info->fcs_err);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{

@ -21,7 +21,6 @@
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);

@ -1362,13 +1362,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *bcn;
int ret, vdev_id = 0;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map);
ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
"-vdev map 0x%x\n",
ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
ev->vdev_map);
for (; map; map >>= 1, vdev_id++) {
@ -1385,12 +1382,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
bcn_info = &ev->bcn_info[i];
ath10k_dbg(ATH10K_DBG_MGMT,
"-bcn_info[%d]:\n"
"--tim_len %d\n"
"--tim_mcast %d\n"
"--tim_changed %d\n"
"--tim_num_ps_pending %d\n"
"--tim_bitmap 0x%08x%08x%08x%08x\n",
"mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
__le32_to_cpu(bcn_info->tim_info.tim_len),
__le32_to_cpu(bcn_info->tim_info.tim_mcast),
@ -2393,8 +2385,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
return 0;
}
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g)
static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
u16 rd2g, u16 rd5g, u16 ctl2g,
u16 ctl5g)
{
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
@ -2418,6 +2411,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
u16 rd2g, u16 rd5g,
u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
{
struct wmi_pdev_set_regdomain_cmd_10x *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
cmd->reg_domain = __cpu_to_le32(rd);
cmd->reg_domain_2G = __cpu_to_le32(rd2g);
cmd->reg_domain_5G = __cpu_to_le32(rd5g);
cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
cmd->dfs_domain = __cpu_to_le32(dfs_reg);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
ctl2g, ctl5g, dfs_reg);
else
return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
ctl2g, ctl5g);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg)
{
@ -3456,8 +3489,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n",
arg->vdev_id, arg->addr);
"wmi peer assoc vdev %d addr %pM (%s)\n",
arg->vdev_id, arg->addr,
arg->peer_reassoc ? "reassociate" : "new");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

@ -198,16 +198,6 @@ struct wmi_mac_addr {
} __packed;
} __packed;
/* macro to convert MAC address from WMI word format to char array */
#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
(c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
(c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
(c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
(c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
(c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
struct wmi_cmd_map {
u32 init_cmdid;
u32 start_scan_cmdid;
@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
__le32 conformance_test_limit_5G;
} __packed;
enum wmi_dfs_region {
/* Uninitialized dfs domain */
WMI_UNINIT_DFS_DOMAIN = 0,
/* FCC3 dfs domain */
WMI_FCC_DFS_DOMAIN = 1,
/* ETSI dfs domain */
WMI_ETSI_DFS_DOMAIN = 2,
/*Japan dfs domain */
WMI_MKK4_DFS_DOMAIN = 3,
};
struct wmi_pdev_set_regdomain_cmd_10x {
__le32 reg_domain;
__le32 reg_domain_2G;
__le32 reg_domain_5G;
__le32 conformance_test_limit_2G;
__le32 conformance_test_limit_5G;
/* dfs domain from wmi_dfs_region */
__le32 dfs_domain;
} __packed;
/* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd {
/* period in TUs */
@ -2210,6 +2225,19 @@ enum ath10k_protmode {
ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
};
enum wmi_rtscts_profile {
WMI_RTSCTS_FOR_NO_RATESERIES = 0,
WMI_RTSCTS_FOR_SECOND_RATESERIES,
WMI_RTSCTS_ACROSS_SW_RETRIES
};
#define WMI_RTSCTS_ENABLED 1
#define WMI_RTSCTS_SET_MASK 0x0f
#define WMI_RTSCTS_SET_LSB 0
#define WMI_RTSCTS_PROFILE_MASK 0xf0
#define WMI_RTSCTS_PROFILE_LSB 4
enum wmi_beacon_gen_mode {
WMI_BEACON_STAGGERED_MODE = 0,
WMI_BEACON_BURST_MODE = 1
@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
/* wal pdev resets */
__le32 pdev_resets;
/* frames dropped due to non-availability of stateless TIDs */
__le32 stateless_tid_alloc_failure;
__le32 phy_underrun;
/* MPDU is more than txop limit */
@ -2738,13 +2769,21 @@ enum wmi_stats_id {
WMI_REQUEST_AP_STAT = 0x02
};
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
};
struct wmi_request_stats_cmd {
__le32 stats_id;
/*
* Space to add parameters like
* peer mac addr
*/
__le32 vdev_id;
/* peer MAC address */
struct wmi_mac_addr peer_macaddr;
/* Instantaneous RSSI arguments */
struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
/* Suspend option */
@ -2795,7 +2834,7 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
struct wmi_pdev_stats {
struct wmi_pdev_stats_old {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@ -2806,6 +2845,23 @@ struct wmi_pdev_stats {
struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed;
struct wmi_pdev_stats_10x {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
__le32 rx_clear_count; /* rx clear count */
__le32 cycle_count; /* cycle count */
__le32 phy_err_count; /* Phy error count */
__le32 chan_tx_pwr; /* channel tx power */
struct wal_dbg_stats wal; /* WAL dbg stats */
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
__le32 fcs_bad;
__le32 no_beacons;
__le32 mib_int_count;
} __packed;
/*
* VDEV statistics
* TODO: add all VDEV stats here
@ -2818,12 +2874,19 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
struct wmi_peer_stats {
struct wmi_peer_stats_old {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
} __packed;
struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
__le32 peer_rx_rate;
} __packed;
struct wmi_vdev_create_cmd {
__le32 vdev_id;
__le32 vdev_type;
@ -4202,7 +4265,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);

@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
AR5K_TPC);
} else {
ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
AR5K_PHY_TXPOWER_RATE_MAX);
}
return 0;

@ -1,11 +1,19 @@
config ATH6KL
tristate "Atheros mobile chipsets support"
depends on CFG80211
---help---
This module adds core support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets. You still need separate
bus drivers for USB and SDIO to be able to use real devices.
If you choose to build it as a module, it will be called
ath6kl_core. Please note that AR6002 and AR6001 are not
supported by this driver.
config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@ -17,25 +25,31 @@ config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6004 chipset running over USB. This is still under
implementation and it isn't functional. If you choose to
build it as a module, it will be called ath6kl_usb.
Atheros AR6004 chipset and chipsets based on it running over
USB. If you choose to build it as a module, it will be
called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
depends on ATH6KL
---help---
Enables debug support
Enables ath6kl debug support, including debug messages
enabled with debug_mask module parameter and debugfs
interface.
If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
Select this to ath6kl use tracing infrastructure.
Select this to ath6kl use tracing infrastructure which, for
example, can be enabled with help of trace-cmd. All debug
messages and commands are delivered to using individually
enablable trace points.
If unsure, say Y to make it easier to debug problems.
@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
Enabling this makes it possible to change the regdomain in
the firmware. This can be only enabled if regulatory requirements
are taken into account.
If unsure, say N.

@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"added bss %pM to cfg80211\n", bssid);
kfree(ie);
} else
} else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
}
return bss;
}
@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
ssid_list[i].flag,
ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid);
}
/* Make sure no old entries are left behind */
@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
/* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {
/*
* Convert given nl80211 specific mask value to equivalent
* driver specific mask value and send it to the chip along
@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
ath6kl_set_cipher(vif, 0, true);
} else if (info->crypto.n_ciphers_pairwise == 1)
} else if (info->crypto.n_ciphers_pairwise == 1) {
ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
}
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (info->inactivity_timeout) {
inactivity_timeout = info->inactivity_timeout;
if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)

@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
"polling. This also specifies the polling interval in" \
"msecs. Set reocvery_enable for this to be effective");
MODULE_PARM_DESC(heart_beat_poll,
"Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{

@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_enable_reg)
{
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) {
@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
}
if (irq_enable_reg != NULL) {
@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[200];
@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[100];

@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg)
{
}
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}

@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) {
if (from_dma)
memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len);
@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
fail_setup:
return status;
}

@ -197,9 +197,9 @@ struct hif_scatter_req {
/* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf;
struct hif_scatter_item scat_list[1];
u32 scat_q_depth;
struct hif_scatter_item scat_list[0];
};
struct ath6kl_irq_proc_registers {
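The struct change above turns scat_list[] into a trailing zero-length array, which is what drives the matching allocation-size change in the SDIO scatter-request code later in this diff. A hedged sketch of that sizing idiom, with illustrative names only:

#include <linux/slab.h>

struct demo_scat_req {
	u32 n_entries;
	u32 entries[0];	/* storage for n_entries items follows the struct */
};

static struct demo_scat_req *demo_scat_req_alloc(u32 n, gfp_t gfp)
{
	/* with entries[0] the full element count is added to the base size;
	 * the old entries[1] layout needed only (n - 1) extra elements.
	 */
	return kzalloc(sizeof(struct demo_scat_req) + n * sizeof(u32), gfp);
}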

@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
else {
} else {
/*
* For the remaining data endpoints, we assume that
* each cred_per_msg are the same. We use a simple
@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
}
ath6kl_dbg(ATH6KL_DBG_CREDIT,
@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
enum htc_endpoint_id eid, unsigned int len,
int *req_cred)
{
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
unsigned int len;
while (true) {
flags = 0;
if (list_empty(&endpoint->txq))
@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
if (list_empty(&endpoint->txq))
break;
@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
list_add_tail(&packet->list, &container);
htc_tx_complete(endpoint, &container);
}
}
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
ep_cb = ep->ep_cb;
for (j = 0; j < n_msg; j++) {
/*
* Reset flag, any packets allocated using the
* rx_alloc() API cannot be recycled on
@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
}
}
if (list_empty(&ep->rx_bufq))
if (list_empty(&ep->rx_bufq)) {
packet = NULL;
else {
} else {
packet = list_first_entry(&ep->rx_bufq,
struct htc_packet, list);
list_del(&packet->list);
@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) {
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) {
@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
}
return 0;
}
static int htc_proc_trailer(struct htc_target *target,
@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
status = 0;
while (len > 0) {
if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM;
break;
@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
}
if (!fetched_pkts) {
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
look_aheads[0] = msg_look_ahead;
while (true) {
/*
* First lookahead sets the expected endpoint IDs for all
* packets in a bundle.
@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
packet->buf = packet->buf_start;
packet->endpoint = ENDPOINT_0;
list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
} else
} else {
list_add_tail(&packet->list, &target->free_ctrl_txbuf);
}
}
return 0;

@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
credits_required = 0;
} else {
if (ep->cred_dist.credits < credits_required)
break;
@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue);
}
}
static void get_htc_packet(struct htc_target *target,
@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
list_add(&packet->list, pkt_queue);
break;
}
}
if (status != 0) {
@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
*/
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n",
__func__, packet);
@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_move_tail(&packet->list,
&send_queue);
}
}
if (list_empty(&send_queue)) {
@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* enough transmit resources.
*/
while (true) {
if (get_queue_depth(&ep->txq) == 0)
break;
@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
}
spin_lock_bh(&target->tx_lock);
}
/* done with this endpoint, we can clear the count */
ep->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock);
@ -1106,7 +1100,6 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
dev_kfree_skb(skb);
return status;
}
static void htc_flush_rx_queue(struct htc_target *target,
@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
tx_alloc = 0;
} else {
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
if (tx_alloc == 0) {
status = -ENOMEM;

@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
if (board_ext_address &&
ar->fw_board_len == (board_data_size + board_ext_data_size)) {
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
"writing extended board data to 0x%x (%d B)\n",

@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{
struct ath6kl *ar = vif->ar;
vif->profile.ch = cpu_to_le16(channel);
@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{
struct ath6kl_vif *vif;
int res = 0;
@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
} else
} else {
ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
}
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
} else
} else {
netif_carrier_off(dev);
}
return 0;
}
@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
dev->features = features | NETIF_F_RXCSUM;
return err;
}
}
return err;

@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
memcpy(tbuf, buf, len);
bounced = true;
} else
} else {
tbuf = buf;
}
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
struct bus_request *req)
{
if (req->scat_req)
if (req->scat_req) {
ath6kl_sdio_scat_rw(ar_sdio, req);
else {
} else {
void *context;
int status;
@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_bh(&ar_sdio->scat_lock);
}
/* scatter gather read write request */
@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
if (request & HIF_SYNCHRONOUS)
if (request & HIF_SYNCHRONOUS) {
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
else {
} else {
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
goto cut_pwr;
@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
goto cut_pwr;
@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
/*
* Hit the credit counter with a 4-byte access, the first byte
* read will hit the counter and cause a decrement, while the

@ -289,7 +289,7 @@ struct host_interest {
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
u32 hi_test_apps_related ; /* 0xdc */
u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */

@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
spin_unlock_bh(&conn->psq_lock);
return false;
} else if (!conn->apsd_info)
} else if (!conn->apsd_info) {
return false;
}
if (test_bit(WMM_ENABLED, &vif->flags)) {
ether_type = be16_to_cpu(datap->h_proto);
@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie = NULL;
ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
skb, skb->len);
} else
} else {
cookie = ath6kl_alloc_cookie(ar);
}
if (cookie == NULL) {
spin_unlock_bh(&ar->lock);
@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */
u8 ac = 99; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
struct wmi_tx_meta_v2 meta_v2;
@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (ret)
goto fail_tx;
}
} else
} else {
goto fail_tx;
}
spin_lock_bh(&ar->lock);
@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
/* reap completed packets */
while (!list_empty(packet_queue)) {
packet = list_first_entry(packet_queue, struct htc_packet,
list);
list_del(&packet->list);
@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
} else
} else {
stats->num_hole++;
}
rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
return is_queued;
spin_lock_bh(&rxtid->lock);
for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
if (rxtid->hold_q[idx].skb) {
/*
* There is a frame in the queue and no
@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
is_apsdq_empty_at_start = is_apsdq_empty;
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->apsdq);
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!conn)
return;
aggr_conn = conn->aggr_conn;
} else
} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
}
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}
} else if (!is_broadcast_ether_addr(datap->h_dest))
} else if (!is_broadcast_ether_addr(datap->h_dest)) {
vif->net_stats.multicast++;
}
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
} else
} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
}
if (!aggr_conn)
return;
@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
}
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
} else
} else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
}
if (!aggr_conn)
return;

@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
break;
kfree(urb_context);
}
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
}
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,

@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
sizeof(struct ath6kl_llc_snap_hdr),
layer2_priority);
} else
} else {
usr_pri = layer2_priority & 0x7;
}
/*
* Queue the EAPOL frames in the same WMM_AC_VO queue
@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
sizeof(u32));
skb_pull(skb, hdr_size);
} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
}
datap = skb->data;
llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
{
struct ath6kl_wmi_regdomain *ev;
struct country_code_to_enum_rd *country = NULL;
struct reg_dmn_pair_mapping *regpair = NULL;
@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
ev = (struct ath6kl_wmi_regdomain *) datap;
reg_code = le32_to_cpu(ev->reg_code);
if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
country = ath6kl_regd_find_country((u16) reg_code);
else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
} else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
if (WARN_ON(!data_sync_bufs[index].skb))
goto free_data_skb;
@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
for (i = 0; i < WMM_NUM_AC; i++) {
if (stream_exist & (1 << i)) {
/*
* FIXME: Is this lock & unlock inside
* for loop correct? may need rework.
@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
cmd->asleep = cpu_to_le32(1);
} else
} else {
cmd->awake = cpu_to_le32(1);
}
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_HOST_SLEEP_MODE_CMDID,

@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
* flags here
*/
enum wmi_scan_ctrl_flags_bits {
/* set if can scan in the connect cmd */
CONNECT_SCAN_CTRL_FLAGS = 0x01,

@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e40, 0x0d261820},
{0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},

@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e40, 0x0d261820},
{0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},

@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e40, 0x0d261820},
{0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},

@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e40, 0x0d261820},
{0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
{0x0000a398, 0x00000000},
{0x0000a39c, 0x210d0401},
{0x0000a3a0, 0xab9a7144},
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},

@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
{0x0000a398, 0x1f020503},
{0x0000a39c, 0x29180c03},
{0x0000a3a0, 0x9a8b6844},
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x000000ff},
{0x0000a3a8, 0x6a6a6a6a},
{0x0000a3ac, 0x6a6a6a6a},

@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e40, 0x0d261820},
{0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},

@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
/* Stop tx traffic 1ms before the GO goes away */
#define ATH_P2P_PS_STOP_TIME 1000
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
#define IEEE80211_WEP_IVLEN 3
@ -367,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
struct ieee80211_vif *vif;
struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
/* P2P Client */
struct ieee80211_noa_data noa;
};
struct ath9k_vif_iter_data {
@ -464,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
void ath9k_p2p_ps_timer(void *priv);
void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
/**********/
/* BTCOEX */
@ -714,6 +723,9 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
struct ath_gen_timer *p2p_ps_timer;
struct ath_vif *p2p_ps_vif;
unsigned long driver_data;
u8 gtt_cnt;

@ -589,6 +589,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
NULL, sc, AR_FIRST_NDP_TIMER);
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
ath_fill_led_pin(sc);
@ -644,13 +647,13 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_WDS) },
{ .max = 8, .types =
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_AP) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
};
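The limits table above now carries the P2P interface types in their own entry. For context, a minimal sketch of how such ieee80211_iface_limit entries feed an ieee80211_iface_combination; the counts here are illustrative, not ath9k's:

#include <linux/bitops.h>
#include <net/mac80211.h>

static const struct ieee80211_iface_limit demo_limits[] = {
	{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
			     BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_combination demo_combination = {
	.limits = demo_limits,
	.n_limits = ARRAY_SIZE(demo_limits),
	.max_interfaces = 3,
	.num_different_channels = 1,
};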
@ -852,6 +855,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
if (sc->p2p_ps_timer)
ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)

@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
ath9k_p2p_ps_timer(sc);
return true;
}
@ -1119,6 +1121,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
avp->vif = vif;
an->sc = sc;
an->sta = NULL;
an->vif = vif;
@ -1163,6 +1167,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
return 0;
}
static void
ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
{
struct ath_hw *ah = sc->sc_ah;
s32 tsf, target_tsf;
if (!avp || !avp->noa.has_next_tsf)
return;
ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
tsf = ath9k_hw_gettsf32(sc->sc_ah);
target_tsf = avp->noa.next_tsf;
if (!avp->noa.absent)
target_tsf -= ATH_P2P_PS_STOP_TIME;
if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@ -1174,6 +1201,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
spin_lock_bh(&sc->sc_pcu_lock);
if (avp == sc->p2p_ps_vif) {
sc->p2p_ps_vif = NULL;
ath9k_update_p2p_ps_timer(sc, NULL);
}
spin_unlock_bh(&sc->sc_pcu_lock);
sc->nvifs--;
sc->tx99_vif = NULL;
@ -1636,6 +1670,72 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
ath9k_set_assoc_state(sc, vif);
}
void ath9k_p2p_ps_timer(void *priv)
{
struct ath_softc *sc = priv;
struct ath_vif *avp = sc->p2p_ps_vif;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
struct ath_node *an;
u32 tsf;
if (!avp)
return;
tsf = ath9k_hw_gettsf32(sc->sc_ah);
if (!avp->noa.absent)
tsf += ATH_P2P_PS_STOP_TIME;
if (!avp->noa.has_next_tsf ||
avp->noa.next_tsf - tsf > BIT(31))
ieee80211_update_p2p_noa(&avp->noa, tsf);
ath9k_update_p2p_ps_timer(sc, avp);
rcu_read_lock();
vif = avp->vif;
sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
if (!sta)
goto out;
an = (void *) sta->drv_priv;
if (an->sleeping == !!avp->noa.absent)
goto out;
an->sleeping = avp->noa.absent;
if (an->sleeping)
ath_tx_aggr_sleep(sta, sc, an);
else
ath_tx_aggr_wakeup(sc, an);
out:
rcu_read_unlock();
}
void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ath_vif *avp = (void *)vif->drv_priv;
unsigned long flags;
u32 tsf;
if (!sc->p2p_ps_timer)
return;
if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
return;
sc->p2p_ps_vif = avp;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (!(sc->ps_flags & PS_BEACON_SYNC)) {
tsf = ath9k_hw_gettsf32(sc->sc_ah);
ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
ath9k_update_p2p_ps_timer(sc, avp);
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@ -1710,6 +1810,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_P2P_PS) {
spin_lock_bh(&sc->sc_pcu_lock);
ath9k_update_p2p_ps(sc, vif);
spin_unlock_bh(&sc->sc_pcu_lock);
}
if (changed & CHECK_ANI)
ath_check_ani(sc);
@ -1883,7 +1989,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
return !!npend;
}
static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;

@ -539,6 +539,9 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
ath_dbg(common, PS,
"Reconfigure beacon timers based on synchronized timestamp\n");
ath9k_set_beacon(sc);
if (sc->p2p_ps_vif)
ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
}
if (ath_beacon_dtim_pending_cab(skb)) {

@ -1707,7 +1707,9 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
static void carl9170_op_flush(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ar9170 *ar = hw->priv;
unsigned int vid;

@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
}
if (isr)
wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil->isr_misc = 0;

@ -363,8 +363,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
wil_err(wil, "Firmware not ready\n");
return -ETIME;
} else {
wil_dbg_misc(wil, "FW ready after %d ms\n",
jiffies_to_msecs(to-left));
wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
jiffies_to_msecs(to-left), wil->hw_version);
}
return 0;
}

@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto release_irq;
wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
return 0;
release_irq:

@ -91,6 +91,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
spin_lock(&r->reorder_lock);
/* Due to the race between the WMI event reporting BACK (block ack)
* establishment and data Rx, a few packets may be passed up before the
* reorder buffer gets allocated. Catch up by pretending the SSN is what
* we see in the first Rx packet.
*/
if (r->first_time) {
r->first_time = false;
if (seq != r->head_seq_num) {
wil_err(wil, "Error: 1-st frame with wrong sequence"
" %d, should be %d. Fixing...\n", seq,
r->head_seq_num);
r->head_seq_num = seq;
r->ssn = seq;
}
}
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
dev_kfree_skb(skb);
@ -162,6 +178,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->head_seq_num = ssn;
r->buf_size = size;
r->stored_mpdu_num = 0;
r->first_time = true;
return r;
}
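Both the stale-frame check above (seq_less()) and the head_seq_num bookkeeping rely on wraparound-aware comparison over the 12-bit 802.11 sequence-number space. As a rough sketch of how such a comparison is typically written (the driver's actual helper may differ in detail):
/* Illustrative sketch, not copied from the driver: 802.11 sequence numbers
 * live in a 12-bit space, so "less than" is evaluated modulo 4096 with a
 * half-window threshold.
 */
static inline bool sn_less_sketch(u16 sn1, u16 sn2)
{
        return ((sn1 - sn2) & 0xfff) > 0x800;   /* sn1 precedes sn2 mod 4096 */
}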

View file

@ -35,7 +35,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
#define WIL6210_RX_RING_SIZE (128)
#define WIL6210_TX_RING_SIZE (128)
#define WIL6210_TX_RING_SIZE (512)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
@ -301,6 +301,7 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
bool first_time; /* is this the first time this buffer is used? */
};
struct wil6210_stats {

View file

@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
might_sleep();
if (!test_bit(wil_status_fwready, &wil->status)) {
wil_err(wil, "FW not ready\n");
wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
evt->mac, wil->n_mids);
wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
evt->mac, wil->n_mids);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
wil_dbg_wmi(wil, "WMI: FW ready\n");
wil_dbg_wmi(wil, "WMI: got FW ready event\n");
set_bit(wil_status_fwready, &wil->status);
/* reuse wmi_ready for the firmware ready indication */
@ -348,7 +348,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
{
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
bool aborted = (data->status != 0);
bool aborted = (data->status != WMI_SCAN_SUCCESS);
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
cfg80211_scan_done(wil->scan_request, aborted);
@ -802,6 +802,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
.pcp_max_assoc_sta = WIL6210_MAX_CID,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;

View file

@ -28,7 +28,7 @@
#define __WILOCITY_WMI_H__
/* General */
#define WILOCITY_MAX_ASSOC_STA (8)
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
__le16 disconnect_reason;
} __packed;
/*
* WMI_RECONNECT_CMDID
*/
struct wmi_reconnect_cmd {
u8 channel; /* hint */
u8 reserved;
u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
} __packed;
/*
* WMI_SET_PMK_CMDID
@ -296,11 +287,13 @@ enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
WMI_PBC_SCAN = 2,
WMI_ACTIVE_SCAN = 3,
WMI_DIRECT_SCAN = 4,
};
struct wmi_start_scan_cmd {
u8 reserved[8];
u8 direct_scan_mac_addr[6];
u8 reserved[2];
__le32 home_dwell_time; /* Max duration in the home channel(ms) */
__le32 force_scan_interval; /* Time interval between scans (ms)*/
u8 scan_type; /* wmi_scan_type */
@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
/*
* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
__le16 frag_num;
__le64 ss_mask;
u8 network_type;
u8 reserved;
u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
@ -450,7 +444,7 @@ enum wmi_port_role {
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
u8 midid;
u8 mid;
} __packed;
/*
@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
WMI_DISCOVERY_MODE_OFFLOAD = 1,
WMI_DISCOVERY_MODE_PEER2PEER = 2,
};
struct wmi_p2p_cfg_cmd {
@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
*/
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
u8 reserved0[10];
u8 pcp_max_assoc_sta;
u8 reserved0[9];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
@ -857,6 +853,7 @@ enum wmi_event_id {
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
struct wmi_disconnect_event {
__le16 protocol_reason_status; /* reason code, see 802.11 spec. */
u8 bssid[WMI_MAC_LEN]; /* set if known */
u8 disconnect_reason; /* see wmi_disconnect_reason_e */
u8 assoc_resp_len;
u8 assoc_info[0];
u8 disconnect_reason; /* see wmi_disconnect_reason */
u8 assoc_resp_len; /* not in use */
u8 assoc_info[0]; /* not in use */
} __packed;
/*
* WMI_SCAN_COMPLETE_EVENTID
*/
enum scan_status {
WMI_SCAN_SUCCESS = 0,
WMI_SCAN_FAILED = 1,
WMI_SCAN_ABORTED = 2,
WMI_SCAN_REJECTED = 3,
};
struct wmi_scan_complete_event {
__le32 status;
__le32 status; /* scan_status */
} __packed;
/*
@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
u8 channel; /* From Radio MNGR */
} __packed;
/*
* WMI_TX_MGMT_PACKET_EVENTID
*/
struct wmi_tx_mgmt_packet_event {
u8 payload[0];
} __packed;
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];

View file

@ -915,10 +915,6 @@ struct b43_wl {
char rng_name[30 + 1];
#endif /* CONFIG_B43_HWRNG */
/* List of all wireless devices on this chip */
struct list_head devlist;
u8 nr_devs;
bool radiotap_enabled;
bool radio_enabled;

View file

@ -1195,8 +1195,13 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
B43_BCMA_CLKCTLST_PHY_PLL_REQ;
u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
B43_BCMA_CLKCTLST_PHY_PLL_ST;
u32 flags;
flags = B43_BCMA_IOCTL_PHY_CLKEN;
if (gmode)
flags |= B43_BCMA_IOCTL_GMODE;
b43_device_enable(dev, flags);
b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
@ -3735,40 +3740,35 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
{
struct b43_wldev *up_dev = NULL;
struct b43_wldev *down_dev;
struct b43_wldev *d;
int err;
bool uninitialized_var(gmode);
int prev_status;
/* Find a device and PHY which supports the band. */
list_for_each_entry(d, &wl->devlist, list) {
switch (chan->band) {
case IEEE80211_BAND_5GHZ:
if (d->phy.supports_5ghz) {
up_dev = d;
gmode = false;
}
break;
case IEEE80211_BAND_2GHZ:
if (d->phy.supports_2ghz) {
up_dev = d;
gmode = true;
}
break;
default:
B43_WARN_ON(1);
return -EINVAL;
switch (chan->band) {
case IEEE80211_BAND_5GHZ:
if (wl->current_dev->phy.supports_5ghz) {
up_dev = wl->current_dev;
gmode = false;
}
if (up_dev)
break;
break;
case IEEE80211_BAND_2GHZ:
if (wl->current_dev->phy.supports_2ghz) {
up_dev = wl->current_dev;
gmode = true;
}
break;
default:
B43_WARN_ON(1);
return -EINVAL;
}
if (!up_dev) {
b43err(wl, "Could not find a device for %s-GHz band operation\n",
band_to_string(chan->band));
return -ENODEV;
}
if ((up_dev == wl->current_dev) &&
(!!wl->current_dev->phy.gmode == !!gmode)) {
if (!!wl->current_dev->phy.gmode == !!gmode) {
/* This device is already running. */
return 0;
}
@ -5178,7 +5178,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@ -5270,7 +5269,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
wl->nr_devs--;
b43_bus_set_wldev(dev, NULL);
kfree(wldev);
}
@ -5295,8 +5293,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
if (err)
goto err_kfree_wldev;
list_add(&wldev->list, &wl->devlist);
wl->nr_devs++;
b43_bus_set_wldev(dev, wldev);
b43_debugfs_add_device(wldev);
@ -5386,7 +5382,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl->hw = hw;
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
@ -5486,39 +5481,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
struct b43_bus_dev *dev;
struct b43_wl *wl;
int err;
int first = 0;
dev = b43_bus_dev_ssb_init(sdev);
if (!dev)
return -ENOMEM;
wl = ssb_get_devtypedata(sdev);
if (!wl) {
/* Probing the first core. Must setup common struct b43_wl */
first = 1;
b43_sprom_fixup(sdev->bus);
wl = b43_wireless_init(dev);
if (IS_ERR(wl)) {
err = PTR_ERR(wl);
goto out;
}
ssb_set_devtypedata(sdev, wl);
B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
if (wl) {
b43err(NULL, "Dual-core devices are not supported\n");
err = -ENOTSUPP;
goto err_ssb_kfree_dev;
}
b43_sprom_fixup(sdev->bus);
wl = b43_wireless_init(dev);
if (IS_ERR(wl)) {
err = PTR_ERR(wl);
goto err_ssb_kfree_dev;
}
ssb_set_devtypedata(sdev, wl);
B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
err = b43_one_core_attach(dev, wl);
if (err)
goto err_wireless_exit;
goto err_ssb_wireless_exit;
/* setup and start work to load firmware */
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
out:
return err;
err_wireless_exit:
if (first)
b43_wireless_exit(dev, wl);
err_ssb_wireless_exit:
b43_wireless_exit(dev, wl);
err_ssb_kfree_dev:
kfree(dev);
return err;
}
@ -5546,13 +5544,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* Unregister HW RNG driver */
b43_rng_exit(wl);
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
* We can destroy common struct b43_wl.
*/
b43_wireless_exit(dev, wl);
}
b43_leds_unregister(wl);
b43_wireless_exit(dev, wl);
}
static struct ssb_driver b43_ssb_driver = {

View file

@ -96,7 +96,7 @@ int b43_phy_init(struct b43_wldev *dev)
phy->channel = ops->get_default_chan(dev);
ops->software_rfkill(dev, false);
b43_software_rfkill(dev, false);
err = ops->init(dev);
if (err) {
b43err(dev->wl, "PHY init failed\n");
@ -116,7 +116,7 @@ int b43_phy_init(struct b43_wldev *dev)
if (ops->exit)
ops->exit(dev);
err_block_rf:
ops->software_rfkill(dev, true);
b43_software_rfkill(dev, true);
return err;
}
@ -125,7 +125,7 @@ void b43_phy_exit(struct b43_wldev *dev)
{
const struct b43_phy_operations *ops = dev->phy.ops;
ops->software_rfkill(dev, true);
b43_software_rfkill(dev, true);
if (ops->exit)
ops->exit(dev);
}

View file

@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
static void b43_phy_initb6(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_radio_write16(dev, 0x50, 0x20);
}
if (phy->radio_rev <= 2) {
b43_radio_write16(dev, 0x7C, 0x20);
b43_radio_write16(dev, 0x50, 0x20);
b43_radio_write16(dev, 0x5A, 0x70);
b43_radio_write16(dev, 0x5B, 0x7B);
b43_radio_write16(dev, 0x5C, 0xB0);
@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_phy_write(dev, 0x2A, 0x8AC0);
b43_phy_write(dev, 0x0038, 0x0668);
b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
if (phy->radio_rev <= 5) {
if (phy->radio_rev == 4 || phy->radio_rev == 5)
b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
}
if (phy->radio_rev <= 2)
b43_radio_write16(dev, 0x005D, 0x000D);

View file

@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
}
}
static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
enum n_intc_override intc_override,
u16 value, u8 core_sel)
{
u16 reg, tmp, tmp2, val;
int core;
for (core = 0; core < 2; core++) {
if ((core_sel == 1 && core != 0) ||
(core_sel == 2 && core != 1))
continue;
reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
switch (intc_override) {
case N_INTC_OVERRIDE_OFF:
b43_phy_write(dev, reg, 0);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
break;
case N_INTC_OVERRIDE_TRSW:
b43_phy_maskset(dev, reg, ~0xC0, value << 6);
b43_phy_set(dev, reg, 0x400);
b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
b43_phy_set(dev, 0x2ff, 0x2000);
b43_phy_set(dev, 0x2ff, 0x0001);
break;
case N_INTC_OVERRIDE_PA:
tmp = 0x0030;
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
val = value << 5;
else
val = value << 4;
b43_phy_maskset(dev, reg, ~tmp, val);
b43_phy_set(dev, reg, 0x1000);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
tmp = 0x0001;
tmp2 = 0x0004;
val = value;
} else {
tmp = 0x0004;
tmp2 = 0x0001;
val = value << 2;
}
b43_phy_maskset(dev, reg, ~tmp, val);
b43_phy_mask(dev, reg, ~tmp2);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
tmp = 0x0002;
tmp2 = 0x0008;
val = value << 1;
} else {
tmp = 0x0008;
tmp2 = 0x0002;
val = value << 3;
}
b43_phy_maskset(dev, reg, ~tmp, val);
b43_phy_mask(dev, reg, ~tmp2);
break;
}
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
enum n_intc_override intc_override,
@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
u8 i, j;
u16 reg, tmp, val;
if (dev->phy.rev >= 7) {
b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
core);
return;
}
B43_WARN_ON(dev->phy.rev < 3);
for (i = 0; i < 2; i++) {
@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
static const u16 clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 0x7, 0);
b43_nphy_classifier(dev, 0x7,
B43_NPHY_CLASSCTL_WAITEDEN);
b43_nphy_read_clip_detection(dev, nphy->clip_state);
b43_nphy_write_clip_detection(dev, clip);
}
@ -734,9 +807,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
u16 bias, cbias;
u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
bool is_pkg_fab_smic;
B43_WARN_ON(dev->phy.rev < 3);
is_pkg_fab_smic =
((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
b43_chantab_radio_2056_upload(dev, e);
b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
@ -744,7 +824,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
if (dev->dev->chip_id == 0x4716) {
if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
} else {
@ -752,6 +833,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
}
}
if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
}
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@ -767,7 +855,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADG_IDAC, 0xcc);
if (dev->dev->chip_id == 0x4716) {
if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
@ -776,6 +865,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
} else {
bias = 0x25;
cbias = 0x20;
if (is_pkg_fab_smic) {
bias = 0x2a;
cbias = 0x38;
}
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
@ -844,6 +937,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
mixa_boost = 0xF;
}
cbias = is_pkg_fab_smic ? 0x35 : 0x30;
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
@ -862,11 +957,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADA_CASCBIAS, 0x03);
b43_radio_write(dev,
offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
b43_radio_write(dev,
offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
b43_radio_write(dev,
offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
offset | B2056_TX_INTPAA_CASCBIAS, cbias);
}
}
@ -1164,23 +1259,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
u16 seq_mode;
u32 tmp;
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, true);
b43_nphy_stay_in_carrier_search(dev, true);
if ((nphy->bb_mult_save & 0x80000000) == 0) {
tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
}
/* TODO: add modify_bbmult argument */
if (!dev->phy.is_40mhz)
tmp = 0x6464;
else
tmp = 0x4747;
b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, false);
b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
if (loops != 0xFFFF)
@ -1213,6 +1305,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43err(dev->wl, "run samples timeout\n");
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
b43_nphy_stay_in_carrier_search(dev, false);
}
/**************************************************
@ -1588,8 +1682,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
struct b43_phy_n *nphy = dev->phy.n;
u16 saved_regs_phy_rfctl[2];
u16 saved_regs_phy[13];
u16 regs_to_store[] = {
u16 saved_regs_phy[22];
u16 regs_to_store_rev3[] = {
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@ -1598,6 +1692,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
};
u16 regs_to_store_rev7[] = {
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
0x342, 0x343, 0x346, 0x347,
0x2ff,
B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
B43_NPHY_RFCTL_CMD,
B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
0x340, 0x341, 0x344, 0x345,
B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
};
u16 *regs_to_store;
int regs_amount;
u16 class;
@ -1617,6 +1725,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u8 rx_core_state;
int core, i, j, vcm;
if (dev->phy.rev >= 7) {
regs_to_store = regs_to_store_rev7;
regs_amount = ARRAY_SIZE(regs_to_store_rev7);
} else {
regs_to_store = regs_to_store_rev3;
regs_amount = ARRAY_SIZE(regs_to_store_rev3);
}
BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
class = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 7, 4);
b43_nphy_read_clip_detection(dev, clip_state);
@ -1624,22 +1741,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
for (i = 0; i < regs_amount; i++)
saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
if (dev->phy.rev >= 7) {
/* TODO */
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
} else {
}
} else {
b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
} else {
b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
}
}
rx_core_state = b43_nphy_get_rx_core_state(dev);
@ -1654,8 +1778,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
/* Grab RSSI results for every possible VCM */
for (vcm = 0; vcm < 8; vcm++) {
b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
vcm << 2);
if (dev->phy.rev >= 7)
;
else
b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
0xE3, vcm << 2);
b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
}
@ -1682,8 +1809,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
/* Select the best VCM */
b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
vcm_final << 2);
if (dev->phy.rev >= 7)
;
else
b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
0xE3, vcm_final << 2);
for (i = 0; i < 4; i++) {
if (core != i / 2)
@ -1736,9 +1866,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
for (i = 0; i < regs_amount; i++)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
@ -2494,8 +2624,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */
u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
/* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
@ -2503,6 +2633,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
u16 vmids[5][4] = {
{ 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
{ 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
{ 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
{ 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
{ 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
};
u16 gains[5][4] = {
{ 0x02, 0x02, 0x02, 0x00, }, /* 0 */
{ 0x02, 0x02, 0x02, 0x02, }, /* 1 */
{ 0x02, 0x02, 0x02, 0x04, }, /* 2 */
{ 0x02, 0x02, 0x02, 0x00, }, /* 3 */
{ 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
};
u16 *vmid, *gain;
u8 pdet_range;
u16 tmp16;
u32 tmp32;
@ -2561,7 +2708,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
/* TODO */
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
pdet_range = sprom->fem.ghz2.pdet_range;
else
pdet_range = sprom->fem.ghz5.pdet_range;
vmid = vmids[min_t(u16, pdet_range, 4)];
gain = gains[min_t(u16, pdet_range, 4)];
switch (pdet_range) {
case 3:
if (!(dev->phy.rev >= 4 &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
break;
/* FALL THROUGH */
case 0:
case 1:
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
break;
case 2:
if (dev->phy.rev >= 6) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
vmid[3] = 0x94;
else
vmid[3] = 0x8e;
gain[3] = 3;
} else if (dev->phy.rev == 5) {
vmid[3] = 0x84;
gain[3] = 2;
}
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
break;
case 4:
case 5:
if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
if (pdet_range == 4) {
vmid[3] = 0x8e;
tmp16 = 0x96;
gain[3] = 0x2;
} else {
vmid[3] = 0x89;
tmp16 = 0x89;
gain[3] = 0;
}
} else {
if (pdet_range == 4) {
vmid[3] = 0x89;
tmp16 = 0x8b;
gain[3] = 0x2;
} else {
vmid[3] = 0x74;
tmp16 = 0x70;
gain[3] = 0;
}
}
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
vmid[3] = tmp16;
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
break;
}
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@ -2600,7 +2811,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* Dropped probably-always-true condition */
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@ -3211,6 +3422,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
u8 idx, delta;
u8 i, stf_mode;
/* Array adj_pwr_tbl corresponds to the hardware table. It consists of
* 21 groups, each containing 4 entries.
*
* The first group holds the CCK entries.
* Each of the remaining groups has one entry per Tx mode (SISO, CDD, STBC, SDM).
*
* Group 0 is for CCK
* Groups 1..4 use BPSK (group per coding rate)
* Groups 5..8 use QPSK (group per coding rate)
* Groups 9..12 use 16-QAM (group per coding rate)
* Groups 13..16 use 64-QAM (group per coding rate)
* Groups 17..20 are unknown
*/
for (i = 0; i < 4; i++)
nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
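Given the layout described in the comment above (21 groups of 4 entries written out as one flat 84-entry table), a flat index for a particular group and entry would be formed as in the sketch below; the helper name is a hypothetical illustration, not something this patch adds:
/* Illustrative assumption about the flat layout: group g (0..20), entry e (0..3). */
static inline unsigned int adj_pwr_tbl_index(unsigned int g, unsigned int e)
{
        return g * 4 + e;
}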
@ -3409,10 +3634,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
}
b43_nphy_tx_prepare_adjusted_power_table(dev);
/*
b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
*/
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, false);
@ -5124,7 +5347,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
b43_nphy_tx_lp_fbw(dev);
if (phy->rev >= 3)
b43_nphy_spur_workaround(dev);
@ -5441,8 +5664,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
/* Register 1 is a 32-bit register. */
B43_WARN_ON(reg == 1);
/* N-PHY needs 0x100 for read access */
reg |= 0x100;
if (dev->phy.rev >= 7)
reg |= 0x200; /* Radio 0x2057 */
else
reg |= 0x100;
b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);

View file

@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
unsigned int rx_length;
};
static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
#define INITTABSPTS(prefix) \
.syn = prefix##_syn, \
.syn_length = ARRAY_SIZE(prefix##_syn), \
.tx = prefix##_tx, \
.tx_length = ARRAY_SIZE(prefix##_tx), \
.rx = prefix##_rx, \
.rx_length = ARRAY_SIZE(prefix##_rx)
static const struct b2056_inittabs_pts b2056_inittabs[] = {
[3] = { INITTABSPTS(b2056_inittab_rev3) },
[4] = { INITTABSPTS(b2056_inittab_rev4) },
[5] = { INITTABSPTS(b2056_inittab_rev5) },
[6] = { INITTABSPTS(b2056_inittab_rev6) },
[7] = { INITTABSPTS(b2056_inittab_rev7) },
[8] = { INITTABSPTS(b2056_inittab_rev8) },
[9] = { INITTABSPTS(b2056_inittab_rev7) },
static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
[B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
[B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
[B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
[B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
[B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
[B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
[B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
[B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
[B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
[B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
[B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
[B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
[B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
[B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
[B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
[B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
};
static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
[B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
[B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
[B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
[B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
[B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
[B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
[B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
[B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
[B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
[B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
[B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
[B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
[B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
[B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
[B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
};
#define INITTABSPTS(prefix) \
static const struct b2056_inittabs_pts prefix = { \
.syn = prefix##_syn, \
.syn_length = ARRAY_SIZE(prefix##_syn), \
.tx = prefix##_tx, \
.tx_length = ARRAY_SIZE(prefix##_tx), \
.rx = prefix##_rx, \
.rx_length = ARRAY_SIZE(prefix##_rx), \
}
INITTABSPTS(b2056_inittab_phy_rev3);
INITTABSPTS(b2056_inittab_phy_rev4);
INITTABSPTS(b2056_inittab_radio_rev5);
INITTABSPTS(b2056_inittab_radio_rev6);
INITTABSPTS(b2056_inittab_radio_rev7_9);
INITTABSPTS(b2056_inittab_radio_rev8);
INITTABSPTS(b2056_inittab_radio_rev11);
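Each invocation above instantiates one pointer-and-length table; for instance, INITTABSPTS(b2056_inittab_radio_rev5) expands (modulo whitespace) to:
static const struct b2056_inittabs_pts b2056_inittab_radio_rev5 = {
        .syn = b2056_inittab_radio_rev5_syn,
        .syn_length = ARRAY_SIZE(b2056_inittab_radio_rev5_syn),
        .tx = b2056_inittab_radio_rev5_tx,
        .tx_length = ARRAY_SIZE(b2056_inittab_radio_rev5_tx),
        .rx = b2056_inittab_radio_rev5_rx,
        .rx_length = ARRAY_SIZE(b2056_inittab_radio_rev5_rx),
};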
#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
.phy_regs.phy_bw6 = r5
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@ -9011,6 +9056,236 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
},
};
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
{
.freq = 5180,
RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
0x00, 0x0e, 0x00, 0x6f, 0x00),
PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
},
{
.freq = 5200,
RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
0x00, 0x0d, 0x00, 0x6f, 0x00),
PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
},
{
.freq = 5220,
RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
0x00, 0x0d, 0x00, 0x6f, 0x00),
PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
},
{
.freq = 5745,
RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
0x00, 0x06, 0x00, 0x6d, 0x00),
PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
},
{
.freq = 5765,
RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
0x00, 0x05, 0x00, 0x6c, 0x00),
PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
},
{
.freq = 5785,
RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
0x00, 0x05, 0x00, 0x6b, 0x00),
PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
},
{
.freq = 5805,
RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
0x00, 0x05, 0x00, 0x6a, 0x00),
PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
},
{
.freq = 5825,
RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
0x00, 0x05, 0x00, 0x69, 0x00),
PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
},
{
.freq = 2412,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0b, 0x00, 0x0a),
PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
},
{
.freq = 2417,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0b, 0x00, 0x0a),
PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
},
{
.freq = 2422,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0b, 0x00, 0x0a),
PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
},
{
.freq = 2427,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x0a),
PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
},
{
.freq = 2432,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x0a),
PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
},
{
.freq = 2437,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x0a),
PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
},
{
.freq = 2442,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x0a),
PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
},
{
.freq = 2447,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x09),
PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
},
{
.freq = 2452,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x09),
PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
},
{
.freq = 2457,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
0x70, 0x00, 0x0a, 0x00, 0x09),
PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
},
{
.freq = 2462,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
0x70, 0x00, 0x09, 0x00, 0x09),
PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
},
{
.freq = 2467,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
0x70, 0x00, 0x09, 0x00, 0x09),
PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
},
{
.freq = 2472,
RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
0x70, 0x00, 0x09, 0x00, 0x09),
PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
},
{
.freq = 2484,
RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
0x70, 0x00, 0x09, 0x00, 0x09),
PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
},
};
static const struct b2056_inittabs_pts
*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
switch (dev->phy.rev) {
case 3:
return &b2056_inittab_phy_rev3;
case 4:
return &b2056_inittab_phy_rev4;
default:
switch (phy->radio_rev) {
case 5:
return &b2056_inittab_radio_rev5;
case 6:
return &b2056_inittab_radio_rev6;
case 7:
case 9:
return &b2056_inittab_radio_rev7_9;
case 8:
return &b2056_inittab_radio_rev8;
case 11:
return &b2056_inittab_radio_rev11;
}
}
return NULL;
}
static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
bool ignore_uploadflag, u16 routing,
const struct b2056_inittab_entry *e,
@ -9037,11 +9312,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
{
const struct b2056_inittabs_pts *pts;
if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
pts = b43_nphy_get_inittabs_rev3(dev);
if (!pts) {
B43_WARN_ON(1);
return;
}
pts = &b2056_inittabs[dev->phy.rev];
b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
B2056_SYN, pts->syn, pts->syn_length);
@ -9060,11 +9335,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b2056_inittabs_pts *pts;
const struct b2056_inittab_entry *e;
if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
pts = b43_nphy_get_inittabs_rev3(dev);
if (!pts) {
B43_WARN_ON(1);
return;
}
pts = &b2056_inittabs[dev->phy.rev];
e = &pts->syn[B2056_SYN_PLL_CP2];
b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@ -9073,38 +9349,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
struct b43_phy *phy = &dev->phy;
const struct b43_nphy_channeltab_entry_rev3 *e;
unsigned int length, i;
switch (dev->phy.rev) {
switch (phy->rev) {
case 3:
e = b43_nphy_channeltab_rev3;
length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
e = b43_nphy_channeltab_phy_rev3;
length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
break;
case 4:
e = b43_nphy_channeltab_rev4;
length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
break;
case 5:
e = b43_nphy_channeltab_rev5;
length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
break;
case 6:
e = b43_nphy_channeltab_rev6;
length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
break;
case 7:
case 9:
e = b43_nphy_channeltab_rev7_9;
length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
break;
case 8:
e = b43_nphy_channeltab_rev8;
length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
e = b43_nphy_channeltab_phy_rev4;
length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
break;
default:
B43_WARN_ON(1);
return NULL;
switch (phy->radio_rev) {
case 5:
e = b43_nphy_channeltab_radio_rev5;
length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
break;
case 6:
e = b43_nphy_channeltab_radio_rev6;
length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
break;
case 7:
case 9:
e = b43_nphy_channeltab_radio_rev7_9;
length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
break;
case 8:
e = b43_nphy_channeltab_radio_rev8;
length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
break;
case 11:
e = b43_nphy_channeltab_radio_rev11;
length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
break;
default:
B43_WARN_ON(1);
return NULL;
}
}
for (i = 0; i < length; i++, e++) {

View file

@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
};
static const u32 b43_ntab_noisevar0_r3[] = {
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
};
static const u32 b43_ntab_noisevar1_r3[] = {
static const u32 b43_ntab_noisevar_r3[] = {
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@ -3114,8 +3047,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);

View file

@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
#define B43_NTAB_NOISEVAR_R3 B43_NTAB32(16, 0) /* noise variance */
#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */

View file

@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
{
b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
}
static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)

View file

@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
return result;
}
static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;

View file

@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
return ret;
}
void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct cw1200_common *priv = hw->priv;

View file

@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
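Several hunks above (brcms, cw1200, and the iwlegacy/iwlwifi handlers further down) move the drivers to a flush callback that also receives the interface pointer. A minimal standalone model of the new callback shape; the ieee80211 structs are stubbed here purely for illustration and are not the real mac80211 definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ieee80211_hw { void *priv; };		/* stub, illustration only */
struct ieee80211_vif { const char *name; };	/* stub, illustration only */

/* New-style handler: per-vif flush, 'queues' is a bitmap of HW queues. */
static void demo_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       uint32_t queues, bool drop)
{
	for (int q = 0; q < 32; q++)
		if (queues & (1u << q))
			printf("%s: %s queue %d for vif %s\n",
			       (char *)hw->priv, drop ? "drop" : "flush",
			       q, vif ? vif->name : "(all)");
}

int main(void)
{
	struct ieee80211_hw hw = { .priv = "demo" };
	struct ieee80211_vif vif = { .name = "wlan0" };

	demo_flush(&hw, &vif, 0x5, false);	/* queues 0 and 2 */
	return 0;
}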

View file

@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.flag |= RX_FLAG_SHORTPRE;
if ((unlikely(rx_stats->phy_count > 20))) {
D_DROP("dsp size out of range [0,20]: %d/n",
D_DROP("dsp size out of range [0,20]: %d\n",
rx_stats->phy_count);
return;
}

View file

@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
D_DROP("dsp size out of range [0,20]: %d/n",
D_DROP("dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}

View file

@ -4755,7 +4755,8 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL(il_mac_change_interface);
void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct il_priv *il = hw->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(500);

View file

@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);

View file

@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(priv->trans);
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);

View file

@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(priv->trans);
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");

View file

@ -2053,6 +2053,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
static void iwl_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@ -2065,6 +2076,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
.napi_add = iwl_napi_add,
};
/*****************************************************************************

View file

@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
.scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl1000_ht_params = {

View file

@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {

View file

@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
.scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {

View file

@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6050_base_params = {
@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl6000_ht_params = {

View file

@ -71,12 +71,12 @@
#define IWL3160_UCODE_API_MAX 9
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 8
#define IWL3160_UCODE_API_OK 8
#define IWL7260_UCODE_API_OK 9
#define IWL3160_UCODE_API_OK 9
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
#define IWL3160_UCODE_API_MIN 7
#define IWL7260_UCODE_API_MIN 8
#define IWL3160_UCODE_API_MIN 8
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
.apmg_wake_up_wa = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {

View file

@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
@ -160,6 +163,8 @@ struct iwl_base_params {
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
const bool apmg_wake_up_wa;
const bool scd_chain_ext_wa;
};
/*

View file

@ -77,26 +77,21 @@
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
* connection when going back to D0
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
* @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@ -104,22 +99,15 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
@ -183,6 +171,7 @@ enum iwl_ucode_sec {
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;

View file

@ -134,12 +134,13 @@ static const u8 iwl_nvm_channels_family_8000[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
#define NUM_2GHZ_CHANNELS 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161
#define NUM_2GHZ_CHANNELS 14
#define NUM_2GHZ_CHANNELS_FAMILY_8000 13
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161
#define DEFAULT_MAX_TX_POWER 16
@ -202,21 +203,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
int num_of_ch;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
nvm_chan = &iwl_nvm_channels_family_8000[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
if (ch_idx >= NUM_2GHZ_CHANNELS &&
if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~NVM_CHANNEL_VALID;
@ -225,7 +228,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
ch_flags,
(ch_idx >= NUM_2GHZ_CHANNELS) ?
(ch_idx >= num_2ghz_channels) ?
"5.2" : "2.4");
continue;
}
@ -234,7 +237,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
@ -242,7 +245,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* TODO: Need to be dependent to the NVM */
channel->flags = IEEE80211_CHAN_NO_HT40;
if (ch_idx < NUM_2GHZ_CHANNELS &&
if (ch_idx < num_2ghz_channels &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@ -250,7 +253,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
if ((ch_idx - num_2ghz_channels) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
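The hunk above stops hard-coding NUM_2GHZ_CHANNELS so the 8000 family, which has 13 rather than 14 2.4 GHz entries, maps its channel indices correctly. A standalone sketch of the index-based band split and the HT40+/HT40- parity rule used for 5 GHz; the count and indices below are illustrative, not the real NVM data:

#include <stdbool.h>
#include <stdio.h>

/* Returns true for 2.4 GHz given the per-family count of 2 GHz entries. */
static bool is_2ghz(int ch_idx, int num_2ghz_channels)
{
	return ch_idx < num_2ghz_channels;
}

/* For 5 GHz 40 MHz channels: an even offset from the end of the 2 GHz
 * block allows HT40+, an odd offset allows HT40-, mirroring the parity
 * check in the hunk above. */
static const char *ht40_side(int ch_idx, int num_2ghz_channels)
{
	return ((ch_idx - num_2ghz_channels) % 2 == 0) ? "HT40+" : "HT40-";
}

int main(void)
{
	int num_2ghz = 13;			/* family-8000 style count */

	for (int idx = 11; idx < 17; idx++)
		printf("idx %2d: %s %s\n", idx,
		       is_2ghz(idx, num_2ghz) ? "2.4GHz" : "5GHz",
		       is_2ghz(idx, num_2ghz) ? "" : ht40_side(idx, num_2ghz));
	return 0;
}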

View file

@ -63,6 +63,7 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
#include <linux/netdevice.h>
#include <linux/debugfs.h>
struct iwl_op_mode;
@ -112,8 +113,11 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to.
* This callback may sleep, it is called from a threaded IRQ handler.
* HCMD this Rx responds to. Can't sleep.
* @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
* but the higher layers need to know about it (in particular mac80211 to
* be able to call the right NAPI RX functions); this function is needed
* to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (*napi_add)(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
if (!op_mode->ops->napi_add)
return;
op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
}
#endif /* __iwl_op_mode_h__ */
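iwl_op_mode_napi_add() above is a thin guard around an optional callback: if the op_mode does not implement napi_add, the call is simply skipped. A standalone model of that optional-ops dispatch pattern, with hypothetical names:

#include <stdio.h>

struct demo_ops {
	void (*napi_add)(int weight);	/* optional; may be NULL */
};

static void demo_op_napi_add(const struct demo_ops *ops, int weight)
{
	if (!ops->napi_add)		/* this op_mode does not care about NAPI */
		return;
	ops->napi_add(weight);
}

static void dvm_style_napi_add(int weight)
{
	printf("registering NAPI poll, weight=%d\n", weight);
}

int main(void)
{
	struct demo_ops with = { .napi_add = dvm_style_napi_add };
	struct demo_ops without = { 0 };

	demo_op_napi_add(&with, 64);	/* dispatches to the callback */
	demo_op_napi_add(&without, 64);	/* silently skipped */
	return 0;
}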

View file

@ -348,4 +348,12 @@ enum secure_load_status_reg {
#define LMPM_SECURE_TIME_OUT (100)
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
#endif /* __iwl_prph_h__ */

View file

@ -437,8 +437,7 @@ struct iwl_trans;
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@ -490,7 +489,7 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@ -759,12 +758,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
u32 txq_bm)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->wait_tx_queue_empty(trans);
return trans->ops->wait_tx_queue_empty(trans, txq_bm);
}
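The trans op above now takes a queue bitmap, and the DVM call sites earlier in this diff pass 0xffffffff to keep the old wait-for-everything behaviour. A standalone sketch of draining only the queues selected by such a bitmap; the queue state is simulated here, whereas the real implementation polls hardware read/write pointers:

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 8

static int queue_len[NUM_QUEUES] = { 0, 3, 0, 1, 0, 0, 2, 0 };

/* "Wait" (here: drain a counter) until every queue set in txq_bm is empty. */
static int wait_tx_queue_empty(uint32_t txq_bm)
{
	for (int q = 0; q < NUM_QUEUES; q++) {
		if (!(txq_bm & (1u << q)))
			continue;		/* caller did not ask for this queue */
		while (queue_len[q] > 0)
			queue_len[q]--;		/* stand-in for frames completing */
		printf("queue %d empty\n", q);
	}
	return 0;
}

int main(void)
{
	wait_tx_queue_empty(0xffffffff);	/* all queues, as the DVM callers do */
	return 0;
}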
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,

View file

@ -104,11 +104,8 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
#define BT_ANTENNA_COUPLING_THRESHOLD (30)
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
return 0;
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
@ -573,8 +570,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
int ret;
u32 flags;
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
return 0;
ret = iwl_send_bt_prio_tbl(mvm);
if (ret)
return ret;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@ -582,10 +580,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
bt_cmd->bt4_tx_rx_max_freq0 = 15,
bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
bt_cmd->bt4_tx_rx_max_freq0 = 15;
bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
@ -1215,6 +1215,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
enum ieee80211_band band)
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
if (band != IEEE80211_BAND_2GHZ)
return false;
return bt_activity >= BT_LOW_TRAFFIC;
}
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
@ -1249,9 +1260,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
return;
iwl_mvm_bt_coex_notif_handle(mvm);
}
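One of the coex.c fixes above replaces trailing commas with semicolons on the bt_cmd field assignments. The comma form still compiles and assigns every field, because the commas chain the assignments into one comma-operator expression, which is why it went unnoticed; the fix is purely about using plain statements. A tiny standalone illustration with a made-up struct:

#include <stdio.h>

struct bt_cfg { int isolation_thr, isolation, tx_tx_thr, tx_rx_max; };

int main(void)
{
	struct bt_cfg a, b;

	/* Old style: trailing commas form one comma-operator expression.
	 * Every field is still assigned, but it merely looks like
	 * separate statements. */
	a.isolation_thr = 30,
	a.isolation = 0,
	a.tx_tx_thr = 15,
	a.tx_rx_max = 15;

	/* New style: one plain statement per line. */
	b.isolation_thr = 30;
	b.isolation = 0;
	b.tx_tx_thr = 15;
	b.tx_rx_max = 15;

	printf("%d %d\n", a.tx_rx_max, b.tx_rx_max);
	return 0;
}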

View file

@ -744,10 +744,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
int err;
u32 size;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
cmd.data[0] = &query_cmd;
cmd.len[0] = sizeof(query_cmd);
}
cmd.data[0] = &query_cmd;
cmd.len[0] = sizeof(query_cmd);
err = iwl_mvm_send_cmd(mvm, &cmd);
if (err)
@ -758,10 +756,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
err = -EINVAL;
} else {
err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
/* new API returns next, not last-used seqno */
if (mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
err = (u16) (err - 0x10);
/* firmware returns next, not last-used seqno */
err = (u16) (err - 0x10);
}
iwl_free_resp(&cmd);
@ -785,10 +781,6 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->seqno_valid = false;
if (!(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
return;
if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
sizeof(query_cmd), &query_cmd))
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
@ -1082,6 +1074,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
if (iwl_mvm_is_d0i3_supported(mvm)) {
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
return 0;
}
return __iwl_mvm_suspend(hw, wowlan, false);
}
@ -1277,7 +1278,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
}
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
struct iwl_wowlan_status_v6 *status)
struct iwl_wowlan_status *status)
{
union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
@ -1294,7 +1295,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
}
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_wowlan_status_v6 *status;
struct iwl_wowlan_status *status;
void *last_gtk;
u32 cipher;
bool find_phase, unhandled_cipher;
@ -1370,7 +1371,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_wowlan_status_v6 *status)
struct iwl_wowlan_status *status)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@ -1468,7 +1469,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
struct iwl_wowlan_status_data status;
struct iwl_wowlan_status_v6 *status_v6;
struct iwl_wowlan_status *fw_status;
int ret, len, status_size, i;
bool keep;
struct ieee80211_sta *ap_sta;
@ -1505,10 +1506,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
status_size = sizeof(struct iwl_wowlan_status_v6);
else
status_size = sizeof(struct iwl_wowlan_status_v4);
status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
@ -1516,35 +1514,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto out_free_resp;
}
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
status_v6 = (void *)cmd.resp_pkt->data;
fw_status = (void *)cmd.resp_pkt->data;
status.pattern_number = le16_to_cpu(status_v6->pattern_number);
for (i = 0; i < 8; i++)
status.qos_seq_ctr[i] =
le16_to_cpu(status_v6->qos_seq_ctr[i]);
status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
status.wake_packet_length =
le32_to_cpu(status_v6->wake_packet_length);
status.wake_packet_bufsize =
le32_to_cpu(status_v6->wake_packet_bufsize);
status.wake_packet = status_v6->wake_packet;
} else {
struct iwl_wowlan_status_v4 *status_v4;
status_v6 = NULL;
status_v4 = (void *)cmd.resp_pkt->data;
status.pattern_number = le16_to_cpu(status_v4->pattern_number);
for (i = 0; i < 8; i++)
status.qos_seq_ctr[i] =
le16_to_cpu(status_v4->qos_seq_ctr[i]);
status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
status.wake_packet_length =
le32_to_cpu(status_v4->wake_packet_length);
status.wake_packet_bufsize =
le32_to_cpu(status_v4->wake_packet_bufsize);
status.wake_packet = status_v4->wake_packet;
}
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
status.qos_seq_ctr[i] =
le16_to_cpu(fw_status->qos_seq_ctr[i]);
status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
status.wake_packet_length =
le32_to_cpu(fw_status->wake_packet_length);
status.wake_packet_bufsize =
le32_to_cpu(fw_status->wake_packet_bufsize);
status.wake_packet = fw_status->wake_packet;
if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@ -1571,7 +1552,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
iwl_free_resp(&cmd);
return keep;
@ -1674,6 +1655,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
if (iwl_mvm_is_d0i3_supported(mvm)) {
bool exit_now;
mutex_lock(&mvm->d0i3_suspend_mutex);
__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
&mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
if (exit_now)
_iwl_mvm_exit_d0i3(mvm);
return 0;
}
return __iwl_mvm_resume(mvm, false);
}
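The suspend/resume hunks above add a defer-wakeup handshake for d0i3: suspend sets D0I3_DEFER_WAKEUP under d0i3_suspend_mutex, and resume clears it and only leaves d0i3 immediately if D0I3_PENDING_WAKEUP was recorded in the meantime. A standalone model of that handshake using a pthread mutex and two plain flags in place of the driver's bit flags:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t d0i3_lock = PTHREAD_MUTEX_INITIALIZER;
static bool defer_wakeup;	/* models D0I3_DEFER_WAKEUP */
static bool pending_wakeup;	/* models D0I3_PENDING_WAKEUP */

static void demo_suspend(void)
{
	pthread_mutex_lock(&d0i3_lock);
	defer_wakeup = true;		/* park wake-ups while suspended */
	pthread_mutex_unlock(&d0i3_lock);
}

static void demo_wakeup_event(void)
{
	pthread_mutex_lock(&d0i3_lock);
	if (defer_wakeup)
		pending_wakeup = true;	/* remember it for resume time */
	pthread_mutex_unlock(&d0i3_lock);
}

static void demo_resume(void)
{
	bool exit_now;

	pthread_mutex_lock(&d0i3_lock);
	defer_wakeup = false;
	exit_now = pending_wakeup;
	pending_wakeup = false;
	pthread_mutex_unlock(&d0i3_lock);

	if (exit_now)
		printf("deferred wake-up handled on resume\n");
}

int main(void)
{
	demo_suspend();
	demo_wakeup_event();
	demo_resume();
	return 0;
}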

View file

@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
dbgfs_pm->tx_data_timeout = val;
break;
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
dbgfs_pm->disable_power_off = val;
break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
} else if (!strncmp("disable_power_off=", buf, 18) &&
!(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
if (sscanf(buf + 18, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
} else if (!strncmp("lprx=", buf, 5)) {
if (sscanf(buf + 5, "%d", &val) != 1)
return -EINVAL;
@ -592,8 +582,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))

View file

@ -136,9 +136,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
kfree(mvm->fw_error_sram);
mvm->fw_error_sram = NULL;
mvm->fw_error_sram_len = 0;
ret = 0;
out:
@ -1004,6 +1001,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@ -1108,9 +1106,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
.open = iwl_dbgfs_fw_error_dump_open,
.read = iwl_dbgfs_fw_error_dump_read,
.release = iwl_dbgfs_fw_error_dump_release,
.open = iwl_dbgfs_fw_error_dump_open,
.read = iwl_dbgfs_fw_error_dump_read,
.release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@ -1138,9 +1136,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);

View file

@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
BT_COEX_TX_DIS_LUT,
BT_COEX_MAX_LUT,
};
BT_COEX_INVALID_LUT = 0xff,
}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
* @flags:&enum iwl_bt_coex_flags
* @max_kill:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
* @bt4_antenna_isolation:
* @bt4_antenna_isolation_thr:
* @bt4_tx_tx_delta_freq_thr:
* @bt4_tx_rx_max_freq0:
* @bt_prio_boost:
* @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
* should be set by default
* @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
* should be set by default
* @bt4_antenna_isolation: antenna isolation
* @bt4_antenna_isolation_thr: antenna threshold value
* @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
* @bt4_tx_rx_max_freq0: TxRx max frequency
* @bt_prio_boost: BT priority boost registers
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
* @kill_ack_msk:
* @kill_cts_msk:
* @decision_lut:
* @bt4_multiprio_lut:
* @bt4_corun_lut20:
* @bt4_corun_lut40:
* @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
* @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
* @decision_lut: PTA decision LUT, per Prio-Ch
* @bt4_multiprio_lut: multi priority LUT configuration
* @bt4_corun_lut20: co-running 20 MHz LUT configuration
* @bt4_corun_lut40: co-running 40 MHz LUT configuration
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
__le32 flags;
u8 max_kill;
u8 bt_reduced_tx_power;
u8 reserved[2];
u8 override_primary_lut;
u8 override_secondary_lut;
u8 bt4_antenna_isolation;
u8 bt4_antenna_isolation_thr;
@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
__le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_3 */
} __packed; /* BT_COEX_CMD_API_S_VER_5 */
/**
* struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
BT_ON_NO_CONNECTION = 1,
BT_LOW_TRAFFIC = 2,
BT_HIGH_TRAFFIC = 3,
};
}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
__le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
enum iwl_bt_coex_prio_table_event {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,

View file

@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
struct iwl_wowlan_status_v4 {
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
__le16 qos_seq_ctr[8];
__le32 wakeup_reasons;
__le32 rekey_status;
__le32 num_of_gtk_rekeys;
__le32 transmitted_ndps;
__le32 received_beacons;
__le32 wake_packet_length;
__le32 wake_packet_bufsize;
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
struct iwl_wowlan_gtk_status {
u8 key_index;
u8 reserved[3];
@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;
struct iwl_wowlan_status_v6 {
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
__le64 replay_ctr;
__le16 pattern_number;

View file

@ -334,7 +334,7 @@ enum {
*/
struct iwl_lq_cmd {
u8 sta_id;
u8 reserved1;
u8 reduced_tpc;
u16 control;
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
u8 flags;

View file

@ -169,8 +169,12 @@ enum iwl_scan_type {
SCAN_TYPE_DISCOVERY_FORCED = 6,
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
/* Maximal number of channels to scan */
#define MAX_NUM_SCAN_CHANNELS 0x24
/**
* Maximal number of channels to scan.
* It should be equal to:
* max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
*/
#define MAX_NUM_SCAN_CHANNELS 50
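The comment ties the new value of MAX_NUM_SCAN_CHANNELS to the per-family channel list sizes. A hypothetical compile-time guard for that relationship; the two counts below are made-up stand-ins, not the real array sizes, and in kernel code this would more likely be a BUILD_BUG_ON:

#include <assert.h>

#define DEMO_NUM_CHANNELS             46	/* stand-in value */
#define DEMO_NUM_CHANNELS_FAMILY_8000 45	/* stand-in value */
#define DEMO_MAX_NUM_SCAN_CHANNELS    50

#define DEMO_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Fails the build if the scan limit drops below either channel list. */
static_assert(DEMO_MAX_NUM_SCAN_CHANNELS >=
	      DEMO_MAX(DEMO_NUM_CHANNELS, DEMO_NUM_CHANNELS_FAMILY_8000),
	      "scan channel limit smaller than an NVM channel list");

int main(void)
{
	return 0;
}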
/**
* struct iwl_scan_cmd - scan request command
@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
*
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
* IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
* on A band.
* IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
* beacon period. Finding channel activity in this mode is not guaranteed.
* IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
* Assuming a beacon period of 100ms, finding channel activity is guaranteed.
*/
enum iwl_scan_offload_flags {
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
};
/**
@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
enum iwl_scan_ebs_status {
IWL_SCAN_EBS_SUCCESS,
IWL_SCAN_EBS_FAILED,
IWL_SCAN_EBS_CHAN_NOT_FOUND,
};
/**
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_compleate_status
* @ebs_status: last EBS status, see IWL_SCAN_EBS_*
*/
struct iwl_scan_offload_complete {
u8 last_schedule_line;
u8 last_schedule_iteration;
u8 status;
u8 reserved;
u8 ebs_status;
} __packed;
/**

View file

@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
* struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
* @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
* @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
* sent
* @awake_acs:
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to
* @addr[ETH_ALEN]: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
* @key: look at %iwl_mvm_keyinfo
* @station_flags: look at %iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
struct iwl_mvm_add_sta_cmd_v5 {
u8 add_modify;
u8 unicast_tx_key_id;
u8 multicast_tx_key_id;
u8 reserved1;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN];
__le16 reserved2;
u8 sta_id;
u8 modify_mask;
__le16 reserved3;
struct iwl_mvm_keyinfo key;
__le32 station_flags;
__le32 station_flags_msk;
__le16 tid_disable_tx;
__le16 reserved4;
u8 add_immediate_ba_tid;
u8 remove_immediate_ba_tid;
__le16 add_immediate_ba_ssn;
__le16 sleep_tx_count;
__le16 sleep_state_flags;
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
* struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
* VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
* It only differs from VER_6 by the "awake_acs" field that is
* reserved and ignored in VER_6.
*/
struct iwl_mvm_add_sta_cmd_v7 {
struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;

Some files were not shown because too many files have changed in this diff.