
Merge tag 'wireless-drivers-next-2020-05-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.8

First set of patches for v5.8. Changes all over, with ath10k apparently
seeing the most new features this time. rtw88 also had lots of changes
in preparation for new hardware support.

In this pull request there's also a new macro added to
include/linux/iopoll.h: read_poll_timeout_atomic(). This is needed by
rtw88 for atomic polling.
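
For reference, read_poll_timeout_atomic() has the same semantics as
read_poll_timeout() but busy-waits with udelay() instead of sleeping,
so it is safe to call from atomic context. A minimal usage sketch, with
made-up REG_STATUS / STATUS_READY / priv->mmio names for illustration:

    #include <linux/iopoll.h>

    u32 val;
    int ret;

    /* Re-read the register every 10 us until STATUS_READY is set,
     * giving up after 500 us; no delay before the first read.
     */
    ret = read_poll_timeout_atomic(readl, val, val & STATUS_READY,
                                   10, 500, false,
                                   priv->mmio + REG_STATUS);
    if (ret)
        return ret; /* -ETIMEDOUT if the bit never asserted */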

Major changes:

ath11k

* add debugfs file for testing ADDBA and DELBA

* add support for 802.11 encapsulation offload in hardware

* add htt_peer_stats_reset debugfs file

ath10k

* enable VHT160 and VHT80+80 modes

* enable radar detection in secondary segment

* sdio: disable TX complete indication to improve throughput

* sdio: decrease power consumption

* sdio: add HTT TX bundle support to increase throughput

* sdio: add rx bitrate reporting

ath9k

* improvements to AR9002 calibration logic

carl9170

* remove buggy P2P_GO support

p54usb

* add support for AirVasT USB stick

rtw88

* add support for antenna configuration

ti wlcore

* add support for AES_CMAC cipher

iwlwifi

* support for a few new FW API versions

* new hw configs
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5d9e4722c7 by David S. Miller, 2020-05-07 13:22:35 -07:00
178 changed files with 22005 additions and 1716 deletions


@ -96,6 +96,17 @@ Optional properties:
- qcom,coexist-gpio-pin : gpio pin number information to support coex
which will be used by wifi firmware.
* Subnodes
The ath10k wifi node can contain one optional firmware subnode.
Firmware subnode is needed when the platform does not have TrustZone.
The firmware subnode must have:
- iommus:
Usage: required
Value type: <prop-encoded-array>
Definition: A list of phandle and IOMMU specifier pairs.
Example (to supply PCI based wifi block details):
In this example, the node is defined as child node of the PCI controller.
@ -196,4 +207,7 @@ wifi@18000000 {
memory-region = <&wifi_msa_mem>;
iommus = <&apps_smmu 0x0040 0x1>;
qcom,msa-fixed-perm;
wifi-firmware {
iommus = <&apps_iommu 0xc22 0x1>;
};
};

[next file]

@ -380,6 +380,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
kfree(cmd);
return ret;
}

[next file]

@ -419,7 +419,7 @@ struct ce_pipe_config {
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
struct ce_service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;

[next file]

@ -190,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -723,15 +724,12 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
/* Data transfer is not initiated, when reduced Tx completion
* is used for SDIO. disable it until fixed
*/
param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
/* Alternate credit size of 1544 as used by SDIO firmware is
* not big enough for mac80211 / native wifi frames. disable it
*/
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
else
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
if (mode == ATH10K_FIRMWARE_MODE_UTF)
param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
@ -2717,7 +2715,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
status = ath10k_hif_swap_mailbox(ar);
status = ath10k_hif_start_post(ar);
if (status) {
ath10k_err(ar, "failed to swap mailbox: %d\n", status);
goto err_hif_stop;
@ -3280,6 +3278,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->thermal.wmi_sync);
init_completion(&ar->bss_survey_done);
init_completion(&ar->peer_delete_done);
init_completion(&ar->peer_stats_info_complete);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@ -3291,6 +3290,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
if (!ar->workqueue_aux)
goto err_free_wq;
ar->workqueue_tx_complete =
create_singlethread_workqueue("ath10k_tx_complete_wq");
if (!ar->workqueue_tx_complete)
goto err_free_aux_wq;
mutex_init(&ar->conf_mutex);
mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);
@ -3318,7 +3322,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ret = ath10k_coredump_create(ar);
if (ret)
goto err_free_aux_wq;
goto err_free_tx_complete;
ret = ath10k_debug_create(ar);
if (ret)
@ -3328,12 +3332,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
err_free_coredump:
ath10k_coredump_destroy(ar);
err_free_tx_complete:
destroy_workqueue(ar->workqueue_tx_complete);
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
err_free_mac:
ath10k_mac_destroy(ar);
@ -3349,6 +3353,9 @@ void ath10k_core_destroy(struct ath10k *ar)
flush_workqueue(ar->workqueue_aux);
destroy_workqueue(ar->workqueue_aux);
flush_workqueue(ar->workqueue_tx_complete);
destroy_workqueue(ar->workqueue_tx_complete);
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);

[next file]

@ -149,6 +149,26 @@ static inline u32 host_interest_item_address(u32 item_offset)
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
enum ath10k_phy_mode {
ATH10K_PHY_MODE_LEGACY = 0,
ATH10K_PHY_MODE_HT = 1,
ATH10K_PHY_MODE_VHT = 2,
};
/* Data rate 100KBPS based on IE Index */
struct ath10k_index_ht_data_rate_type {
u8 beacon_rate_index;
u16 supported_rate[4];
};
/* Data rate 100KBPS based on IE Index */
struct ath10k_index_vht_data_rate_type {
u8 beacon_rate_index;
u16 supported_VHT80_rate[2];
u16 supported_VHT40_rate[2];
u16 supported_VHT20_rate[2];
};
struct ath10k_bmi {
bool done_sent;
};
@ -500,8 +520,14 @@ struct ath10k_sta {
u16 peer_id;
struct rate_info txrate;
struct ieee80211_tx_info tx_info;
u32 tx_retries;
u32 tx_failed;
u32 last_tx_bitrate;
u32 rx_rate_code;
u32 rx_bitrate_kbps;
u32 tx_rate_code;
u32 tx_bitrate_kbps;
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
@ -949,6 +975,11 @@ struct ath10k {
struct ieee80211_hw *hw;
struct ieee80211_ops *ops;
struct device *dev;
struct msa_region {
dma_addr_t paddr;
u32 mem_size;
void *vaddr;
} msa;
u8 mac_addr[ETH_ALEN];
enum ath10k_hw_rev hw_rev;
@ -1087,11 +1118,12 @@ struct ath10k {
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct completion vdev_delete_done;
struct completion peer_stats_info_complete;
struct workqueue_struct *workqueue;
/* Auxiliary workqueue */
struct workqueue_struct *workqueue_aux;
struct workqueue_struct *workqueue_tx_complete;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
@ -1132,6 +1164,8 @@ struct ath10k {
struct work_struct register_work;
struct work_struct restart_work;
struct work_struct bundle_tx_work;
struct work_struct tx_complete_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock

[next file]

@ -349,7 +349,7 @@ free:
spin_unlock_bh(&ar->data_lock);
}
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
unsigned long timeout, time_left;
int ret;
@ -778,7 +778,7 @@ static ssize_t ath10k_mem_value_read(struct file *file,
ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
if (ret) {
ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
(u32)(*ppos), ret);
goto exit;
}

[next file]

@ -125,6 +125,9 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return ar->debug.enable_extd_tx_stats;
}
int ath10k_debug_fw_stats_request(struct ath10k *ar);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@ -192,6 +195,11 @@ static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
return 0;
}
static inline int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
return 0;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL

[next file]

@ -54,7 +54,9 @@ struct ath10k_hif_ops {
*/
void (*stop)(struct ath10k *ar);
int (*swap_mailbox)(struct ath10k *ar);
int (*start_post)(struct ath10k *ar);
int (*get_htt_tx_complete)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
@ -137,10 +139,17 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
return ar->hif.ops->stop(ar);
}
static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
static inline int ath10k_hif_start_post(struct ath10k *ar)
{
if (ar->hif.ops->swap_mailbox)
return ar->hif.ops->swap_mailbox(ar);
if (ar->hif.ops->start_post)
return ar->hif.ops->start_post(ar);
return 0;
}
static inline int ath10k_hif_get_htt_tx_complete(struct ath10k *ar)
{
if (ar->hif.ops->get_htt_tx_complete)
return ar->hif.ops->get_htt_tx_complete(ar);
return 0;
}
@ -161,7 +170,8 @@ static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
ar->hif.ops->send_complete_check(ar, pipe_id, force);
if (ar->hif.ops->send_complete_check)
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,

[next file]

@ -51,10 +51,12 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
struct ath10k_htc_hdr *hdr;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
@ -63,6 +65,11 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
return;
}
if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
dev_kfree_skb_any(skb);
return;
}
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
@ -78,7 +85,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
if (ep->tx_credit_flow_enabled)
if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
@ -86,6 +93,63 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
unsigned int len,
bool consume)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
enum ath10k_htc_ep_id eid = ep->eid;
int credits, ret = 0;
if (!ep->tx_credit_flow_enabled)
return 0;
credits = DIV_ROUND_UP(len, ep->tx_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d consume %d\n",
eid, credits, ep->tx_credits, consume);
ret = -EAGAIN;
goto unlock;
}
if (consume) {
ep->tx_credits -= credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits total %d\n",
eid, credits, ep->tx_credits);
}
unlock:
spin_unlock_bh(&htc->tx_lock);
return ret;
}
static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
enum ath10k_htc_ep_id eid = ep->eid;
int credits;
if (!ep->tx_credit_flow_enabled)
return;
credits = DIV_ROUND_UP(len, ep->tx_credit_size);
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
@ -95,8 +159,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
unsigned int skb_len;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@ -108,23 +172,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
skb_push(skb, sizeof(struct ath10k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
skb_len = skb->len;
ret = ath10k_htc_consume_credit(ep, skb_len, true);
if (ret)
goto err_pull;
ath10k_htc_prepare_tx_skb(ep, skb);
@ -155,17 +206,7 @@ err_unmap:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
ath10k_htc_release_credit(ep, skb_len);
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
@ -581,6 +622,278 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
return allocation;
}
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
struct sk_buff *bundle_skb,
struct sk_buff_head *tx_save_head)
{
struct ath10k_hif_sg_item sg_item;
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int ret, cn = 0;
unsigned int skb_len;
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
skb_len = bundle_skb->len;
ret = ath10k_htc_consume_credit(ep, skb_len, true);
if (!ret) {
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = bundle_skb;
sg_item.vaddr = bundle_skb->data;
sg_item.len = bundle_skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
ath10k_htc_release_credit(ep, skb_len);
}
if (ret)
dev_kfree_skb_any(bundle_skb);
for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
if (ret) {
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb_queue_head(&ep->tx_req_head, skb);
} else {
skb_queue_tail(&ep->tx_complete_head, skb);
}
}
if (!ret)
queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"bundle tx status %d eid %d req count %d count %d len %d\n",
ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
return ret;
}
static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
int ret;
ret = ath10k_htc_send(htc, ep->eid, skb);
if (ret)
skb_queue_head(&ep->tx_req_head, skb);
ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
}
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
struct ath10k_htc *htc = ep->htc;
struct sk_buff *bundle_skb, *skb;
struct sk_buff_head tx_save_head;
struct ath10k_htc_hdr *hdr;
u8 *bundle_buf;
int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
if (ep->tx_credit_flow_enabled &&
ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
return 0;
bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
bundle_skb = dev_alloc_skb(bundles_left);
if (!bundle_skb)
return -ENOMEM;
bundle_buf = bundle_skb->data;
skb_queue_head_init(&tx_save_head);
while (true) {
skb = skb_dequeue(&ep->tx_req_head);
if (!skb)
break;
credit_pad = 0;
trans_len = skb->len + sizeof(*hdr);
credit_remainder = trans_len % ep->tx_credit_size;
if (credit_remainder != 0) {
credit_pad = ep->tx_credit_size - credit_remainder;
trans_len += credit_pad;
}
ret = ath10k_htc_consume_credit(ep,
bundle_buf + trans_len - bundle_skb->data,
false);
if (ret) {
skb_queue_head(&ep->tx_req_head, skb);
break;
}
if (bundles_left < trans_len) {
bundle_skb->len = bundle_buf - bundle_skb->data;
ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
if (ret) {
skb_queue_head(&ep->tx_req_head, skb);
return ret;
}
if (skb_queue_len(&ep->tx_req_head) == 0) {
ath10k_htc_send_one_skb(ep, skb);
return ret;
}
if (ep->tx_credit_flow_enabled &&
ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
skb_queue_head(&ep->tx_req_head, skb);
return 0;
}
bundles_left =
ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
bundle_skb = dev_alloc_skb(bundles_left);
if (!bundle_skb) {
skb_queue_head(&ep->tx_req_head, skb);
return -ENOMEM;
}
bundle_buf = bundle_skb->data;
skb_queue_head_init(&tx_save_head);
}
skb_push(skb, sizeof(struct ath10k_htc_hdr));
ath10k_htc_prepare_tx_skb(ep, skb);
memcpy(bundle_buf, skb->data, skb->len);
hdr = (struct ath10k_htc_hdr *)bundle_buf;
hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
hdr->pad_len = __cpu_to_le16(credit_pad);
bundle_buf += trans_len;
bundles_left -= trans_len;
skb_queue_tail(&tx_save_head, skb);
}
if (bundle_buf != bundle_skb->data) {
bundle_skb->len = bundle_buf - bundle_skb->data;
ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
} else {
dev_kfree_skb_any(bundle_skb);
}
return ret;
}
static void ath10k_htc_bundle_tx_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
struct ath10k_htc_ep *ep;
struct sk_buff *skb;
int i;
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
if (!ep->bundle_tx)
continue;
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
ep->eid, skb_queue_len(&ep->tx_req_head));
if (skb_queue_len(&ep->tx_req_head) >=
ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
ath10k_htc_send_bundle_skbs(ep);
} else {
skb = skb_dequeue(&ep->tx_req_head);
if (!skb)
continue;
ath10k_htc_send_one_skb(ep, skb);
}
}
}
static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
struct ath10k_htc_ep *ep;
enum ath10k_htc_ep_id eid;
struct sk_buff *skb;
int i;
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
eid = ep->eid;
if (ep->bundle_tx && eid == ar->htt.eid) {
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
ep->eid, skb_queue_len(&ep->tx_complete_head));
while (true) {
skb = skb_dequeue(&ep->tx_complete_head);
if (!skb)
break;
ath10k_htc_notify_tx_completion(ep, skb);
}
}
}
}
int ath10k_htc_send_hl(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k *ar = htc->ar;
if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
return -ENOMEM;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
if (ep->bundle_tx) {
skb_queue_tail(&ep->tx_req_head, skb);
queue_work(ar->workqueue, &ar->bundle_tx_work);
return 0;
} else {
return ath10k_htc_send(htc, eid, skb);
}
}
void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
{
if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
!ep->bundle_tx) {
ep->bundle_tx = true;
skb_queue_head_init(&ep->tx_req_head);
skb_queue_head_init(&ep->tx_complete_head);
}
}
void ath10k_htc_stop_hl(struct ath10k *ar)
{
struct ath10k_htc_ep *ep;
int i;
cancel_work_sync(&ar->bundle_tx_work);
cancel_work_sync(&ar->tx_complete_work);
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
if (!ep->bundle_tx)
continue;
ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
ep->eid, skb_queue_len(&ep->tx_req_head));
skb_queue_purge(&ep->tx_req_head);
}
}
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@ -649,17 +962,34 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
*/
if (htc->control_resp_len >=
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->alt_data_credit_size =
__le16_to_cpu(msg->ready_ext.reserved) &
ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);
"Extended ready message RX bundle size %d alt size %d\n",
htc->max_msgs_per_htc_bundle,
htc->alt_data_credit_size);
}
INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
return 0;
}
void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
bool enable)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
ep->tx_credit_flow_enabled = enable;
}
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
@ -791,6 +1121,11 @@ setup:
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
ep->tx_credit_size = htc->target_credit_size;
if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
htc->alt_data_credit_size != 0)
ep->tx_credit_size = htc->alt_data_credit_size;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;

[next file]

@ -83,8 +83,14 @@ struct ath10k_htc_hdr {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
u8 pad0;
u8 pad1;
union {
__le16 pad_len;
struct {
u8 pad0;
u8 pad1;
} __packed;
} __packed;
} __packed __aligned(4);
enum ath10k_ath10k_htc_msg_id {
@ -113,6 +119,8 @@ enum ath10k_htc_conn_flags {
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
@ -121,6 +129,10 @@ enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32
#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2
#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2
enum ath10k_htc_setup_complete_flags {
ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
};
@ -145,8 +157,14 @@ struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
u8 pad0;
u8 pad1;
union {
__le16 reserved;
struct {
u8 pad0;
u8 pad1;
} __packed;
} __packed;
} __packed;
struct ath10k_htc_conn_svc {
@ -353,7 +371,12 @@ struct ath10k_htc_ep {
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
bool tx_credit_flow_enabled;
bool bundle_tx;
struct sk_buff_head tx_req_head;
struct sk_buff_head tx_complete_head;
};
struct ath10k_htc_svc_tx_credits {
@ -378,16 +401,25 @@ struct ath10k_htc {
int total_transmit_credits;
int target_credit_size;
u8 max_msgs_per_htc_bundle;
int alt_data_credit_size;
};
int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
bool enable);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
void ath10k_htc_stop_hl(struct ath10k *ar);
int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);

[next file]

@ -10,6 +10,7 @@
#include "htt.h"
#include "core.h"
#include "debug.h"
#include "hif.h"
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
@ -134,6 +135,8 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k *ar = htt->ar;
struct ath10k_htc_ep *ep;
int status;
memset(&conn_req, 0, sizeof(conn_req));
@ -141,6 +144,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@ -153,6 +157,15 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
htt->eid = conn_resp.eid;
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
ep = &ar->htc.endpoint[htt->eid];
ath10k_htc_setup_tx_req(ep);
}
htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar);
if (htt->disable_tx_comp)
ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true);
return 0;
}

[next file]

@ -150,9 +150,19 @@ enum htt_data_tx_desc_flags1 {
HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
};
#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
#define HTT_TX_CREDIT_DELTA_ABS_S 16
#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
(((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
#define HTT_TX_CREDIT_SIGN_BIT_S 8
#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
(((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
enum htt_data_tx_ext_tid {
HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
HTT_DATA_TX_EXT_TID_MGMT = 17,
@ -2021,6 +2031,10 @@ struct ath10k_htt {
bool tx_mem_allocated;
const struct ath10k_htt_tx_ops *tx_ops;
const struct ath10k_htt_rx_ops *rx_ops;
bool disable_tx_comp;
bool bundle_tx;
struct sk_buff_head tx_req_head;
struct sk_buff_head tx_complete_head;
};
struct ath10k_htt_tx_ops {
@ -2035,6 +2049,7 @@ struct ath10k_htt_tx_ops {
int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
void (*htt_flush_tx)(struct ath10k_htt *htt);
};
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
@ -2074,6 +2089,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt,
return htt->tx_ops->htt_tx(htt, txmode, msdu);
}
static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
{
if (htt->tx_ops->htt_flush_tx)
htt->tx_ops->htt_flush_tx(htt);
}
static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
{
if (!htt->tx_ops->htt_alloc_txbuff)
@ -2267,6 +2288,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
__le16 fetch_seq_num,
struct htt_tx_fetch_record *records,
size_t num_records);
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);

[next file]

@ -3574,6 +3574,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
}
if (ar->htt.disable_tx_comp) {
arsta->tx_retries += peer_stats->retry_pkts;
arsta->tx_failed += peer_stats->failed_pkts;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
arsta->tx_retries, arsta->tx_failed);
}
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
@ -3789,6 +3796,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
struct htt_tx_done tx_done = {};
struct ath10k_htt *htt = &ar->htt;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
@ -3814,6 +3824,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
if (htt->disable_tx_comp) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits++;
spin_unlock_bh(&htc->tx_lock);
}
status = ath10k_txrx_tx_unref(htt, &tx_done);
if (!status) {
spin_lock_bh(&htt->tx_lock);
@ -3888,8 +3904,32 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
struct ath10k_htt *htt = &ar->htt;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
int htt_credit_delta;
htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
htt_credit_delta = -htt_credit_delta;
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt credit update delta %d\n",
htt_credit_delta);
if (htt->disable_tx_comp) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += htt_credit_delta;
spin_unlock_bh(&htc->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt credit total %d\n",
ep->tx_credits);
ep->ep_ops.ep_tx_credits(htc->ar);
}
break;
}
case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);

[next file]

@ -529,9 +529,15 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
htt->tx_mem_allocated = false;
}
static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
ath10k_htc_stop_hl(htt->ar);
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}
void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
ath10k_htt_flush_tx_queue(htt);
idr_destroy(&htt->pending_tx);
}
@ -541,9 +547,46 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
ath10k_htt_tx_destroy(htt);
}
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
queue_work(ar->workqueue, &ar->bundle_tx_work);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
struct htt_cmd_hdr *htt_hdr;
struct htt_data_tx_desc *desc_hdr = NULL;
u16 flags1 = 0;
u8 msg_type = 0;
if (htt->disable_tx_comp) {
htt_hdr = (struct htt_cmd_hdr *)skb->data;
msg_type = htt_hdr->msg_type;
if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
desc_hdr = (struct htt_data_tx_desc *)
(skb->data + sizeof(*htt_hdr));
flags1 = __le16_to_cpu(desc_hdr->flags1);
}
}
dev_kfree_skb_any(skb);
if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
return;
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx complete msdu id:%u ,flags1:%x\n",
__le16_to_cpu(desc_hdr->id), flags1);
if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
return;
tx_done.status = HTT_TX_COMPL_STATE_ACK;
tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@ -1279,6 +1322,9 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
if (htt->disable_tx_comp)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
break;
}
@ -1344,7 +1390,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
*/
tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);
out:
return res;
@ -1784,6 +1830,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
.htt_flush_tx = ath10k_htt_flush_tx_queue,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)

[next file]

@ -623,6 +623,9 @@ struct ath10k_hw_params {
/* tx stats support over pktlog */
bool tx_stats_over_pktlog;
/* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
bool supports_peer_stats_info;
};
struct htt_rx_desc;
@ -765,7 +768,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_MSDU_DESC_HL 64
#define TARGET_TLV_NUM_MSDU_DESC_HL 1024
#define TARGET_TLV_NUM_WOW_PATTERNS 22
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)

[next file]

@ -2505,6 +2505,30 @@ ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
static u32 get_160mhz_nss_from_maxrate(int rate)
{
u32 nss;
switch (rate) {
case 780:
nss = 1;
break;
case 1560:
nss = 2;
break;
case 2106:
nss = 3; /* not support MCS9 from spec*/
break;
case 3120:
nss = 4;
break;
default:
nss = 1;
}
return nss;
}
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -2512,6 +2536,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_hw_params *hw = &ar->hw_params;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@ -2578,22 +2603,38 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* Configure bandwidth-NSS mapping to FW
* for the chip's tx chains setting on 160Mhz bw
*/
if (arg->peer_phymode == MODE_11AC_VHT160 ||
arg->peer_phymode == MODE_11AC_VHT80_80) {
u32 rx_nss;
u32 max_rate;
if (arg->peer_vht_rates.rx_max_rate &&
(sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
switch (arg->peer_vht_rates.rx_max_rate) {
case 1560:
/* Must be 2x2 at 160Mhz is all it can do. */
arg->peer_bw_rxnss_override = 2;
break;
case 780:
/* Can only do 1x1 at 160Mhz (Long Guard Interval) */
arg->peer_bw_rxnss_override = 1;
break;
max_rate = arg->peer_vht_rates.rx_max_rate;
rx_nss = get_160mhz_nss_from_maxrate(max_rate);
if (rx_nss == 0)
rx_nss = arg->peer_num_spatial_streams;
else
rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
max_rate = hw->vht160_mcs_tx_highest;
rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
arg->peer_bw_rxnss_override =
FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
if (arg->peer_phymode == MODE_11AC_VHT80_80) {
arg->peer_bw_rxnss_override |=
FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
}
}
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
sta->addr, arg->peer_max_mpdu,
arg->peer_flags, arg->peer_bw_rxnss_override);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@ -2745,9 +2786,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
@ -2918,6 +2959,11 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->peer_stats_info_enable, 1);
if (ret)
ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to set vdev %d up: %d\n",
@ -4488,17 +4534,18 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
{
/* It is not clear that allowing gaps in chainmask
* is helpful. Probably it will not do what user
* is hoping for, so warn in that case.
*/
if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
return;
return true;
ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
dbg, cm);
return false;
}
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
@ -4563,13 +4610,6 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
vht_cap.cap |= val;
}
/* Currently the firmware seems to be buggy, don't enable 80+80
* mode until that's resolved.
*/
if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
mcs_map = 0;
for (i = 0; i < 8; i++) {
if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
@ -4688,11 +4728,15 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
lockdep_assert_held(&ar->conf_mutex);
ath10k_check_chain_mask(ar, tx_ant, "tx");
ath10k_check_chain_mask(ar, rx_ant, "rx");
is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
return -EINVAL;
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@ -7190,6 +7234,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath10k_wmi_peer_flush(ar, arvif->vdev_id,
arvif->bssid, bitmap);
}
ath10k_htt_flush_tx(&ar->htt);
}
return;
}
@ -8260,6 +8305,215 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
peer->removed = true;
}
/* HT MCS parameters with Nss = 1 */
static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
/* MCS L20 L40 S20 S40 */
{0, { 65, 135, 72, 150} },
{1, { 130, 270, 144, 300} },
{2, { 195, 405, 217, 450} },
{3, { 260, 540, 289, 600} },
{4, { 390, 810, 433, 900} },
{5, { 520, 1080, 578, 1200} },
{6, { 585, 1215, 650, 1350} },
{7, { 650, 1350, 722, 1500} }
};
/* HT MCS parameters with Nss = 2 */
static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
/* MCS L20 L40 S20 S40 */
{0, {130, 270, 144, 300} },
{1, {260, 540, 289, 600} },
{2, {390, 810, 433, 900} },
{3, {520, 1080, 578, 1200} },
{4, {780, 1620, 867, 1800} },
{5, {1040, 2160, 1156, 2400} },
{6, {1170, 2430, 1300, 2700} },
{7, {1300, 2700, 1444, 3000} }
};
/* MCS parameters with Nss = 1 */
static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
/* MCS L80 S80 L40 S40 L20 S20 */
{0, {293, 325}, {135, 150}, {65, 72} },
{1, {585, 650}, {270, 300}, {130, 144} },
{2, {878, 975}, {405, 450}, {195, 217} },
{3, {1170, 1300}, {540, 600}, {260, 289} },
{4, {1755, 1950}, {810, 900}, {390, 433} },
{5, {2340, 2600}, {1080, 1200}, {520, 578} },
{6, {2633, 2925}, {1215, 1350}, {585, 650} },
{7, {2925, 3250}, {1350, 1500}, {650, 722} },
{8, {3510, 3900}, {1620, 1800}, {780, 867} },
{9, {3900, 4333}, {1800, 2000}, {780, 867} }
};
/*MCS parameters with Nss = 2 */
static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
/* MCS L80 S80 L40 S40 L20 S20 */
{0, {585, 650}, {270, 300}, {130, 144} },
{1, {1170, 1300}, {540, 600}, {260, 289} },
{2, {1755, 1950}, {810, 900}, {390, 433} },
{3, {2340, 2600}, {1080, 1200}, {520, 578} },
{4, {3510, 3900}, {1620, 1800}, {780, 867} },
{5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
{6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
{7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
{8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
{9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
};
static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
u8 *flags, u8 *bw)
{
struct ath10k_index_ht_data_rate_type *mcs_rate;
mcs_rate = (struct ath10k_index_ht_data_rate_type *)
((nss == 1) ? &supported_ht_mcs_rate_nss1 :
&supported_ht_mcs_rate_nss2);
if (rate == mcs_rate[mcs].supported_rate[0]) {
*bw = RATE_INFO_BW_20;
} else if (rate == mcs_rate[mcs].supported_rate[1]) {
*bw |= RATE_INFO_BW_40;
} else if (rate == mcs_rate[mcs].supported_rate[2]) {
*bw |= RATE_INFO_BW_20;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (rate == mcs_rate[mcs].supported_rate[3]) {
*bw |= RATE_INFO_BW_40;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
rate, nss, mcs);
}
}
static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
u8 *flags, u8 *bw)
{
struct ath10k_index_vht_data_rate_type *mcs_rate;
mcs_rate = (struct ath10k_index_vht_data_rate_type *)
((nss == 1) ? &supported_vht_mcs_rate_nss1 :
&supported_vht_mcs_rate_nss2);
if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
*bw = RATE_INFO_BW_80;
} else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
*bw = RATE_INFO_BW_80;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
*bw = RATE_INFO_BW_40;
} else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
*bw = RATE_INFO_BW_40;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
*bw = RATE_INFO_BW_20;
} else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
*bw = RATE_INFO_BW_20;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
rate, nss, mcs);
}
}
static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
enum ath10k_phy_mode mode, u8 nss, u8 mcs,
u8 *flags, u8 *bw)
{
if (mode == ATH10K_PHY_MODE_HT) {
*flags = RATE_INFO_FLAGS_MCS;
ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
} else if (mode == ATH10K_PHY_MODE_VHT) {
*flags = RATE_INFO_FLAGS_VHT_MCS;
ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
}
}
static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
u32 bitrate_kbps, struct rate_info *rate)
{
enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
u8 flags = 0, bw = 0;
if (preamble == WMI_RATE_PREAMBLE_HT)
mode = ATH10K_PHY_MODE_HT;
else if (preamble == WMI_RATE_PREAMBLE_VHT)
mode = ATH10K_PHY_MODE_VHT;
ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
preamble, mode, nss, mcs, flags, bw);
rate->flags = flags;
rate->bw = bw;
rate->legacy = bitrate_kbps / 100;
rate->nss = nss;
rate->mcs = mcs;
}
static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k_peer *peer;
unsigned long time_left;
int ret;
if (!(ar->hw_params.supports_peer_stats_info &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
return;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
spin_unlock_bh(&ar->data_lock);
if (!peer)
return;
reinit_completion(&ar->peer_stats_info_complete);
ret = ath10k_wmi_request_peer_stats_info(ar,
arsta->arvif->vdev_id,
WMI_REQUEST_ONE_PEER_STATS_INFO,
arsta->arvif->bssid,
0);
if (ret && ret != -EOPNOTSUPP) {
ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
return;
}
time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
if (time_left == 0) {
ath10k_warn(ar, "timed out waiting peer stats info\n");
return;
}
if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
arsta->rx_bitrate_kbps,
&sinfo->rxrate);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
arsta->rx_rate_code = 0;
arsta->rx_bitrate_kbps = 0;
}
if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
arsta->tx_bitrate_kbps,
&sinfo->txrate);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
arsta->tx_rate_code = 0;
arsta->tx_bitrate_kbps = 0;
}
}
static void ath10k_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -8271,6 +8525,8 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
if (!ath10k_peer_stats_enabled(ar))
return;
ath10k_debug_fw_stats_request(ar);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@ -8286,6 +8542,15 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (ar->htt.disable_tx_comp) {
sinfo->tx_retries = arsta->tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->tx_failed = arsta->tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
}
static const struct ieee80211_ops ath10k_ops = {
@ -8625,7 +8890,9 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@ -8643,7 +8910,9 @@ ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160),
#endif
},
};
@ -8919,7 +9188,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
ar->hw->wiphy->max_sched_scan_reqs = 1;
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;

[next file]

@ -116,7 +116,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static struct ce_attr host_ce_config_wlan[] = {
static const struct ce_attr pci_host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@ -222,7 +222,7 @@ static struct ce_attr host_ce_config_wlan[] = {
};
/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
@ -335,7 +335,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@ -1787,6 +1787,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
@ -1804,7 +1806,7 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
* If at least 50% of the total resources are still available,
* don't bother checking again yet.
*/
if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
return;
}
ath10k_ce_per_engine_service(ar, pipe);
@ -1820,14 +1822,15 @@ static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
entry = &target_service_to_ce_map_wlan[i];
for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
entry = &ar_pci->serv_to_pipe[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
@ -2074,6 +2077,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_sync(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
cancel_work_sync(&ar_pci->dump_work);
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
@ -2315,6 +2319,7 @@ static int ath10k_bus_get_num_banks(struct ath10k *ar)
int ath10k_pci_init_config(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 interconnect_targ_addr;
u32 pcie_state_targ_addr = 0;
u32 pipe_cfg_targ_addr = 0;
@ -2360,7 +2365,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
target_ce_config_wlan,
ar_pci->pipe_config,
sizeof(struct ce_pipe_config) *
NUM_TARGET_CE_CONFIG_WLAN);
@ -2385,8 +2390,8 @@ int ath10k_pci_init_config(struct ath10k *ar)
}
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
target_service_to_ce_map_wlan,
sizeof(target_service_to_ce_map_wlan));
ar_pci->serv_to_pipe,
sizeof(pci_target_service_to_ce_map_wlan));
if (ret != 0) {
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
return ret;
@ -2458,23 +2463,24 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
struct ce_attr *attr;
struct ce_pipe_config *config;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* For QCA6174 we're overriding the Copy Engine 5 configuration,
* since it is currently used for other feature.
*/
/* Override Host's Copy Engine 5 configuration */
attr = &host_ce_config_wlan[5];
attr = &ar_pci->attr[5];
attr->src_sz_max = 0;
attr->dest_nentries = 0;
/* Override Target firmware's Copy Engine configuration */
config = &target_ce_config_wlan[5];
config = &ar_pci->pipe_config[5];
config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
config->nbytes_max = __cpu_to_le32(2048);
/* Map from service/endpoint to Copy Engine */
target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}
int ath10k_pci_alloc_pipes(struct ath10k *ar)
@ -2490,7 +2496,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
@ -2503,7 +2509,7 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar)
continue;
}
pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
}
return 0;
@ -2519,10 +2525,11 @@ void ath10k_pci_free_pipes(struct ath10k *ar)
int ath10k_pci_init_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
i, ret);
@ -3594,6 +3601,30 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
sizeof(pci_host_ce_config_wlan),
GFP_KERNEL);
if (!ar_pci->attr) {
ret = -ENOMEM;
goto err_free;
}
ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
sizeof(pci_target_ce_config_wlan),
GFP_KERNEL);
if (!ar_pci->pipe_config) {
ret = -ENOMEM;
goto err_free;
}
ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
sizeof(pci_target_service_to_ce_map_wlan),
GFP_KERNEL);
if (!ar_pci->serv_to_pipe) {
ret = -ENOMEM;
goto err_free;
}
ret = ath10k_pci_setup_resource(ar);
if (ret) {
ath10k_err(ar, "failed to setup resource: %d\n", ret);
@ -3689,6 +3720,11 @@ err_free_pipes:
err_core_destroy:
ath10k_core_destroy(ar);
err_free:
kfree(ar_pci->attr);
kfree(ar_pci->pipe_config);
kfree(ar_pci->serv_to_pipe);
return ret;
}
@ -3714,6 +3750,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
kfree(ar_pci->attr);
kfree(ar_pci->pipe_config);
kfree(ar_pci->serv_to_pipe);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

[next file]

@ -183,6 +183,10 @@ struct ath10k_pci {
* this struct.
*/
struct ath10k_ahb ahb[0];
struct ce_attr *attr;
struct ce_pipe_config *pipe_config;
struct ce_service_to_pipe *serv_to_pipe;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)

[next file]

@ -122,8 +122,8 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
int ret;
int i;
req.msa_addr = qmi->msa_pa;
req.size = qmi->msa_mem_size;
req.msa_addr = ar->msa.paddr;
req.size = ar->msa.mem_size;
ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
wlfw_msa_info_resp_msg_v01_ei, &resp);
@ -157,12 +157,12 @@ static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
}
max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
qmi->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
if (resp.mem_region_info[i].size > ar->msa.mem_size ||
resp.mem_region_info[i].region_addr > max_mapped_addr ||
resp.mem_region_info[i].region_addr < qmi->msa_pa ||
resp.mem_region_info[i].region_addr < ar->msa.paddr ||
resp.mem_region_info[i].size +
resp.mem_region_info[i].region_addr > max_mapped_addr) {
ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
@ -1006,54 +1006,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
spin_unlock(&qmi->event_lock);
}
static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
{
struct ath10k *ar = qmi->ar;
struct device *dev = ar->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
of_node_put(node);
qmi->msa_pa = r.start;
qmi->msa_mem_size = resource_size(&r);
qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
MEMREMAP_WT);
if (IS_ERR(qmi->msa_va)) {
dev_err(dev, "failed to map memory region: %pa\n", &r.start);
return PTR_ERR(qmi->msa_va);
}
} else {
qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
&qmi->msa_pa, GFP_KERNEL);
if (!qmi->msa_va) {
ath10k_err(ar, "failed to allocate dma memory for msa region\n");
return -ENOMEM;
}
qmi->msa_mem_size = msa_size;
}
if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
qmi->msa_fixed_perm = true;
ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
&qmi->msa_pa,
qmi->msa_va);
return 0;
}
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct device *dev = ar->dev;
struct ath10k_qmi *qmi;
int ret;
@ -1064,9 +1020,8 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
qmi->ar = ar;
ar_snoc->qmi = qmi;
ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
if (ret)
goto err;
if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
qmi->msa_fixed_perm = true;
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,

View File

@ -93,9 +93,6 @@ struct ath10k_qmi {
spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
dma_addr_t msa_pa;
u32 msa_mem_size;
void *msa_va;
struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info;

View File

@ -542,7 +542,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
@ -1361,23 +1361,117 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
napi_schedule(&ar->napi);
}
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
{
struct ath10k *ar = ar_sdio->ar;
unsigned char rtc_state = 0;
int ret = 0;
rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
if (ret) {
ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
return ret;
}
*state = rtc_state & 0x3;
return ret;
}
static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 val;
int retry = ATH10K_CIS_READ_RETRY, ret = 0;
unsigned char rtc_state = 0;
sdio_claim_host(ar_sdio->func);
ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
if (ret) {
ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
ret);
goto release;
}
if (enable_sleep) {
val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
} else {
val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
}
ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
if (ret) {
ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
ret);
}
if (!enable_sleep) {
do {
udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
if (ret) {
ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
break;
}
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
rtc_state);
if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
break;
udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
retry--;
} while (retry > 0);
}
release:
sdio_release_host(ar_sdio->func);
return ret;
}
static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
}
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
wr_async_work);
struct ath10k *ar = ar_sdio->ar;
struct ath10k_sdio_bus_request *req, *tmp_req;
struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
spin_unlock_bh(&ar_sdio->wr_async_lock);
if (req->address >= mbox_info->htc_addr &&
ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
ath10k_sdio_set_mbox_sleep(ar, false);
mod_timer(&ar_sdio->sleep_timer, jiffies +
msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
}
__ath10k_sdio_write_async(ar, req);
spin_lock_bh(&ar_sdio->wr_async_lock);
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
ath10k_sdio_set_mbox_sleep(ar, true);
}
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
@ -1444,7 +1538,7 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
/* sdio HIF functions */
static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@ -1500,7 +1594,7 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar,
ar_sdio->is_disabled = false;
ret = ath10k_sdio_hif_disable_intrs(ar);
ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@ -1517,6 +1611,9 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
del_timer_sync(&ar_sdio->sleep_timer);
ath10k_sdio_set_mbox_sleep(ar, true);
/* Disable the card */
sdio_claim_host(ar_sdio->func);
@ -1569,7 +1666,7 @@ static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return 0;
}
static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@ -1617,33 +1714,6 @@ static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
return ret;
}
static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
u32 val;
int ret;
ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
if (ret) {
ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
ret);
return ret;
}
if (enable_sleep)
val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
else
val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
if (ret) {
ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
ret);
return ret;
}
return 0;
}
/* HIF diagnostics */
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
@ -1679,8 +1749,8 @@ out:
return ret;
}
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
u32 *value)
static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
u32 *value)
{
__le32 *val;
int ret;
@ -1725,7 +1795,7 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
return 0;
}
static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
u32 addr, val;
@ -1733,7 +1803,7 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
return ret;
@ -1749,9 +1819,33 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
ar_sdio->swap_mbox = false;
}
ath10k_sdio_set_mbox_sleep(ar, true);
return 0;
}
static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
{
u32 addr, val;
int ret;
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar,
"unable to read hi_acs_flags for htt tx comple : %d\n", ret);
return ret;
}
ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
ret ? " " : " not ");
return ret;
}
/* HIF start/stop */
static int ath10k_sdio_hif_start(struct ath10k *ar)
@ -1766,7 +1860,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
* request before interrupts are disabled.
*/
msleep(20);
ret = ath10k_sdio_hif_disable_intrs(ar);
ret = ath10k_sdio_disable_intrs(ar);
if (ret)
return ret;
@ -1788,19 +1882,19 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
sdio_release_host(ar_sdio->func);
ret = ath10k_sdio_hif_enable_intrs(ar);
ret = ath10k_sdio_enable_intrs(ar);
if (ret)
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
/* Enable sleep and then disable it again */
ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
ret = ath10k_sdio_set_mbox_sleep(ar, true);
if (ret)
return ret;
/* Wait for 20ms for the written value to take effect */
msleep(20);
ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
ret = ath10k_sdio_set_mbox_sleep(ar, false);
if (ret)
return ret;
@ -2007,17 +2101,6 @@ static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
*dl_pipe = 0;
}
/* This op is currently only used by htc_wait_target if the HTC ready
* message times out. It is not applicable for SDIO since there is nothing
* we can do if the HTC ready message does not arrive in time.
* TODO: Make this op non mandatory by introducing a NULL check in the
* hif op wrapper.
*/
static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
u8 pipe, int force)
{
}
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.tx_sg = ath10k_sdio_hif_tx_sg,
.diag_read = ath10k_sdio_hif_diag_read,
@ -2025,10 +2108,10 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
.start = ath10k_sdio_hif_start,
.stop = ath10k_sdio_hif_stop,
.swap_mailbox = ath10k_sdio_hif_swap_mailbox,
.start_post = ath10k_sdio_hif_start_post,
.get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
.get_default_pipe = ath10k_sdio_hif_get_default_pipe,
.send_complete_check = ath10k_sdio_hif_send_complete_check,
.power_up = ath10k_sdio_hif_power_up,
.power_down = ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
@ -2053,6 +2136,8 @@ static int ath10k_sdio_pm_suspend(struct device *device)
if (!device_may_wakeup(ar->dev))
return 0;
ath10k_sdio_set_mbox_sleep(ar, true);
pm_flag = MMC_PM_KEEP_POWER;
ret = sdio_set_host_pm_flags(func, pm_flag);
@ -2216,6 +2301,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
return 0;
err_free_wq:

View File

@ -37,7 +37,7 @@
(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
#define ATH10K_HIF_MBOX_NUM_MAX 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 64
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 1024
#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
@ -98,6 +98,21 @@
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
enum sdio_mbox_state {
SDIO_MBOX_UNKNOWN_STATE = 0,
SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
SDIO_MBOX_SLEEP_STATE = 2,
SDIO_MBOX_AWAKE_STATE = 3,
};
#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US 125
#define ATH10K_CIS_RTC_STATE_ADDR 0x1138
#define ATH10K_CIS_RTC_STATE_ON 0x01
#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US 1500
#define ATH10K_CIS_READ_RETRY 10
#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS 50
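/* Editorial note (derived from ath10k_sdio_set_mbox_sleep() and the sleep
 * timer above, not from the patch itself): the mbox sleep state machine is
 * expected to run timer -> SDIO_MBOX_REQUEST_TO_SLEEP_STATE ->
 * SDIO_MBOX_SLEEP_STATE, and any HTC write while asleep wakes the mbox and
 * re-arms the 50 ms inactivity timer. On wake, the RTC state is polled with
 * a 125 us wait per RTC cycle plus up to 1500 us of XTAL settle time per
 * attempt, so with 10 retries the poll gives up after roughly
 * 10 * (125 + 1500) us, i.e. about 16 ms.
 */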
/* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request {
struct list_head list;
@ -217,6 +232,8 @@ struct ath10k_sdio {
spinlock_t wr_async_lock;
struct work_struct async_work_rx;
struct timer_list sleep_timer;
enum sdio_mbox_state mbox_state;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)

View File

@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ce.h"
#include "coredump.h"
@ -356,7 +358,7 @@ static struct ce_pipe_config target_ce_config_wlan[] = {
},
};
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@ -769,7 +771,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
const struct ce_service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
@ -1393,7 +1395,6 @@ static int ath10k_hw_power_off(struct ath10k *ar)
static void ath10k_msa_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const struct ath10k_hw_mem_layout *mem_layout;
const struct ath10k_mem_region *current_region;
struct ath10k_dump_ram_data_hdr *hdr;
@ -1419,15 +1420,15 @@ static void ath10k_msa_dump_memory(struct ath10k *ar,
buf_len -= sizeof(*hdr);
hdr->region_type = cpu_to_le32(current_region->type);
hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);
hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
hdr->length = cpu_to_le32(ar->msa.mem_size);
if (current_region->len < ar_snoc->qmi->msa_mem_size) {
memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
if (current_region->len < ar->msa.mem_size) {
memcpy(buf, ar->msa.vaddr, current_region->len);
ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
current_region->len, ar_snoc->qmi->msa_mem_size);
current_region->len, ar->msa.mem_size);
} else {
memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
}
}
@ -1455,6 +1456,155 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
mutex_unlock(&ar->dump_mutex);
}
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
ar->msa.mem_size,
MEMREMAP_WT);
if (IS_ERR(ar->msa.vaddr)) {
dev_err(dev, "failed to map memory region: %pa\n",
&r.start);
return PTR_ERR(ar->msa.vaddr);
}
} else {
ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
&ar->msa.paddr,
GFP_KERNEL);
if (!ar->msa.vaddr) {
ath10k_err(ar, "failed to allocate dma memory for msa region\n");
return -ENOMEM;
}
ar->msa.mem_size = msa_size;
}
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad, msa.vaddr: 0x%p\n",
&ar->msa.paddr,
ar->msa.vaddr);
return 0;
}
static int ath10k_fw_init(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct device *host_dev = &ar_snoc->dev->dev;
struct platform_device_info info;
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
int ret;
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ar_snoc->use_tz = true;
return 0;
}
memset(&info, 0, sizeof(info));
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
info.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&info);
if (IS_ERR(pdev)) {
of_node_put(node);
return PTR_ERR(pdev);
}
pdev->dev.of_node = node;
ret = of_dma_configure(&pdev->dev, node, true);
if (ret) {
ath10k_err(ar, "dma configure fail: %d\n", ret);
goto err_unregister;
}
ar_snoc->fw.dev = &pdev->dev;
iommu_dom = iommu_domain_alloc(&platform_bus_type);
if (!iommu_dom) {
ath10k_err(ar, "failed to allocate iommu domain\n");
ret = -ENOMEM;
goto err_unregister;
}
ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
if (ret) {
ath10k_err(ar, "could not attach device: %d\n", ret);
goto err_iommu_free;
}
ar_snoc->fw.iommu_domain = iommu_dom;
ar_snoc->fw.fw_start_addr = ar->msa.paddr;
ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
ar->msa.paddr, ar->msa.mem_size,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
ath10k_err(ar, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
}
of_node_put(node);
return 0;
err_iommu_detach:
iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
err_iommu_free:
iommu_domain_free(iommu_dom);
err_unregister:
platform_device_unregister(pdev);
of_node_put(node);
return ret;
}
static int ath10k_fw_deinit(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
struct iommu_domain *iommu;
size_t unmapped_size;
if (ar_snoc->use_tz)
return 0;
iommu = ar_snoc->fw.iommu_domain;
unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
mapped_size);
if (unmapped_size != mapped_size)
ath10k_err(ar, "failed to unmap firmware: %zu\n",
unmapped_size);
iommu_detach_device(iommu, ar_snoc->fw.dev);
iommu_domain_free(iommu);
platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
return 0;
}
static const struct of_device_id ath10k_snoc_dt_match[] = {
{ .compatible = "qcom,wcn3990-wifi",
.data = &drv_priv,
@ -1557,16 +1707,31 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
ret = ath10k_setup_msa_resources(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
goto err_power_off;
}
ret = ath10k_fw_init(ar);
if (ret) {
ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
goto err_power_off;
}
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
goto err_power_off;
goto err_fw_deinit;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
err_fw_deinit:
ath10k_fw_deinit(ar);
err_power_off:
ath10k_hw_power_off(ar);
@ -1598,6 +1763,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_core_unregister(ar);
ath10k_hw_power_off(ar);
ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar);

View File

@ -55,6 +55,13 @@ struct regulator_bulk_data;
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
unsigned int use_tz;
struct ath10k_firmware {
struct device *dev;
dma_addr_t fw_start_addr;
struct iommu_domain *iommu_domain;
size_t mapped_mem_size;
} fw;
void __iomem *mem;
dma_addr_t mem_pa;
struct ath10k_snoc_target_info target_info;

View File

@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
rcu_read_lock();
if (txq && txq->sta && skb_cb->airtime_est)
ieee80211_sta_register_airtime(txq->sta, txq->tid,
skb_cb->airtime_est, 0);
rcu_read_unlock();
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

View File

@ -693,17 +693,6 @@ static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
return 0;
}
/* This op is currently only used by htc_wait_target if the HTC ready
* message times out. It is not applicable for USB since there is nothing
* we can do if the HTC ready message does not arrive in time.
* TODO: Make this op non mandatory by introducing a NULL check in the
* hif op wrapper.
*/
static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
u8 pipe, int force)
{
}
static int ath10k_usb_hif_power_up(struct ath10k *ar,
enum ath10k_firmware_mode fw_mode)
{
@ -737,7 +726,6 @@ static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
.stop = ath10k_usb_hif_stop,
.map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
.get_default_pipe = ath10k_usb_hif_get_default_pipe,
.send_complete_check = ath10k_usb_hif_send_complete_check,
.get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
.power_up = ath10k_usb_hif_power_up,
.power_down = ath10k_usb_hif_power_down,

View File

@ -126,6 +126,13 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
@ -1064,6 +1071,29 @@ ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_request_peer_stats_info)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
vdev_id,
type,
addr,
reset);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}
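/* A minimal caller sketch (editorial illustration under stated assumptions,
 * not part of the patch): issue a single-peer stats request through the op
 * above and wait for the WMI TLV event handler, which completes
 * ar->peer_stats_info_complete when WMI_TLV_PEER_STATS_INFO_EVENTID arrives.
 * The function name and the 3 * HZ timeout are made up for the example.
 */
static inline int
ath10k_example_fetch_peer_stats(struct ath10k *ar, u32 vdev_id, u8 *peer_addr)
{
	int ret;

	reinit_completion(&ar->peer_stats_info_complete);

	ret = ath10k_wmi_request_peer_stats_info(ar, vdev_id,
						 WMI_REQUEST_ONE_PEER_STATS_INFO,
						 peer_addr, 0);
	if (ret)
		return ret;

	/* returns 0 on timeout, remaining jiffies otherwise */
	if (!wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}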
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms)

View File

@ -219,6 +219,91 @@ static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
complete(&ar->vdev_delete_done);
}
static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
const void *ptr, void *data)
{
const struct wmi_tlv_peer_stats_info *stat = ptr;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
return -EPROTO;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
stat->peer_macaddr.addr,
__le32_to_cpu(stat->last_rx_rate_code),
__le32_to_cpu(stat->last_rx_bitrate_kbps));
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
__le32_to_cpu(stat->last_tx_rate_code),
__le32_to_cpu(stat->last_tx_bitrate_kbps));
sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
if (!sta) {
ath10k_warn(ar, "not found station for peer stats\n");
return -EINVAL;
}
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
return 0;
}
static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_peer_stats_info_ev *ev;
const void *data;
u32 num_peer_stats;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
if (!ev || !data) {
kfree(tb);
return -EPROTO;
}
num_peer_stats = __le32_to_cpu(ev->num_peers);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
__le32_to_cpu(ev->vdev_id),
num_peer_stats,
__le32_to_cpu(ev->more_data));
ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
ath10k_wmi_tlv_parse_peer_stats_info, NULL);
if (ret)
ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
kfree(tb);
return 0;
}
static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
complete(&ar->peer_stats_info_complete);
}
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb)
{
@ -576,6 +661,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
case WMI_TLV_PEER_STATS_INFO_EVENTID:
ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
break;
case WMI_TLV_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
break;
@ -2123,7 +2211,7 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*ch));
ch = (void *)tlv->value;
ath10k_wmi_put_wmi_channel(ch, &arg->channel);
ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
ptr += sizeof(*tlv);
ptr += sizeof(*ch);
@ -2763,7 +2851,7 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
tlv->len = __cpu_to_le16(sizeof(*ci));
ci = (void *)tlv->value;
ath10k_wmi_put_wmi_channel(ci, ch);
ath10k_wmi_put_wmi_channel(ar, ci, ch);
chans += sizeof(*tlv);
chans += sizeof(*ci);
@ -2897,6 +2985,36 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset)
{
struct wmi_tlv_request_peer_stats_info *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
tlv = (void *)skb->data;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->request_type = __cpu_to_le32(type);
if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
ether_addr_copy(cmd->peer_macaddr.addr, addr);
cmd->reset_after_request = reset;
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
return skb;
}
static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr)
@ -3450,7 +3568,7 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
tlv->len = __cpu_to_le16(sizeof(*chan));
chan = (void *)tlv->value;
ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
ptr += sizeof(*tlv);
ptr += sizeof(*chan);
@ -4113,6 +4231,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
.network_list_offload_config_cmdid =
WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
@ -4269,6 +4388,7 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
@ -4416,6 +4536,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,

View File

@ -198,6 +198,12 @@ enum wmi_tlv_cmd_id {
WMI_TLV_REQUEST_LINK_STATS_CMDID,
WMI_TLV_START_LINK_STATS_CMDID,
WMI_TLV_CLEAR_LINK_STATS_CMDID,
WMI_TLV_CGET_FW_MEM_DUMP_CMDID,
WMI_TLV_CDEBUG_MESG_FLUSH_CMDID,
WMI_TLV_CDIAG_EVENT_LOG_CONFIG_CMDID,
WMI_TLV_CREQUEST_WLAN_STATS_CMDID,
WMI_TLV_CREQUEST_RCPI_CMDID,
WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@ -338,6 +344,13 @@ enum wmi_tlv_event_id {
WMI_TLV_IFACE_LINK_STATS_EVENTID,
WMI_TLV_PEER_LINK_STATS_EVENTID,
WMI_TLV_RADIO_LINK_STATS_EVENTID,
WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID,
WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
WMI_TLV_INST_RSSI_STATS_EVENTID,
WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
WMI_TLV_REPORT_STATS_EVENTID,
WMI_TLV_UPDATE_RCPI_EVENTID,
WMI_TLV_PEER_STATS_INFO_EVENTID,
WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
WMI_TLV_APFIND_EVENTID,
@ -451,6 +464,7 @@ enum wmi_tlv_pdev_param {
WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b,
WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
};
@ -2081,6 +2095,94 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats_extd;
} __packed;
struct wmi_tlv_peer_stats_info_ev {
__le32 vdev_id;
__le32 num_peers;
__le32 more_data;
} __packed;
#define WMI_TLV_MAX_CHAINS 8
struct wmi_tlv_peer_stats_info {
struct wmi_mac_addr peer_macaddr;
struct {
/* lower 32 bits of the tx_bytes value */
__le32 low_32;
/* upper 32 bits of the tx_bytes value */
__le32 high_32;
} __packed tx_bytes;
struct {
/* lower 32 bits of the tx_packets value */
__le32 low_32;
/* upper 32 bits of the tx_packets value */
__le32 high_32;
} __packed tx_packets;
struct {
/* lower 32 bits of the rx_bytes value */
__le32 low_32;
/* upper 32 bits of the rx_bytes value */
__le32 high_32;
} __packed rx_bytes;
struct {
/* lower 32 bits of the rx_packets value */
__le32 low_32;
/* upper 32 bits of the rx_packets value */
__le32 high_32;
} __packed rx_packets;
__le32 tx_retries;
__le32 tx_failed;
/* rate information, the output of WMI_ASSEMBLE_RATECODE_V1
* (in the format 0x1000RRRR).
* The rate-code is a 4-byte field encoding the preamble, NSS and
* rate for a given transmission:
*
* b'31-b'29 unused / reserved
* b'28 indicate the version of rate-code (1 = RATECODE_V1)
* b'27-b'11 unused / reserved
* b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
* b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
* b'4-b'0 indicate the rate, which is indicated as follows:
* OFDM : 0: OFDM 48 Mbps
* 1: OFDM 24 Mbps
* 2: OFDM 12 Mbps
* 3: OFDM 6 Mbps
* 4: OFDM 54 Mbps
* 5: OFDM 36 Mbps
* 6: OFDM 18 Mbps
* 7: OFDM 9 Mbps
* CCK (pream == 1)
* 0: CCK 11 Mbps Long
* 1: CCK 5.5 Mbps Long
* 2: CCK 2 Mbps Long
* 3: CCK 1 Mbps Long
* 4: CCK 11 Mbps Short
* 5: CCK 5.5 Mbps Short
* 6: CCK 2 Mbps Short
* HT/VHT (pream == 2/3)
* 0..7: MCS0..MCS7 (HT)
* 0..9: MCS0..MCS9 (11AC VHT)
* 0..11: MCS0..MCS11 (11AX HE)
* rate-code of the last transmission
*/
__le32 last_tx_rate_code;
__le32 last_rx_rate_code;
__le32 last_tx_bitrate_kbps;
__le32 last_rx_bitrate_kbps;
__le32 peer_rssi;
__le32 tx_succeed;
__le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
} __packed;
#define HW_RATECODE_PREAM_V1_MASK GENMASK(10, 8)
#define WMI_TLV_GET_HW_RC_PREAM_V1(rc) FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
#define HW_RATECODE_NSS_V1_MASK GENMASK(7, 5)
#define WMI_TLV_GET_HW_RC_NSS_V1(rc) FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
#define HW_RATECODE_RATE_V1_MASK GENMASK(4, 0)
#define WMI_TLV_GET_HW_RC_RATE_V1(rc) FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
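/* Editorial sketch (not part of the patch): unpacking the split 64-bit
 * counters and the V1 rate-code defined above. Only the masks, accessor
 * macros and struct layout come from this header; the helper names are
 * hypothetical, and <linux/bitfield.h> is assumed for FIELD_GET().
 */
static inline u64 wmi_tlv_example_stat64(__le32 low_32, __le32 high_32)
{
	/* e.g. tx_bytes = (high_32 << 32) | low_32 */
	return ((u64)__le32_to_cpu(high_32) << 32) | __le32_to_cpu(low_32);
}

static inline void wmi_tlv_example_decode_rc_v1(u32 rc, u8 *pream, u8 *nss,
						u8 *rate)
{
	*pream = WMI_TLV_GET_HW_RC_PREAM_V1(rc); /* 0 OFDM, 1 CCK, 2 HT, 3 VHT */
	*nss = WMI_TLV_GET_HW_RC_NSS_V1(rc);     /* 0 = 1x1 ... 3 = 4x4 */
	*rate = WMI_TLV_GET_HW_RC_RATE_V1(rc);   /* rate/MCS index per table above */
}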
struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id;
} __packed;
@ -2097,6 +2199,14 @@ struct wmi_tlv_wow_add_del_event_cmd {
__le32 event_bitmap;
} __packed;
struct wmi_tlv_request_peer_stats_info {
__le32 request_type;
__le32 vdev_id;
/* peer MAC address */
struct wmi_mac_addr peer_macaddr;
__le32 reset_after_request;
} __packed;
/* Command to set/unset chip in quiet mode */
struct wmi_tlv_set_quiet_cmd {
__le32 vdev_id;

View File

@ -1694,10 +1694,11 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
.bw160 = WMI_10_2_PEER_160MHZ,
};
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg)
{
u32 flags = 0;
struct ieee80211_channel *chan = NULL;
memset(ch, 0, sizeof(*ch));
@ -1714,12 +1715,39 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
if (arg->chan_radar)
flags |= WMI_CHAN_FLAG_DFS;
ch->band_center_freq2 = 0;
ch->mhz = __cpu_to_le32(arg->freq);
ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
if (arg->mode == MODE_11AC_VHT80_80)
if (arg->mode == MODE_11AC_VHT80_80) {
ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
else
ch->band_center_freq2 = 0;
chan = ieee80211_get_channel(ar->hw->wiphy,
arg->band_center_freq2 - 10);
}
if (arg->mode == MODE_11AC_VHT160) {
u32 band_center_freq1;
u32 band_center_freq2;
if (arg->freq > arg->band_center_freq1) {
band_center_freq1 = arg->band_center_freq1 + 40;
band_center_freq2 = arg->band_center_freq1 - 40;
} else {
band_center_freq1 = arg->band_center_freq1 - 40;
band_center_freq2 = arg->band_center_freq1 + 40;
}
ch->band_center_freq1 =
__cpu_to_le32(band_center_freq1);
/* Minus 10 to get a defined 5G channel frequency */
chan = ieee80211_get_channel(ar->hw->wiphy,
band_center_freq2 - 10);
/* The center frequency of the entire VHT160 */
ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
}
if (chan && chan->flags & IEEE80211_CHAN_RADAR)
flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
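	/* Worked example (editorial, illustrative values): for a VHT160
	 * channel with arg->freq = 5180 MHz (channel 36) and
	 * arg->band_center_freq1 = 5250 MHz, freq is below the center, so
	 * the primary-80 center becomes 5250 - 40 = 5210 MHz and the
	 * secondary-80 center 5250 + 40 = 5290 MHz; the radar check then
	 * looks up 5290 - 10 = 5280 MHz (channel 56), while
	 * ch->band_center_freq2 carries 5250 MHz, the VHT160 center.
	 */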
ch->min_power = arg->min_power;
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
@ -7165,7 +7193,7 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
@ -7537,7 +7565,7 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
ath10k_wmi_put_wmi_channel(ci, ch);
ath10k_wmi_put_wmi_channel(ar, ci, ch);
}
return skb;
@ -7628,12 +7656,8 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
if (arg->peer_bw_rxnss_override)
cmd->peer_bw_rxnss_override =
__cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
else
cmd->peer_bw_rxnss_override = 0;
cmd->peer_bw_rxnss_override =
__cpu_to_le32(arg->peer_bw_rxnss_override);
}
static int
@ -8312,7 +8336,7 @@ ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Oversized AMSUs", pdev->oversize_amsdu);
"Oversized AMSDUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
@ -8945,7 +8969,7 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
for (i = 0; i < cap->peer_chan_len; i++) {
chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
}
ath10k_dbg(ar, ATH10K_DBG_WMI,

View File

@ -940,6 +940,7 @@ struct wmi_cmd_map {
u32 vdev_spectral_scan_configure_cmdid;
u32 vdev_spectral_scan_enable_cmdid;
u32 request_stats_cmdid;
u32 request_peer_stats_info_cmdid;
u32 set_arp_ns_offload_cmdid;
u32 network_list_offload_config_cmdid;
u32 gtk_offload_cmdid;
@ -2094,7 +2095,8 @@ enum wmi_channel_change_cause {
/* Indicate reason for channel switch */
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
/* DFS required on channel for 2nd segment of VHT160 and VHT80+80 */
#define WMI_CHAN_FLAG_DFS_CFREQ2 (1 << 15)
#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
/* HT Capabilities*/
@ -3797,6 +3799,7 @@ struct wmi_pdev_param_map {
u32 enable_btcoex;
u32 rfkill_config;
u32 rfkill_enable;
u32 peer_stats_info_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@ -4577,6 +4580,13 @@ struct wmi_request_stats_cmd {
struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
enum wmi_peer_stats_info_request_type {
/* request stats of one specified peer */
WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01,
/* request stats of all peers belong to specified VDEV */
WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02,
};
/* Suspend option */
enum {
/* suspend */
@ -6508,7 +6518,10 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
/* NSS Mapping to FW */
#define WMI_PEER_NSS_MAP_ENABLE BIT(31)
#define WMI_PEER_NSS_160MHZ_MASK GENMASK(2, 0)
#define WMI_PEER_NSS_80_80MHZ_MASK GENMASK(5, 3)
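/* Editorial sketch (not part of the patch) of how a caller might compose
 * peer_bw_rxnss_override with the fields above, now that the (nss - 1) and
 * enable-bit packing moved out of ath10k_wmi_peer_assoc_fill_10_4(). The
 * function is hypothetical; only the masks and WMI_PEER_NSS_MAP_ENABLE come
 * from this header, and <linux/bitfield.h> is assumed for FIELD_PREP().
 */
static inline u32 ath10k_example_rxnss_override(u8 nss_160mhz, u8 nss_80p80mhz)
{
	return WMI_PEER_NSS_MAP_ENABLE |
	       FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, nss_160mhz - 1) |
	       FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, nss_80p80mhz - 1);
}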
struct wmi_10_4_peer_assoc_complete_cmd {
struct wmi_10_2_peer_assoc_complete_cmd cmd;
@ -7348,7 +7361,7 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
const struct wmi_start_scan_arg *arg);
void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
const struct wmi_wmm_params_arg *arg);
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
const struct wmi_channel_arg *arg);
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);

View File

@ -788,7 +788,7 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
irq = platform_get_irq_byname(ab->pdev,
irq_name[irq_idx]);
ab->irq_num[irq_idx] = irq;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
IRQF_TRIGGER_RISING,
irq_name[irq_idx], irq_grp);

View File

@ -60,9 +60,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
WME_AC_VO);
}
enum ath11k_skb_flags {
ATH11K_SKB_HW_80211_ENCAP = BIT(0),
};
struct ath11k_skb_cb {
dma_addr_t paddr;
u8 eid;
u8 flags;
struct ath11k *ar;
struct ieee80211_vif *vif;
} __packed;
@ -341,6 +346,11 @@ struct ath11k_sta {
u8 rssi_comb;
struct ath11k_htt_tx_stats *tx_stats;
struct ath11k_rx_peer_stats *rx_stats;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
#endif
};
#define ATH11K_NUM_CHANS 41
@ -387,6 +397,7 @@ struct ath11k_debug {
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
};
struct ath11k_per_peer_tx_stats {
@ -650,6 +661,10 @@ struct ath11k_base {
/* protected by data_lock */
u32 fw_crash_counter;
} stats;
u32 pktlog_defs_checksum;
/* Round robin based TCL ring selector */
atomic_t tcl_ring_selector;
};
struct ath11k_fw_stats_pdev {

View File

@ -195,7 +195,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
total_vdevs_started += ar->num_started_vdevs;
}
is_end = ((++num_vdev) == total_vdevs_started ? true : false);
is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats.vdevs,
&ar->debug.fw_stats.vdevs);
@ -215,7 +215,7 @@ void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs ? true : false);
is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats.bcn,
&ar->debug.fw_stats.bcn);
@ -698,6 +698,8 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
tlv_filter = ath11k_mac_mon_status_filter_default;
}
ar->debug.rx_filter = tlv_filter.rx_filter;
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@ -803,6 +805,9 @@ static const struct file_operations fops_soc_rx_stats = {
int ath11k_debug_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
if (IS_ERR_OR_NULL(ab->debugfs_soc)) {

View File

@ -67,7 +67,7 @@ struct debug_htt_stats_req {
u8 peer_addr[ETH_ALEN];
struct completion cmpln;
u32 buf_len;
u8 buf[0];
u8 buf[];
};
struct ath_pktlog_hdr {
@ -77,9 +77,11 @@ struct ath_pktlog_hdr {
u16 size;
u32 timestamp;
u32 type_specific_data;
u8 payload[0];
u8 payload[];
};
#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
@ -112,6 +114,12 @@ enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_LITE_RX = 24,
};
enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_AUTO,
ATH11K_DBG_AGGR_MODE_MANUAL,
ATH11K_DBG_AGGR_MODE_MAX,
};
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
@ -182,6 +190,11 @@ static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
return ar->debug.extd_rx_stats;
}
static inline int ath11k_debug_rx_filter(struct ath11k *ar)
{
return ar->debug.rx_filter;
}
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void
@ -263,6 +276,11 @@ static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr
return false;
}
static inline int ath11k_debug_rx_filter(struct ath11k *ar)
{
return 0;
}
static inline void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,

View File

@ -239,7 +239,7 @@ struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
*/
struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
};
/* == SOC ERROR STATS == */
@ -550,7 +550,7 @@ struct htt_tx_hwq_stats_cmn_tlv {
struct htt_tx_hwq_difs_latency_stats_tlv_v {
u32 hist_intvl;
/* histogram of ppdu post to hwsch -> cmd status received */
u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
};
/* NOTE: Variable length TLV, use length spec to infer array size */
@ -586,7 +586,7 @@ struct htt_tx_hwq_fes_result_stats_tlv_v {
struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
u32 hist_bin_size;
/* Histogram of number of mpdus on tried mpdu */
u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
};
/* NOTE: Variable length TLV, use length spec to infer array size
@ -1584,7 +1584,7 @@ struct htt_pdev_stats_twt_session_tlv {
struct htt_pdev_stats_twt_sessions_tlv {
u32 pdev_id;
u32 num_sessions;
struct htt_pdev_stats_twt_session_tlv twt_session[0];
struct htt_pdev_stats_twt_session_tlv twt_session[];
};
enum htt_rx_reo_resource_sample_id_enum {

View File

@ -8,6 +8,8 @@
#include "core.h"
#include "peer.h"
#include "debug.h"
#include "dp_tx.h"
#include "debug_htt_stats.h"
void
ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
@ -435,13 +437,22 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
return 0;
out:
vfree(stats_req);
ar->debug.htt_stats.stats_req = NULL;
return ret;
}
static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{
struct ieee80211_sta *sta = inode->i_private;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
ar->debug.htt_stats.stats_req = NULL;
mutex_unlock(&ar->conf_mutex);
return 0;
}
@ -533,6 +544,282 @@ static const struct file_operations fops_peer_pktlog = {
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
if (ret != 3)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, initiator, reason);
if (ret) {
ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
arsta->arvif->vdev_id, sta->addr, tid, initiator,
reason);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_delba = {
.write = ath11k_dbg_sta_write_delba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &status);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
tid, status);
if (ret) {
ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
arsta->arvif->vdev_id, sta->addr, tid, status);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba_resp = {
.write = ath11k_dbg_sta_write_addba_resp,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &buf_size);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, buf_size);
if (ret) {
ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba = {
.write = ath11k_dbg_sta_write_addba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[64];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len,
"aggregation mode: %s\n\n%s\n%s\n",
(arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
"auto" : "manual", "auto = 0", "manual = 1");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 aggr_mode;
int ret;
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
return -EINVAL;
if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
aggr_mode == arsta->aggr_mode) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
if (ret) {
ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
ret);
goto out;
}
arsta->aggr_mode = aggr_mode;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_aggr_mode = {
.read = ath11k_dbg_sta_read_aggr_mode,
.write = ath11k_dbg_sta_write_aggr_mode,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t
ath11k_write_htt_peer_stats_reset(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct htt_ext_stats_cfg_params cfg_params = { 0 };
int ret;
u8 type;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
if (ret)
return ret;
if (!type)
return ret;
mutex_lock(&ar->conf_mutex);
cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
&cfg_params,
0ULL);
if (ret) {
ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
return ret;
}
mutex_unlock(&ar->conf_mutex);
ret = count;
return ret;
}
static const struct file_operations fops_htt_peer_stats_reset = {
.write = ath11k_write_htt_peer_stats_reset,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@ -550,4 +837,14 @@ void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("peer_pktlog", 0644, dir, sta,
&fops_peer_pktlog);
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
}
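/* Usage note (editorial): the write handlers above gate everything on
 * ar->state == ATH11K_STATE_ON and on the station being in manual
 * aggregation mode, so a test session is expected to start with
 * "echo 1 > aggr_mode" (which also clears any existing ADDBA sessions via
 * ath11k_wmi_addba_clear_resp()), after which "tid buf_size" can be written
 * to addba, "tid status" to addba_resp and "tid initiator reason" to delba,
 * with TID values 0 through 15.
 */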

View File

@ -880,6 +880,8 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
@ -909,8 +911,10 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status)
if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
goto fail_cmn_srng_cleanup;
}
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)

View File

@ -36,6 +36,7 @@ struct dp_rx_tid {
struct ath11k_base *ab;
};
#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
struct dp_reo_cache_flush_elem {
@ -169,8 +170,8 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
#define DP_TX_COMP_RING_SIZE 8192
#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
#define DP_TX_COMP_RING_SIZE 32768
#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
#define DP_TCL_STATUS_RING_SIZE 32
#define DP_REO_DST_RING_MAX 4
@ -222,7 +223,13 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
/* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
u32 reo_cmd_cache_flush_count;
/**
* protects access to below fields,
* - reo_cmd_list
* - reo_cmd_cache_flush_list
* - reo_cmd_cache_flush_count
*/
spinlock_t reo_cmd_lock;
};

View File

@ -252,7 +252,7 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(rx_desc->mpdu_start_tag));
return tlv_tag == HAL_RX_MPDU_START ? true : false;
return tlv_tag == HAL_RX_MPDU_START;
}
static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
@ -565,6 +565,7 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
@ -651,15 +652,18 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
spin_unlock_bh(&dp->reo_cmd_lock);
/* Flush and invalidate aged REO desc from HW cache */
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
if (time_after(jiffies, elem->ts +
if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data);
@ -892,7 +896,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
@ -1491,7 +1495,8 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
return;
}
trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
ar->ab->pktlog_defs_checksum);
}
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
@ -2265,6 +2270,7 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u8 *hdr_status;
u16 msdu_len;
int ret;
@ -2293,8 +2299,13 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
skb_pull(msdu, HAL_RX_DESC_SIZE);
} else if (!rxcb->is_continuation) {
if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
ret = -EINVAL;
ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
sizeof(struct hal_rx_desc));
goto free_out;
}
skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
@ -2402,12 +2413,12 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
desc->buf_addr_info.info1);
desc.buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@ -2435,7 +2446,7 @@ try_again:
total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
desc.info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
@ -2443,15 +2454,15 @@ try_again:
continue;
}
rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc->info0);
desc.info0);
__skb_queue_tail(&msdu_list, msdu);
@ -2960,8 +2971,8 @@ static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer
return 0;
mic_fail:
(ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
(ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
@ -3389,6 +3400,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u8 *hdr_status;
u16 msdu_len;
spin_lock_bh(&rx_ring->idr_lock);
@ -3426,6 +3438,17 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
sizeof(struct hal_rx_desc));
dev_kfree_skb_any(msdu);
goto exit;
}
skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {

View File

@ -9,14 +9,14 @@
#include "hw.h"
#include "peer.h"
/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
static const u8
ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
/* TODO: Determine encap type based on vif_type and configuration */
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
@ -40,8 +40,11 @@ static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
if (!ieee80211_is_data_qos(hdr->frame_control))
if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
@ -84,15 +87,31 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
u8 ring_selector = 0, ring_map = 0;
bool tcl_ring_retry;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
if (!ieee80211_is_data(hdr->frame_control))
if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
/* Let the default ring selection be based on a round-robin
* fashion where one of the 3 tcl rings is selected based on
* the tcl_ring_selector counter. In case that ring
* is full/busy, we resort to the other available rings.
* If all rings are full, we drop the packet.
* TODO: Add throttling logic when all rings are full.
*/
ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
ring_map |= BIT(ti.ring_id);
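/* ring_map tracks the rings tried so far; once all DP_TCL_NUM_RING_MAX bits are set, every ring was found full and the packet is dropped. */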
tx_ring = &dp->tx_ring[ti.ring_id];
@ -101,8 +120,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (ret < 0)
return -ENOSPC;
if (ret < 0) {
if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
return -ENOSPC;
/* Check if the next ring is available */
ring_selector++;
goto tcl_ring_sel;
}
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
@ -149,7 +174,10 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
* skb_checksum_help() is needed
*/
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
case HAL_TCL_ENCAP_TYPE_802_3:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
goto fail_remove_idr;
@ -178,11 +206,21 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (!hal_tcl_desc) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
* So add tx packet throttling logic in future if required.
*/
ath11k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
/* Checking another ring for available tcl descriptors now,
* upon failure due to a full tcl ring, is better than
* checking this ring earlier for each packet tx.
* Restart ring selection if some rings are not checked yet.
*/
if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
tcl_ring_retry = true;
ring_selector++;
}
goto fail_unmap_dma;
}
@ -206,6 +244,9 @@ fail_remove_idr:
FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (tcl_ring_retry)
goto tcl_ring_sel;
return ret;
}
@ -543,8 +584,12 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
/* cmd_num should start from 1; on failure, return the error code */
if (cmd_num < 0)
return cmd_num;
/* reo cmd ring descriptors have cmd_num starting from 1 */
if (cmd_num <= 0)
if (cmd_num == 0)
return -EINVAL;
if (!cb)

View File

@ -599,7 +599,7 @@ struct hal_srng {
/* Interrupt mitigation - timer threshold in us */
#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
/* HW SRNG configuration table */
struct hal_srng_config {

View File

@ -477,7 +477,7 @@ enum hal_tlv_tag {
struct hal_tlv_hdr {
u32 tl;
u8 value[0];
u8 value[];
} __packed;
#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
@ -1972,7 +1972,7 @@ struct hal_rx_reo_queue {
u32 processed_total_bytes;
u32 info5;
u32 rsvd[3];
struct hal_rx_reo_queue_ext ext_desc[0];
struct hal_rx_reo_queue_ext ext_desc[];
} __packed;
/* hal_rx_reo_queue

View File

@ -23,7 +23,7 @@ struct hal_rx_wbm_rel_info {
struct hal_rx_mon_status_tlv_hdr {
u32 hdr;
u8 value[0];
u8 value[];
};
enum hal_rx_su_mu_coding {

View File

@ -111,7 +111,7 @@ struct ath11k_hw_params {
struct ath11k_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
u8 data[];
};
enum ath11k_bd_ie_board_type {

View File

@ -33,6 +33,12 @@
.max_power = 30, \
}
/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
MODULE_PARM_DESC(frame_mode,
"Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@ -1142,6 +1148,10 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
IEEE80211_VHT_MCS_NOT_SUPPORTED)
arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
@ -3682,10 +3692,10 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
struct sk_buff *msdu = skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath11k *ar = ctx;
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu = skb;
struct ieee80211_tx_info *info;
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
@ -3725,6 +3735,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info;
dma_addr_t paddr;
int buf_id;
int ret;
@ -3736,11 +3747,14 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
if (buf_id < 0)
return -ENOSPC;
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
info = IEEE80211_SKB_CB(skb);
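/* Protected robust mgmt frames get their CCMP MIC appended further down the tx path, so reserve room for it here. */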
if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
}
}
paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
@ -3789,15 +3803,30 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
info = IEEE80211_SKB_CB(skb);
arvif = ath11k_vif_to_arvif(info->control.vif);
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
ret);
if (!info->control.vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n",
info->control.flags);
ieee80211_free_txskb(ar->hw, skb);
continue;
}
arvif = ath11k_vif_to_arvif(info->control.vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
ieee80211_free_txskb(ar->hw, skb);
} else {
atomic_inc(&ar->num_pending_mgmt_tx);
}
} else {
atomic_inc(&ar->num_pending_mgmt_tx);
ath11k_warn(ar->ab,
"dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n",
arvif->vdev_id, info->control.flags,
arvif->is_started);
ieee80211_free_txskb(ar->hw, skb);
}
}
}
@ -3837,6 +3866,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ath11k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@ -3845,7 +3875,9 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
if (ieee80211_is_mgmt(hdr->frame_control)) {
if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
@ -3877,8 +3909,10 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
if (enable)
if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
}
ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
@ -4124,6 +4158,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
int hw_encap = 0;
u16 nss;
int i;
int ret;
@ -4208,6 +4243,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
@ -4216,7 +4253,22 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
hw_encap = 1;
break;
default:
break;
}
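/* Use ethernet encap only when mac80211 accepts hw 802.11 encapsulation for this vif; otherwise fall back to native wifi. */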
if (ieee80211_set_hw_80211_encap(vif, hw_encap))
param_value = ATH11K_HW_TXRX_ETHERNET;
else
param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@ -4378,6 +4430,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->num_created_vdevs--;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
@ -4643,6 +4697,8 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
}
ar->num_started_vdevs++;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
/* Enable CAC Flag in the driver by checking the channel DFS cac time,
* i.e dfs_cac_ms value which will be valid only for radar channels
@ -4701,6 +4757,8 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
@ -5891,6 +5949,9 @@ int ath11k_mac_register(struct ath11k_base *ab)
int i;
int ret;
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;

View File

@ -17,7 +17,26 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (memcmp(peer->addr, addr, ETH_ALEN))
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
u8 pdev_idx, const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->pdev_idx != pdev_idx)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@ -34,7 +53,7 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (memcmp(peer->addr, addr, ETH_ALEN))
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@ -200,6 +219,17 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOBUFS;
}
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
ath11k_info(ar->ab,
"ignoring the peer %pM creation on same pdev idx %d\n",
param->peer_addr, ar->pdev_idx);
return -EINVAL;
}
spin_unlock_bh(&ar->ab->base_lock);
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
ath11k_warn(ar->ab,
@ -225,6 +255,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOENT;
}
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;

View File

@ -13,6 +13,7 @@ struct ath11k_peer {
u8 addr[ETH_ALEN];
int peer_id;
u16 ast_hash;
u8 pdev_idx;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];

View File

@ -21,14 +21,16 @@ static inline void trace_ ## name(proto) {}
#define TRACE_SYSTEM ath11k
TRACE_EVENT(ath11k_htt_pktlog,
TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
u32 pktlog_checksum),
TP_ARGS(ar, buf, buf_len),
TP_ARGS(ar, buf, buf_len, pktlog_checksum),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, buf_len)
__field(u32, pktlog_checksum)
__dynamic_array(u8, pktlog, buf_len)
),
@ -36,14 +38,16 @@ TRACE_EVENT(ath11k_htt_pktlog,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->buf_len = buf_len;
__entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
"%s %s size %hu",
"%s %s size %hu pktlog_checksum %d",
__get_str(driver),
__get_str(device),
__entry->buf_len
__entry->buf_len,
__entry->pktlog_checksum
)
);

View File

@ -87,8 +87,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
[WMI_TAG_READY_EVENT]
= {.min_len = sizeof(struct wmi_ready_event) },
[WMI_TAG_READY_EVENT] = {
.min_len = sizeof(struct wmi_ready_event_min) },
[WMI_TAG_SERVICE_AVAILABLE_EVENT]
= {.min_len = sizeof(struct wmi_service_available_event) },
[WMI_TAG_PEER_ASSOC_CONF_EVENT]
@ -2368,6 +2368,146 @@ int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
return ret;
}
int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_delba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_delba_send_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->initiator = initiator;
cmd->reasoncode = reason;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
vdev_id, mac, tid, initiator, reason);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_DELBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_setresponse_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->statuscode = status;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
vdev_id, mac, tid, status);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_send_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->buffersize = buf_size;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
vdev_id, mac, tid, buf_size);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_clear_resp_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
vdev_id, mac);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@ -2779,7 +2919,7 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DIeABLE_CMDID");
ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
@ -3105,7 +3245,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config.rx_batchmode = TARGET_RX_BATCHMODE;
config.peer_map_unmap_v2_support = 1;
config.twt_ap_pdev_count = 2;
config.twt_ap_pdev_count = ab->num_radios;
config.twt_ap_sta_count = 1000;
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
@ -3740,8 +3880,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
atomic_dec(&ar->num_pending_mgmt_tx);
/* WARN if we receive this event without any pending mgmt tx */
if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
WARN_ON_ONCE(1);
return 0;
}
@ -4851,7 +4992,7 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_rdy_parse *rdy_parse = data;
struct wmi_ready_event *fixed_param;
struct wmi_ready_event fixed_param;
struct wmi_mac_addr *addr_list;
struct ath11k_pdev *pdev;
u32 num_mac_addr;
@ -4859,11 +5000,16 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
switch (tag) {
case WMI_TAG_READY_EVENT:
fixed_param = (struct wmi_ready_event *)ptr;
ab->wlan_init_status = fixed_param->status;
rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
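/* The ready event TLV may be shorter or longer than our struct depending on firmware, so copy no more than both allow. */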
memset(&fixed_param, 0, sizeof(fixed_param));
memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
min_t(u16, sizeof(fixed_param), len));
ab->wlan_init_status = fixed_param.ready_event_min.status;
rdy_parse->num_extra_mac_addr =
fixed_param.ready_event_min.num_extra_mac_addr;
ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
ether_addr_copy(ab->mac_addr,
fixed_param.ready_event_min.mac_addr.addr);
ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
ab->wmi_ready = true;
break;
case WMI_TAG_ARRAY_FIXED_STRUCT:

View File

@ -39,7 +39,7 @@ struct wmi_cmd_hdr {
struct wmi_tlv {
u32 header;
u8 value[0];
u8 value[];
} __packed;
#define WMI_TLV_LEN GENMASK(15, 0)
@ -1976,6 +1976,43 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
WMI_TLV_SERVICE_FETCH_TX_PN = 181,
WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
WMI_TLV_SERVICE_VOW_ENABLE = 197,
WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
WMI_TLV_SERVICE_BROADCAST_TWT = 199,
WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
WMI_TLV_SERVICE_PS_TDCC = 201,
WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_MAX_EXT_SERVICE
@ -2345,7 +2382,7 @@ struct wmi_mac_addr {
} __packed;
} __packed;
struct wmi_ready_event {
struct wmi_ready_event_min {
struct wmi_abi_version fw_abi_vers;
struct wmi_mac_addr mac_addr;
u32 status;
@ -2355,6 +2392,12 @@ struct wmi_ready_event {
u32 num_extra_peers;
} __packed;
struct wmi_ready_event {
struct wmi_ready_event_min ready_event_min;
u32 max_ast_index;
u32 pktlog_defs_checksum;
} __packed;
struct wmi_service_available_event {
u32 wmi_service_segment_offset;
u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
@ -3649,6 +3692,37 @@ struct wmi_therm_throt_level_config_info {
u32 prio;
} __packed;
struct wmi_delba_send_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 tid;
u32 initiator;
u32 reasoncode;
} __packed;
struct wmi_addba_setresponse_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 tid;
u32 statuscode;
} __packed;
struct wmi_addba_send_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
u32 tid;
u32 buffersize;
} __packed;
struct wmi_addba_clear_resp_cmd {
u32 tlv_header;
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
} __packed;
struct wmi_pdev_pktlog_filter_info {
u32 tlv_header;
struct wmi_mac_addr peer_macaddr;
@ -4531,6 +4605,9 @@ enum wmi_sta_ps_param_rx_wake_policy {
WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
};
/* Do not change existing values! Used by the ath11k_frame_mode
* module parameter.
*/
enum ath11k_hw_txrx_mode {
ATH11K_HW_TXRX_RAW = 0,
ATH11K_HW_TXRX_NATIVE_WIFI = 1,
@ -4822,6 +4899,13 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
struct scan_chan_list_params *chan_list);
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
u32 pdev_id);
int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac);
int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size);
int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status);
int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason);
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int

View File

@ -501,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
bool ofdm_flag = as->ofdm_errors > ofdm_high;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(as);

View File

@ -160,7 +160,7 @@ enum ath6kl_fw_capability {
struct ath6kl_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
u8 data[];
};
enum ath6kl_hw_flags {
@ -406,7 +406,7 @@ struct ath6kl_mgmt_buff {
u32 id;
bool no_cck;
size_t len;
u8 buf[0];
u8 buf[];
};
struct ath6kl_sta {

View File

@ -30,7 +30,7 @@ struct ath6kl_fwlog_slot {
__le32 length;
/* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
u8 payload[0];
u8 payload[];
};
#define ATH6KL_FWLOG_MAX_ENTRIES 20

View File

@ -199,7 +199,7 @@ struct hif_scatter_req {
u32 scat_q_depth;
struct hif_scatter_item scat_list[0];
struct hif_scatter_item scat_list[];
};
struct ath6kl_irq_proc_registers {

View File

@ -19,6 +19,8 @@
#include "ar9002_phy.h"
#define AR9285_CLCAL_REDO_THRESH 1
/* AGC & I/Q calibrations time limit, ms */
#define AR9002_CAL_MAX_TIME 30000
enum ar9002_cal_types {
ADC_GAIN_CAL = BIT(0),
@ -37,9 +39,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
/* Run even/odd ADCs calibrations for HT40 channels only */
if (IS_CHAN_HT40(chan))
supported = true;
break;
}
@ -105,6 +106,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
} else {
ar9002_hw_setup_calibration(ah, currCal);
}
} else if (time_after(jiffies, ah->cal_start_time +
msecs_to_jiffies(AR9002_CAL_MAX_TIME))) {
REG_CLR_BIT(ah, AR_PHY_TIMING_CTRL4(0),
AR_PHY_TIMING_CTRL4_DO_CAL);
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"calibration timeout\n");
currCal->calState = CAL_WAITING; /* Try later */
iscaldone = true;
}
} else if (!(caldata->CalValid & currCal->calData->calType)) {
ath9k_hw_reset_calibration(ah, currCal);
@ -664,8 +673,13 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
int ret;
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
if (ah->caldata) {
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (longcal) /* Remember to not miss */
set_bit(LONGCAL_PENDING, &ah->caldata->cal_flags);
else if (test_bit(LONGCAL_PENDING, &ah->caldata->cal_flags))
longcal = true; /* Respin a previous one */
}
percal_pending = (currCal &&
(currCal->calState == CAL_RUNNING ||
@ -675,9 +689,24 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
return 0;
ah->cal_list_curr = currCal = currCal->calNext;
if (currCal->calState == CAL_WAITING)
ath9k_hw_reset_calibration(ah, currCal);
/* Look for the next waiting calibration, if any */
for (currCal = currCal->calNext; currCal != ah->cal_list_curr;
currCal = currCal->calNext) {
if (currCal->calState == CAL_WAITING)
break;
}
if (currCal->calState == CAL_WAITING) {
percal_pending = true;
ah->cal_list_curr = currCal;
} else {
percal_pending = false;
ah->cal_list_curr = ah->cal_list;
}
}
/* Do not start the next calibration while the longcal is in action */
if (percal_pending && !nfcal && !longcal) {
ath9k_hw_reset_calibration(ah, currCal);
return 0;
}
@ -701,6 +730,9 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
}
if (longcal) {
if (ah->caldata)
clear_bit(LONGCAL_PENDING,
&ah->caldata->cal_flags);
ath9k_hw_start_nfcal(ah, false);
/* Do periodic PAOffset Cal */
ar9002_hw_pa_cal(ah, false);
@ -858,9 +890,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (ah->caldata)
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
/* Enable IQ, ADC Gain and ADC DC offset CALs */

View File

@ -176,6 +176,7 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
ath9k_hw_setup_calibration(ah, currCal);
ah->cal_start_time = jiffies;
currCal->calState = CAL_RUNNING;
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
@ -209,14 +210,17 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
}
if (!(ah->supp_cals & currCal->calData->calType))
return true;
currCal = ah->cal_list;
do {
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType,
ah->curchan->chan->center_freq);
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType, ah->curchan->chan->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
currCal = currCal->calNext;
} while (currCal != ah->cal_list);
return false;
}

View File

@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: over RX MAX_PKT_NUM\n");
goto err;
}
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@ -638,9 +643,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@ -680,14 +685,15 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@ -745,6 +751,7 @@ resubmit:
return;
free:
kfree_skb(skb);
kfree(rx_buf);
urb->context = NULL;
}
@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@ -880,6 +897,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@ -943,6 +972,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
@ -973,7 +1004,7 @@ err:
return -ENOMEM;
}
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
usb_set_intfdata(interface, NULL);

View File

@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
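/* Pairs an RX skb with its owning device so URB completion callbacks can recover both from urb->context. */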
struct rx_buf {
struct sk_buff *skb;
struct hif_device_usb *hif_dev;
};
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)
@ -133,5 +138,6 @@ struct hif_device_usb {
int ath9k_hif_usb_init(void);
void ath9k_hif_usb_exit(void);
void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
#endif /* HTC_USB_H */

View File

@ -931,8 +931,9 @@ err_init:
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product, u32 drv_info)
{
struct ieee80211_hw *hw;
struct hif_device_usb *hif_dev;
struct ath9k_htc_priv *priv;
struct ieee80211_hw *hw;
int ret;
hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
return 0;
err_init:
ath9k_deinit_wmi(priv);
ath9k_stop_wmi(priv);
hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
ath9k_hif_usb_dealloc_urbs(hif_dev);
ath9k_destoy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_deinit_wmi(htc_handle->drv_priv);
ath9k_stop_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}

View File

@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
ath_warn(common,
"Short RX data len, dropping (dlen: %d)\n",
rs_datalen);
ath_dbg(common, ANY,
"Short RX data len, dropping (dlen: %d)\n",
rs_datalen);
goto rx_next;
}

View File

@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
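/* Validate the target-supplied endpoint id before it is used as an array index below. */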
if (epid < 0 || epid >= ENDPOINT_MAX)
return;
service_id = be16_to_cpu(svc_rspmsg->service_id);
max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
endpoint = &target->endpoint[epid];
@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC credit config timeout\n");
kfree_skb(skb);
return -ETIMEDOUT;
}
@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC start timeout\n");
kfree_skb(skb);
return -ETIMEDOUT;
}
@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target,
if (!time_left) {
dev_err(target->dev, "Service connection timeout for: %d\n",
service_connreq->service_id);
kfree_skb(skb);
return -ETIMEDOUT;
}

View File

@ -427,6 +427,7 @@ enum ath9k_cal_flags {
TXIQCAL_DONE,
TXCLCAL_DONE,
SW_PKDET_DONE,
LONGCAL_PENDING,
};
struct ath9k_hw_cal_data {
@ -833,6 +834,7 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
unsigned long cal_start_time;
struct ath9k_cal_list iq_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;

View File

@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
return wmi;
}
void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi = priv->wmi;
mutex_lock(&wmi->op_mutex);
wmi->stopped = true;
mutex_unlock(&wmi->op_mutex);
}
void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
{
kfree(priv->wmi);
}
@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
return -ETIMEDOUT;
}

View File

@ -179,7 +179,6 @@ struct wmi {
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
enum htc_endpoint_id *wmi_ctrl_epid);
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
void ath9k_wmi_event_tasklet(unsigned long data);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \

View File

@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
if (SUPP(CARL9170FW_WLANTX_CAB)) {
if_comb_types |=
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO);
if_comb_types |= BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_comb_types |=

View File

@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
(vif->type != NL80211_IFTYPE_AP));
/* While the driver supports HW offload in a single
* P2P client configuration, it doesn't support HW
* offload in the favourit, concurrent P2P GO+CLIENT
* configuration. Hence, HW offload will always be
* disabled for P2P.
/* The driver used to have P2P GO+CLIENT support,
* but since this was dropped and we don't know if
* there are any gremlins lurking in the shadows,
* it's best to keep HW offload disabled for P2P.
*/
ar->disable_offload |= vif->p2p;
@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
/* P2P GO [master] use-case
* Because the P2P GO station is selected dynamically
* by all participating peers of a WIFI Direct network,
* the driver has be able to change the main interface
* operating mode on the fly.
*/
if (main_vif->p2p && vif->p2p &&
vif->type == NL80211_IFTYPE_AP) {
old_main = main_vif;
break;
}
err = -EBUSY;
rcu_read_unlock();

View File

@ -5507,7 +5507,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
core = (cmd & 0x3000) >> 12;
type = (cmd & 0x0F00) >> 8;
if (phy6or5x && updated[core] == 0) {
if (phy6or5x && !updated[core]) {
b43_nphy_update_tx_cal_ladder(dev, core);
updated[core] = true;
}

View File

@ -765,7 +765,7 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
bool stop;
while (1) {
stop = (pio_rx_frame(q) == 0);
stop = !pio_rx_frame(q);
if (stop)
break;
cond_resched();

View File

@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
goto drop;
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));

View File

@ -961,7 +961,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),

View File

@ -4449,6 +4449,11 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
mgmt_ie_len = &saved_ie->assoc_req_ie_len;
mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
break;
case BRCMF_VNDR_IE_ASSOCRSP_FLAG:
mgmt_ie_buf = saved_ie->assoc_res_ie;
mgmt_ie_len = &saved_ie->assoc_res_ie_len;
mgmt_ie_buf_len = sizeof(saved_ie->assoc_res_ie);
break;
default:
err = -EPERM;
bphy_err(drvr, "not suitable type\n");
@ -4595,6 +4600,15 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
/* Set Assoc Response IEs to FW */
err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG,
beacon->assocresp_ies,
beacon->assocresp_ies_len);
if (err)
brcmf_err("Set Assoc Resp IE Failed\n");
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc Resp\n");
return err;
}
@ -4727,7 +4741,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if ((dev_role == NL80211_IFTYPE_AP) &&
((ifp->ifidx == 0) ||
!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
(!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB) &&
!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)))) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
bphy_err(drvr, "BRCMF_C_DOWN error %d\n",

View File

@ -153,19 +153,23 @@ enum brcmf_vif_status {
* @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
* @assoc_res_ie: IE info for association response frame.
* @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
* @assoc_res_ie_len: IE info length for association response frame.
*/
struct vif_saved_ie {
u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
u8 assoc_req_ie[IE_MAX_LEN];
u8 assoc_res_ie[IE_MAX_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
u32 assoc_req_ie_len;
u32 assoc_res_ie_len;
};
/**

View File

@ -180,14 +180,8 @@ again:
int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
{
void *address;
address = commonring->buf_addr;
address += (commonring->f_ptr * commonring->item_len);
if (commonring->f_ptr > commonring->w_ptr) {
address = commonring->buf_addr;
if (commonring->f_ptr > commonring->w_ptr)
commonring->f_ptr = 0;
}
commonring->f_ptr = commonring->w_ptr;

View File

@ -579,9 +579,6 @@ static int brcmf_netdev_stop(struct net_device *ndev)
brcmf_cfg80211_down(ndev);
if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
brcmf_net_setcarrier(ifp, false);
return 0;

View File

@ -47,13 +47,10 @@ struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return drvr->wiphy->debugfsdir;
}
int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
struct dentry *e;
WARN(!drvr->wiphy->debugfsdir, "wiphy not (yet) registered\n");
e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
drvr->wiphy->debugfsdir, read_fn);
return PTR_ERR_OR_ZERO(e);
debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
drvr->wiphy->debugfsdir, read_fn);
}

View File

@ -116,8 +116,8 @@ struct brcmf_bus;
struct brcmf_pub;
#ifdef DEBUG
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data));
void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data));
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len);
#else
@ -126,11 +126,9 @@ static inline struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
return ERR_PTR(-ENOENT);
}
static inline
int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
return 0;
}
void brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{ }
static inline
int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
size_t len)

View File

@ -285,13 +285,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
if (!err)
ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
drvr->settings->feature_disable);
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_firmware_overrides(drvr);

View File

@ -2356,7 +2356,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
fws->drvr = drvr;
fws->fcmode = drvr->settings->fcmode;
if ((drvr->bus_if->always_use_fws_queue == false) &&
if (!drvr->bus_if->always_use_fws_queue &&
(fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
fws->avoid_queueing = true;
brcmf_dbg(INFO, "FWS queueing will be avoided\n");

View File

@ -17,6 +17,7 @@
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "feature.h"
/* parameters used for p2p escan */
#define P2PAPI_SCAN_NPROBES 1
@ -59,12 +60,13 @@
#define P2P_AF_MIN_DWELL_TIME 100
#define P2P_AF_MED_DWELL_TIME 400
#define P2P_AF_LONG_DWELL_TIME 1000
#define P2P_AF_TX_MAX_RETRY 1
#define P2P_AF_TX_MAX_RETRY 5
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
#define P2P_AF_RETRY_DELAY_TIME 40
/* WiFi P2P Public Action Frame OUI Subtypes */
#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
@ -92,6 +94,9 @@
#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
/* Mask for retry counter of custom dwell time */
#define CUSTOM_RETRY_MASK 0xff000000
/**
* struct brcmf_p2p_disc_st_le - set discovery state in firmware.
*
@ -457,10 +462,21 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
bool random_addr = false;
bool local_admin = false;
if (!dev_addr || is_zero_ether_addr(dev_addr))
random_addr = true;
if (!dev_addr || is_zero_ether_addr(dev_addr)) {
/* If the primary interface address is already locally
* administered, create a new random address.
*/
if (pri_ifp->mac_addr[0] & 0x02) {
random_addr = true;
} else {
dev_addr = pri_ifp->mac_addr;
local_admin = true;
}
}
/* Generate the P2P Device Address obtaining a random ethernet
* address with the locally administered bit set.
@ -470,13 +486,20 @@ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
else
memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
if (local_admin)
p2p->dev_addr[0] |= 0x02;
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
* different from the P2P Device Address, but also locally administered.
*/
memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
p2p->int_addr[0] |= 0x02;
p2p->int_addr[4] ^= 0x80;
memcpy(p2p->conn_int_addr, p2p->dev_addr, ETH_ALEN);
p2p->conn_int_addr[0] |= 0x02;
p2p->conn_int_addr[4] ^= 0x80;
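/* The second connection interface flips a different bit in octet 4 so both P2P connection addresses stay distinct. */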
memcpy(p2p->conn2_int_addr, p2p->dev_addr, ETH_ALEN);
p2p->conn2_int_addr[0] |= 0x02;
p2p->conn2_int_addr[4] ^= 0x90;
}
/**
@ -1491,6 +1514,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
{
struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
s32 timeout = 0;
@ -1500,7 +1524,13 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
/* check if it is a p2p_presence response */
p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
else
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
@ -1640,6 +1670,17 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
return err;
}
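/* A requested dwell time with any CUSTOM_RETRY_MASK bits set carries the actual dwell limit in its low bits (in ms); action frame retries stop once that limit has elapsed. */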
static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell,
unsigned long dwell_jiffies)
{
if ((requested_dwell & CUSTOM_RETRY_MASK) &&
(jiffies_to_msecs(jiffies - dwell_jiffies) >
(requested_dwell & ~CUSTOM_RETRY_MASK))) {
brcmf_err("Action frame TX retry time over dwell time!\n");
return true;
}
return false;
}
/**
* brcmf_p2p_send_action_frame() - send action frame .
*
@ -1664,6 +1705,10 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
s32 tx_retry;
s32 extra_listen_time;
uint delta_ms;
unsigned long dwell_jiffies = 0;
bool dwell_overflow = false;
s32 requested_dwell = af_params->dwell_time;
action_frame = &af_params->action_frame;
action_frame_len = le16_to_cpu(action_frame->len);
@ -1775,12 +1820,21 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
/* update channel */
af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
}
dwell_jiffies = jiffies;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
tx_retry = 0;
while (!p2p->block_gon_req_tx &&
(ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
(!ack) && (tx_retry < P2P_AF_TX_MAX_RETRY) &&
!dwell_overflow) {
if (af_params->channel)
msleep(P2P_AF_RETRY_DELAY_TIME);
ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
tx_retry++;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
}
if (ack == false) {
bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
@ -1994,7 +2048,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
if_request.type = cpu_to_le16((u16)if_type);
if_request.chspec = cpu_to_le16(chanspec);
memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
memcpy(if_request.addr, p2p->conn_int_addr, sizeof(if_request.addr));
brcmf_cfg80211_arm_vif_event(cfg, vif);
err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
@ -2149,6 +2203,27 @@ fail:
return ERR_PTR(err);
}
int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
{
int i;
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
if (!ifp)
return -ENODEV;
for (i = P2PAPI_BSSCFG_CONNECTION; i < P2PAPI_BSSCFG_MAX; i++) {
if (!cfg->p2p.bss_idx[i].vif) {
if (i == P2PAPI_BSSCFG_CONNECTION2 &&
!(brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
brcmf_err("Multi p2p not supported");
return -EIO;
}
return i;
}
}
return -EIO;
}
/**
* brcmf_p2p_add_vif() - create a new P2P virtual interface.
*
@ -2168,7 +2243,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
int err;
int err = 0;
int connidx;
u8 *p2p_intf_addr;
if (brcmf_cfg80211_vif_event_armed(cfg))
return ERR_PTR(-EBUSY);
@ -2194,9 +2271,21 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
iftype);
connidx = brcmf_p2p_get_conn_idx(cfg);
if (connidx == P2PAPI_BSSCFG_CONNECTION)
p2p_intf_addr = cfg->p2p.conn_int_addr;
else if (connidx == P2PAPI_BSSCFG_CONNECTION2)
p2p_intf_addr = cfg->p2p.conn2_int_addr;
else
err = -EINVAL;
if (!err)
err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp,
p2p_intf_addr, iftype);
if (err) {
brcmf_err("request p2p interface failed\n");
brcmf_cfg80211_arm_vif_event(cfg, NULL);
goto fail;
}
@ -2228,7 +2317,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
goto fail;
}
cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
cfg->p2p.bss_idx[connidx].vif = vif;
/* Disable firmware roaming for P2P interface */
brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
if (iftype == BRCMF_FIL_P2P_IF_GO) {

View File

@ -14,13 +14,15 @@ struct brcmf_cfg80211_info;
*
* @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
* @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
* @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
* @P2PAPI_BSSCFG_CONNECTION: maps to driver's 1st P2P connection bsscfg.
* @P2PAPI_BSSCFG_CONNECTION2: maps to driver's 2nd P2P connection bsscfg.
* @P2PAPI_BSSCFG_MAX: used for range checking.
*/
enum p2p_bss_type {
P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
P2PAPI_BSSCFG_CONNECTION, /* driver's 1st P2P connection bsscfg */
P2PAPI_BSSCFG_CONNECTION2, /* driver's 2nd P2P connection bsscfg */
P2PAPI_BSSCFG_MAX
};
@ -119,7 +121,8 @@ struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
unsigned long status;
u8 dev_addr[ETH_ALEN];
u8 int_addr[ETH_ALEN];
u8 conn_int_addr[ETH_ALEN];
u8 conn2_int_addr[ETH_ALEN];
struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
struct timer_list listen_timer;
u8 listen_channel;

View File

@ -1431,6 +1431,7 @@ int brcms_up(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
void brcms_down(struct brcms_info *wl)
__must_hold(&wl->lock)
{
uint callbacks, ret_val = 0;
@ -1717,6 +1718,7 @@ int brcms_check_firmwares(struct brcms_info *wl)
* precondition: perimeter lock has been acquired
*/
bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
__must_hold(&wl->lock)
{
bool blocked = brcms_c_check_radio_disabled(wl->wlc);
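
__must_hold() is a sparse annotation declaring that the function is entered and left with the given lock held; outside the checker it expands to nothing, so it costs nothing at runtime. A minimal sketch of how such an annotation is typically defined and used (simplified from the kernel's compiler_types.h; the pthread lock is just a stand-in):

/* Sketch: a lock-context annotation that only the static checker sees. */
#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__
#define __must_hold(x)	__attribute__((context(x, 1, 1)))
#else
#define __must_hold(x)
#endif

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* caller must already hold @lock, as the annotation documents */
static void bump_locked(void)
	__must_hold(&lock)
{
	counter++;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	bump_locked();
	pthread_mutex_unlock(&lock);
	printf("counter=%d\n", counter);
	return 0;
}
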

View File

@ -1057,7 +1057,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
if (*fatal == true)
if (*fatal)
return false;
n++;
}
@ -3768,17 +3768,14 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
* Write this BSS config's MAC address to core.
* Updates RXE match engine.
*/
static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
static void brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
{
int err = 0;
struct brcms_c_info *wlc = bsscfg->wlc;
/* enter the MAC addr into the RXE match registers */
brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, wlc->pub->cur_etheraddr);
brcms_c_ampdu_macaddr_upd(wlc);
return err;
}
/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).

View File

@ -304,9 +304,8 @@ int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
* update wlc->stf->ss_opmode which represents the operational stf_ss mode
* we're using
*/
int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
{
int ret_code = 0;
u8 prev_stf_ss;
u8 upd_stf_ss;
@ -325,7 +324,7 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
PHY_TXC1_MODE_SISO : PHY_TXC1_MODE_CDD;
} else {
if (wlc->band != band)
return ret_code;
return;
upd_stf_ss = (wlc->stf->txstreams == 1) ?
PHY_TXC1_MODE_SISO : band->band_stf_ss_mode;
}
@ -333,8 +332,6 @@ int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
wlc->stf->ss_opmode = upd_stf_ss;
brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
}
return ret_code;
}
int brcms_c_stf_attach(struct brcms_c_info *wlc)

View File

@ -25,7 +25,7 @@ void brcms_c_stf_detach(struct brcms_c_info *wlc);
void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
u16 *ss_algo_channel, u16 chanspec);
int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);

View File

@ -3770,10 +3770,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
struct pci_dev *dev = priv->pci_dev;
q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
if (!q->txb) {
IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
if (!q->txb)
return -ENOMEM;
}
q->bd =
pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
@ -7042,23 +7040,22 @@ static int ipw_qos_association(struct ipw_priv *priv,
* off the network from the associated setting, adjust the QoS
* setting
*/
static int ipw_qos_association_resp(struct ipw_priv *priv,
static void ipw_qos_association_resp(struct ipw_priv *priv,
struct libipw_network *network)
{
int ret = 0;
unsigned long flags;
u32 size = sizeof(struct libipw_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL) ||
(priv->assoc_network == NULL))
return ret;
return;
if (!(priv->status & STATUS_ASSOCIATED))
return ret;
return;
if ((priv->ieee->iw_mode != IW_MODE_INFRA))
return ret;
return;
spin_lock_irqsave(&priv->ieee->lock, flags);
if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
@ -7088,8 +7085,6 @@ static int ipw_qos_association_resp(struct ipw_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
return ret;
}
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
@ -10643,10 +10638,8 @@ static void ipw_bg_link_down(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
static int ipw_setup_deferred_work(struct ipw_priv *priv)
static void ipw_setup_deferred_work(struct ipw_priv *priv)
{
int ret = 0;
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@ -10680,8 +10673,6 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
return ret;
}
static void shim__set_security(struct net_device *dev,
@ -11662,11 +11653,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
err = ipw_setup_deferred_work(priv);
if (err) {
IPW_ERROR("Unable to setup deferred work\n");
goto out_iounmap;
}
ipw_setup_deferred_work(priv);
ipw_sw_reset(priv, 1);

View File

@ -13,7 +13,8 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o

View File

@ -90,7 +90,8 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@ -120,6 +121,10 @@
IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@ -229,6 +234,15 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
};
const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
@ -238,6 +252,19 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 5000,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
.xtal_latency = 1820,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
@ -250,15 +277,7 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.integrated = true,
.xtal_latency = 12000,
.low_latency_xtal = true,
};
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
/*
@ -522,22 +541,32 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
.trans.xtal_latency = 12000,
.trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@ -545,16 +574,34 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
.trans.xtal_latency = 12000,
.trans.low_latency_xtal = true,
};
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
.name = "Intel(R) Wi-Fi 6 AX411 160MHz",
.fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
.name = "Intel(R) Wi-Fi 6 AX211 160MHz",
.fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -573,3 +620,5 @@ MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

View File

@ -151,6 +151,82 @@ found:
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i;
bool enabled;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
enabled = !!wifi_pkg->package.elements[0].integer.value;
if (!enabled) {
*black_list_size = -1;
IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
ret = 0;
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[1].integer.value >
APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[1].integer.value);
ret = -EINVAL;
goto out_free;
}
*black_list_size = wifi_pkg->package.elements[1].integer.value;
IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size);
if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
*black_list_size);
ret = -EINVAL;
goto out_free;
}
for (i = 0; i < *black_list_size; i++) {
u32 country;
if (wifi_pkg->package.elements[2 + i].type !=
ACPI_TYPE_INTEGER) {
IWL_DEBUG_RADIO(fwrt,
"TAS invalid array elem %d\n", 2 + i);
ret = -EINVAL;
goto out_free;
}
country = wifi_pkg->package.elements[2 + i].integer.value;
black_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country);
}
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
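
The WTAS package parsed above is laid out as {enabled, list size, country codes...}, and every field is validated before anything is copied out. A standalone sketch of the same validate-then-copy pattern over a plain integer array (names and sample values are hypothetical):

/* Validate-then-copy sketch for an {enabled, size, entries...} blob. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BLACK_LIST_MAX 16

static int parse_tas(const uint32_t *pkg, size_t pkg_len,
		     uint32_t *out, int *out_size)
{
	size_t i;

	if (pkg_len < 2)
		return -EINVAL;
	if (!pkg[0]) {			/* feature disabled */
		*out_size = -1;
		return 0;
	}
	if (pkg[1] > BLACK_LIST_MAX || pkg_len < 2 + pkg[1])
		return -EINVAL;		/* size field out of range */

	*out_size = pkg[1];
	for (i = 0; i < pkg[1]; i++)
		out[i] = pkg[2 + i];
	return 0;
}

int main(void)
{
	const uint32_t pkg[] = { 1, 2, 0x3630, 0x3438 };  /* enabled, 2 entries */
	uint32_t list[BLACK_LIST_MAX];
	int n;

	if (!parse_tas(pkg, 4, list, &n))
		printf("blacklist entries: %d\n", n);
	return 0;
}
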
int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;

View File

@ -64,6 +64,7 @@
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@ -75,6 +76,7 @@
#define ACPI_SPLC_METHOD "SPLC"
#define ACPI_ECKV_METHOD "ECKV"
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WIFI_DOMAIN (0x07)
@ -96,6 +98,12 @@
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
* 1 type, 1 enabled, 1 black list size, 16 black list array
*/
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
@ -174,6 +182,9 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
int *black_list_size);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@ -250,5 +261,11 @@ static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */

View File

@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -119,16 +119,49 @@ enum iwl_calib_cfg {
IWL_CALIB_CFG_AGC_IDX = BIT(18),
};
/**
* struct iwl_phy_specific_cfg - specific PHY filter configuration
*
* Sent as part of the phy configuration command (v3) to configure specific FW
* defined PHY filters that can be applied to each antenna.
*
* @filter_cfg_chain_a: filter config id for LMAC1 chain A
* @filter_cfg_chain_b: filter config id for LMAC1 chain B
* @filter_cfg_chain_c: filter config id for LMAC2 chain A
* @filter_cfg_chain_d: filter config id for LMAC2 chain B
* values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id
*/
struct iwl_phy_specific_cfg {
__le32 filter_cfg_chain_a;
__le32 filter_cfg_chain_b;
__le32 filter_cfg_chain_c;
__le32 filter_cfg_chain_d;
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
* struct iwl_phy_cfg_cmd - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
*/
struct iwl_phy_cfg_cmd {
struct iwl_phy_cfg_cmd_v1 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
} __packed;
/**
* struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3)
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
* @phy_specific_cfg: configure predefined PHY filters
*/
struct iwl_phy_cfg_cmd_v3 {
__le32 phy_cfg;
struct iwl_calib_ctrl calib_control;
struct iwl_phy_specific_cfg phy_specific_cfg;
} __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */
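
With both layouts defined, a caller is expected to size the payload by the command version the firmware advertises; the version itself would come from the firmware's TLV data (compare the iwl_fw_lookup_cmd_ver() helper added later in this diff). A hedged sketch of that dispatch:

/* Sketch: pick the command payload size by firmware command version. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct calib_ctrl { uint32_t flow_trigger, event_trigger; };

struct phy_cfg_cmd_v1 {
	uint32_t phy_cfg;
	struct calib_ctrl calib;
};

struct phy_cfg_cmd_v3 {
	uint32_t phy_cfg;
	struct calib_ctrl calib;
	uint32_t filter_cfg[4];	/* per-chain PHY filter ids */
};

int main(void)
{
	struct phy_cfg_cmd_v3 cmd;
	int cmd_ver = 3;	/* would be looked up from firmware TLVs */
	size_t len;

	memset(&cmd, 0, sizeof(cmd));
	cmd.phy_cfg = 0x1;
	/* older firmware only understands the shorter v1 layout */
	len = cmd_ver == 3 ? sizeof(struct phy_cfg_cmd_v3)
			   : sizeof(struct phy_cfg_cmd_v1);
	printf("sending %zu bytes\n", len);
	return 0;
}
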
/*
* enum iwl_dc2dc_config_id - flag ids
*

View File

@ -618,7 +618,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet_bufsize: wakeup packet buffer size
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status {
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@ -634,6 +634,43 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
* struct iwl_wowlan_status - WoWLAN status
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
* @transmitted_ndps: number of transmitted neighbor discovery packets
* @received_beacons: number of received beacons
* @wake_packet_length: wakeup packet length
* @wake_packet_bufsize: wakeup packet buffer size
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @reserved: unused
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
__le16 qos_seq_ctr[8];
__le32 wakeup_reasons;
__le32 num_of_gtk_rekeys;
__le32 transmitted_ndps;
__le32 received_beacons;
__le32 wake_packet_length;
__le32 wake_packet_bufsize;
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
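
As the wake_packet kernel-doc notes, the buffer may hold fewer bytes (wake_packet_bufsize) than the original frame was long (wake_packet_length), so a consumer has to clamp before copying; for instance:

/* Clamp the captured wakeup packet to what the buffer actually holds. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wake_packet_length = 1500;	/* original frame length */
	uint32_t wake_packet_bufsize = 128;	/* bytes actually captured */
	uint32_t copy_len = wake_packet_length < wake_packet_bufsize ?
			    wake_packet_length : wake_packet_bufsize;

	printf("report %u of %u bytes\n", copy_len, wake_packet_length);
	return 0;
}
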
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;

View File

@ -80,6 +80,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* response is &struct iwl_nvm_get_info_rsp
*/
NVM_GET_INFO = 0x2,
/**
* @TAS_CONFIG: &struct iwl_tas_config_cmd
*/
TAS_CONFIG = 0x3,
};
/**
@ -431,4 +436,14 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
#define IWL_TAS_BLACK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd - configures the TAS
* @black_list_size: size of relevant field in black_list_array
* @black_list_array: black list countries (without TAS)
*/
struct iwl_tas_config_cmd {
__le32 black_list_size;
__le32 black_list_array[IWL_TAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
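
A driver would fill this command from the ACPI blacklist parsed earlier, converting each entry to little endian for the firmware. A rough sketch, with glibc's htole32() standing in for the kernel's cpu_to_le32() and made-up values:

/* Sketch: populate the TAS command from a parsed country blacklist. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define TAS_BLACK_LIST_MAX 16

struct tas_config_cmd {
	uint32_t black_list_size;		/* little endian on the wire */
	uint32_t black_list_array[TAS_BLACK_LIST_MAX];
};

int main(void)
{
	uint32_t countries[] = { 0x3630, 0x3438 };	/* from the ACPI table */
	struct tas_config_cmd cmd = { 0 };
	int i, n = 2;

	cmd.black_list_size = htole32(n);
	for (i = 0; i < n; i++)
		cmd.black_list_array[i] = htole32(countries[i]);
	printf("TAS cmd carries %d entries\n", n);
	return 0;
}
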
#endif /* __iwl_fw_api_nvm_reg_h__ */

View File

@ -1050,20 +1050,6 @@ struct iwl_scan_req_params_v12 {
struct iwl_scan_probe_params_v3 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
* struct iwl_scan_req_params_v13
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v4
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v4
*/
struct iwl_scan_req_params_v13 {
struct iwl_scan_general_params_v10 general_params;
struct iwl_scan_channel_params_v4 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v4 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
* struct iwl_scan_req_params_v14
* @general_params: &struct iwl_scan_general_params_v10
@ -1090,18 +1076,6 @@ struct iwl_scan_req_umac_v12 {
struct iwl_scan_req_params_v12 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
* struct iwl_scan_req_umac_v13
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
*/
struct iwl_scan_req_umac_v13 {
__le32 uid;
__le32 ooc_priority;
struct iwl_scan_req_params_v13 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
/**
* struct iwl_scan_req_umac_v14
* @uid: scan id, &enum iwl_umac_scan_uid_offsets

View File

@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Deutschland GmbH
* Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,10 +27,9 @@
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Deutschland GmbH
* Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -68,6 +66,12 @@
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
#define SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
#define SOC_FLAGS_LTR_APPLY_DELAY_200 1
#define SOC_FLAGS_LTR_APPLY_DELAY_2500 2
#define SOC_FLAGS_LTR_APPLY_DELAY_1820 3
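
The new delay selectors live in the two-bit field masked by SOC_FLAGS_LTR_APPLY_DELAY_MASK (0xc), so the chosen value has to be shifted into position. A small sketch of the arithmetic (the driver itself would use the kernel's field-packing helpers):

/* Sketch: pack an LTR apply-delay selector into the SoC flags word. */
#include <stdint.h>
#include <stdio.h>

#define SOC_FLAGS_LTR_APPLY_DELAY_MASK	0xc
#define SOC_FLAGS_LTR_APPLY_DELAY_1820	3

/* the lowest set bit of the mask gives the field's shift */
#define FIELD_SHIFT(mask)	__builtin_ctz(mask)

int main(void)
{
	uint32_t flags = 0;

	flags |= (uint32_t)SOC_FLAGS_LTR_APPLY_DELAY_1820
		 << FIELD_SHIFT(SOC_FLAGS_LTR_APPLY_DELAY_MASK);
	printf("soc flags: 0x%x\n", flags);	/* prints 0xc */
	return 0;
}
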
/**
* struct iwl_soc_configuration_cmd - Set device stabilization latency
*

View File

@ -245,32 +245,6 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
/**
* struct iwl_mvm_keyinfo - key information
* @key_flags: type &enum iwl_sta_key_flag
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @reserved1: reserved
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
* @key_offset: key offset in the fw's key table
* @reserved2: reserved
* @key: 16-byte unicast decryption key
* @tx_secur_seq_cnt: initial RSC / PN needed for replay check
* @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
* @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
*/
struct iwl_mvm_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2;
u8 reserved1;
__le16 tkip_rx_ttak[5];
u8 key_offset;
u8 reserved2;
u8 key[16];
__le64 tx_secur_seq_cnt;
__le64 hw_tkip_mic_rx_key;
__le64 hw_tkip_mic_tx_key;
} __packed;
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
#define IWL_ADD_STA_BAID_MASK 0x7F00

View File

@ -818,7 +818,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
static struct iwl_fw_error_dump_file *
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dump_ptrs *fw_error_dump)
struct iwl_fw_dump_ptrs *fw_error_dump,
struct iwl_fwrt_dump_data *data)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@ -900,15 +901,15 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* If we only want a monitor dump, reset the file length */
if (fwrt->dump.monitor_only) {
if (data->monitor_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
data->desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
data->desc->len;
dump_file = vzalloc(file_len);
if (!dump_file)
@ -984,19 +985,19 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_read_radio_regs(fwrt, &dump_data);
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
data->desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
data->desc->len);
dump_trig = (void *)dump_data->data;
memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
sizeof(*dump_trig) + fwrt->dump.desc->len);
memcpy(dump_trig, &data->desc->trig_desc,
sizeof(*dump_trig) + data->desc->len);
dump_data = iwl_fw_error_next_data(dump_data);
}
/* In case we only want monitor dump, skip to dump transport data */
if (fwrt->dump.monitor_only)
if (data->monitor_only)
goto out;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
@ -2172,7 +2173,21 @@ static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
return le32_to_cpu(hdr->file_len);
}
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc **desc)
{
if (desc && *desc != &iwl_dump_desc_assert)
kfree(*desc);
*desc = NULL;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
fwrt->dump.umac_err_id = 0;
}
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
@ -2180,11 +2195,11 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
if (!dump_file)
goto out;
return;
if (fwrt->dump.monitor_only)
if (dump_data->monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
@ -2213,9 +2228,6 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
vfree(fw_error_dump.fwrt_ptr);
vfree(fw_error_dump.trans_ptr);
out:
iwl_fw_free_dump_desc(fwrt);
}
static void iwl_dump_ini_list_free(struct list_head *list)
@ -2244,7 +2256,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
if (!file_len)
goto out;
return;
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@ -2261,9 +2273,6 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
GFP_KERNEL);
}
iwl_dump_ini_list_free(&dump_list);
out:
iwl_fw_error_dump_data_free(dump_data);
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@ -2278,27 +2287,40 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
struct iwl_fwrt_wk_data *wk_data;
unsigned long idx;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_free_dump_desc(fwrt);
iwl_fw_free_dump_desc(fwrt, &desc);
return 0;
}
/* use wks[0] since dump flow prior to ini does not need to support
* consecutive triggers collection
/*
* Check there is an available worker.
* ffz return value is undefined if no zero exists,
* so check against ~0UL first.
*/
if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks))
if (fwrt->dump.active_wks == ~0UL)
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
iwl_fw_free_dump_desc(fwrt);
idx = ffz(fwrt->dump.active_wks);
if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
wk_data = &fwrt->dump.wks[idx];
if (WARN_ON(wk_data->dump_data.desc))
iwl_fw_free_dump_desc(fwrt, &wk_data->dump_data.desc);
wk_data->dump_data.desc = desc;
wk_data->dump_data.monitor_only = monitor_only;
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
fwrt->dump.desc = desc;
fwrt->dump.monitor_only = monitor_only;
schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay));
schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
return 0;
}
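
The collect path above claims any free worker by scanning active_wks with ffz() and then setting the bit atomically; the ~0UL guard is required because ffz() is undefined when no zero bit exists. A standalone sketch of the claim, with GCC builtins standing in for the kernel's ffz() and test_and_set_bit():

/*
 * Sketch of the worker-slot claim: find the first zero bit, then set it
 * atomically; a lost race reports busy, just like the code above.
 */
#include <stdio.h>

#define DUMP_WK_NUM 5

static unsigned long active_wks;

static int claim_worker(void)
{
	unsigned long idx, old;

	if (active_wks == ~0UL)
		return -1;	/* ffz() would be undefined here */
	idx = __builtin_ctzl(~active_wks);	/* first zero bit */
	if (idx >= DUMP_WK_NUM)
		return -1;	/* all real slots busy */
	old = __atomic_fetch_or(&active_wks, 1UL << idx, __ATOMIC_SEQ_CST);
	if (old & (1UL << idx))
		return -1;	/* raced with another claimer: -EBUSY */
	return (int)idx;
}

int main(void)
{
	printf("claimed worker %d\n", claim_worker());	/* 0 */
	printf("claimed worker %d\n", claim_worker());	/* 1 */
	return 0;
}
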
@ -2307,26 +2329,40 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type)
{
int ret;
struct iwl_fw_dump_desc *iwl_dump_error_desc;
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
return -EIO;
iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
if (!iwl_dump_error_desc)
return -ENOMEM;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
return -EIO;
iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
iwl_dump_error_desc->len = 0;
iwl_dbg_tlv_time_point(fwrt,
IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
NULL);
} else {
struct iwl_fw_dump_desc *iwl_dump_error_desc;
int ret;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
if (ret)
kfree(iwl_dump_error_desc);
else
iwl_trans_sync_nmi(fwrt->trans);
iwl_dump_error_desc =
kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
return ret;
if (!iwl_dump_error_desc)
return -ENOMEM;
iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
iwl_dump_error_desc->len = 0;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
false, 0);
if (ret) {
kfree(iwl_dump_error_desc);
return ret;
}
}
iwl_trans_sync_nmi(fwrt->trans);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
@ -2504,14 +2540,14 @@ IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx)) {
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
}
@ -2527,12 +2563,17 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
else
iwl_fw_error_dump(fwrt);
iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_dump_data_free(dump_data);
else
iwl_fw_free_dump_desc(fwrt, &dump_data->desc);
clear_bit(wk_idx, &fwrt->dump.active_wks);
}

View File

@ -98,17 +98,6 @@ struct iwl_fw_dbg_params {
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
{
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
fwrt->dump.lmac_err_id[0] = 0;
if (fwrt->smem_cfg.num_lmacs > 1)
fwrt->dump.lmac_err_id[1] = 0;
fwrt->dump.umac_err_id = 0;
}
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);

View File

@ -449,6 +449,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */

View File

@ -0,0 +1,99 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "img.h"
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
return IWL_FW_CMD_VER_UNKNOWN;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd)
return entry->cmd_ver;
}
return IWL_FW_CMD_VER_UNKNOWN;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
return def;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd) {
if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
return def;
return entry->notif_ver;
}
}
return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
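
These helpers let callers branch on the firmware-advertised version of a (group, command) pair instead of on hardware generation. A standalone sketch of the same table lookup plus a typical call site (the group/command numbers are made up):

/* Standalone sketch of the (group, cmd) -> version table lookup. */
#include <stdint.h>
#include <stdio.h>

#define CMD_VER_UNKNOWN 99

struct cmd_version { uint8_t group, cmd, cmd_ver, notif_ver; };

/* in the driver this table is parsed from the firmware's TLV section */
static const struct cmd_version table[] = {
	{ .group = 4, .cmd = 0x6a, .cmd_ver = 3, .notif_ver = 1 },
};

static uint8_t lookup_cmd_ver(uint8_t grp, uint8_t cmd)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].group == grp && table[i].cmd == cmd)
			return table[i].cmd_ver;
	return CMD_VER_UNKNOWN;
}

int main(void)
{
	/* e.g. only use a v3 command layout when the firmware says so */
	if (lookup_cmd_ver(4, 0x6a) == 3)
		printf("firmware takes the v3 command\n");
	return 0;
}
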

Some files were not shown because too many files have changed in this diff.