Merge tag 'wireless-drivers-next-2020-12-03' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.11

First set of patches for v5.11. rtw88 is getting improvements to work
better with Bluetooth, and other drivers are also getting some new
features. The mhi-ath11k-immutable branch was pulled in from the mhi
tree to avoid conflicts with it.

Major changes:

rtw88
 * major Bluetooth coexistence improvements
wilc1000
 * Wi-Fi Multimedia (WMM) support
ath11k
 * Fast Initial Link Setup (FILS) discovery and unsolicited broadcast
   probe response support
 * qcom,ath11k-calibration-variant Device Tree setting
 * cold boot calibration support
 * new DFS region: JP
wcn36xx
 * enable connection monitoring and keepalive in firmware
ath10k
 * firmware IRAM recovery feature
mhi
 * merge the mhi-ath11k-immutable branch to make the MHI API change go smoothly

* tag 'wireless-drivers-next-2020-12-03' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next: (180 commits)
  wl1251: remove trailing semicolon in macro definition
  airo: remove trailing semicolon in macro definition
  wilc1000: added queue support for WMM
  wilc1000: call complete() for failure in wilc_wlan_txq_add_cfg_pkt()
  wilc1000: free resource in wilc_wlan_txq_add_mgmt_pkt() for failure path
  wilc1000: free resource in wilc_wlan_txq_add_net_pkt() for failure path
  wilc1000: added 'ndo_set_mac_address' callback support
  brcmfmac: expose firmware config files through modinfo
  wlcore: Switch to using the new API kobj_to_dev()
  rtw88: coex: add feature to enhance HID coexistence performance
  rtw88: coex: upgrade coexistence A2DP mechanism
  rtw88: coex: add action for coexistence in hardware initial
  rtw88: coex: add function to avoid cck lock
  rtw88: coex: change the coexistence mechanism for WLAN connected
  rtw88: coex: change the coexistence mechanism for HID
  rtw88: coex: update AFH information while in free-run mode
  rtw88: coex: update the mechanism for A2DP + PAN
  rtw88: coex: add debug message
  rtw88: coex: run coexistence when WLAN entering/leaving LPS
  Revert "rtl8xxxu: Add Buffalo WI-U3-866D to list of supported devices"
  ...
====================

Link: https://lore.kernel.org/r/20201203185732.9CFA5C433ED@smtp.codeaurora.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 846c3c9cfe
Jakub Kicinski <kuba@kernel.org>, 2020-12-04 10:56:37 -08:00
150 files changed, 3496 insertions(+), 1488 deletions(-)

@ -144,6 +144,12 @@ properties:
* reg
* reg-names
qcom,ath11k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
description:
string to uniquely identify variant of the calibration data in the
board-2.bin for designs with colliding bus and device specific ids
required:
- compatible
- reg


@ -758,7 +758,6 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
mhi_chan->offload_ch = ch_cfg->offload_channel;
mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
mhi_chan->pre_alloc = ch_cfg->auto_queue;
mhi_chan->auto_start = ch_cfg->auto_start;
/*
* If MHI host allocates buffers, then the channel direction
@ -1160,11 +1159,6 @@ static int mhi_driver_probe(struct device *dev)
goto exit_probe;
ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
if (ul_chan->auto_start) {
ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
if (ret)
goto exit_probe;
}
}
ret = -EINVAL;
@ -1198,9 +1192,6 @@ static int mhi_driver_probe(struct device *dev)
if (ret)
goto exit_probe;
if (dl_chan && dl_chan->auto_start)
mhi_prepare_channel(mhi_cntrl, dl_chan);
mhi_device_put(mhi_dev);
return ret;


@ -563,7 +563,6 @@ struct mhi_chan {
bool configured;
bool offload_ch;
bool pre_alloc;
bool auto_start;
bool wake_capable;
};
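
With auto_start now gone from struct mhi_chan and from the channel config
parsing above, MHI client drivers are expected to start their channels
explicitly. A minimal client sketch, assuming the standard
mhi_prepare_for_transfer()/mhi_unprepare_from_transfer() API and using
purely illustrative names:

#include <linux/mhi.h>

/* Illustrative only: start the UL/DL channels bound to this device from
 * probe, which previously happened implicitly for auto_start channels in
 * mhi_driver_probe().
 */
static int example_mhi_probe(struct mhi_device *mhi_dev,
			     const struct mhi_device_id *id)
{
	int ret;

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	return 0;
}

static void example_mhi_remove(struct mhi_device *mhi_dev)
{
	/* Reset the channels when the client goes away. */
	mhi_unprepare_from_transfer(mhi_dev);
}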


@ -651,6 +651,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
[ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel",
[ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate",
[ATH10K_FW_FEATURE_IRAM_RECOVERY] = "iram-recovery",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@ -2604,6 +2605,78 @@ static int ath10k_core_compat_services(struct ath10k *ar)
return 0;
}
#define TGT_IRAM_READ_PER_ITR (8 * 1024)
static int ath10k_core_copy_target_iram(struct ath10k *ar)
{
const struct ath10k_hw_mem_layout *hw_mem;
const struct ath10k_mem_region *tmp, *mem_region = NULL;
dma_addr_t paddr;
void *vaddr = NULL;
u8 num_read_itr;
int i, ret;
u32 len, remaining_len;
hw_mem = ath10k_coredump_get_mem_layout(ar);
if (!hw_mem)
return -ENOMEM;
for (i = 0; i < hw_mem->region_table.size; i++) {
tmp = &hw_mem->region_table.regions[i];
if (tmp->type == ATH10K_MEM_REGION_TYPE_REG) {
mem_region = tmp;
break;
}
}
if (!mem_region)
return -ENOMEM;
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
if (ar->wmi.mem_chunks[i].req_id ==
WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID) {
vaddr = ar->wmi.mem_chunks[i].vaddr;
len = ar->wmi.mem_chunks[i].len;
break;
}
}
if (!vaddr || !len) {
ath10k_warn(ar, "No allocated memory for IRAM back up");
return -ENOMEM;
}
len = (len < mem_region->len) ? len : mem_region->len;
paddr = mem_region->start;
num_read_itr = len / TGT_IRAM_READ_PER_ITR;
remaining_len = len % TGT_IRAM_READ_PER_ITR;
for (i = 0; i < num_read_itr; i++) {
ret = ath10k_hif_diag_read(ar, paddr, vaddr,
TGT_IRAM_READ_PER_ITR);
if (ret) {
ath10k_warn(ar, "failed to copy firmware IRAM contents: %d",
ret);
return ret;
}
paddr += TGT_IRAM_READ_PER_ITR;
vaddr += TGT_IRAM_READ_PER_ITR;
}
if (remaining_len) {
ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len);
if (ret) {
ath10k_warn(ar, "failed to copy firmware IRAM contents: %d",
ret);
return ret;
}
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "target IRAM back up completed\n");
return 0;
}
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@ -2636,7 +2709,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err;
/* Some of of qca988x solutions are having global reset issue
/* Some of qca988x solutions are having global reset issue
* during target initialization. Bypassing PLL setting before
* downloading firmware and letting the SoC run on REF_CLK is
* fixing the problem. Corresponding firmware change is also
@ -2765,6 +2838,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
if (test_bit(ATH10K_FW_FEATURE_IRAM_RECOVERY,
ar->running_fw->fw_file.fw_features)) {
status = ath10k_core_copy_target_iram(ar);
if (status) {
ath10k_warn(ar, "failed to copy target iram contents: %d",
status);
goto err_hif_stop;
}
}
if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
mode == ATH10K_FIRMWARE_MODE_NORMAL) {
val = 0;


@ -84,6 +84,11 @@
#define ATH10K_MAX_RETRY_COUNT 30
#define ATH10K_ITER_NORMAL_FLAGS (IEEE80211_IFACE_ITER_NORMAL | \
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
struct ath10k;
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@ -829,6 +834,9 @@ enum ath10k_fw_features {
/* Firmware allows setting peer fixed rate */
ATH10K_FW_FEATURE_PEER_FIXED_RATE = 21,
/* Firmware support IRAM recovery */
ATH10K_FW_FEATURE_IRAM_RECOVERY = 22,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};


@ -1764,7 +1764,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
struct ath10k *ar = file->private_data;
struct ath10k_vif *arvif;
/* Just check for for the first vif alone, as all the vifs will be
/* Just check for the first vif alone, as all the vifs will be
* sharing the same channel and if the channel is disabled, all the
* vifs will share the same 'is_started' state.
*/


@ -3878,7 +3878,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
return ath10k_htt_rx_proc_rx_frag_ind(htt,
&resp->rx_frag_ind,
skb);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
break;


@ -2066,7 +2066,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
{
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_mac_handle_beacon_iter,
skb);
}
@ -2099,7 +2099,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
{
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_mac_handle_beacon_miss_iter,
&vdev_id);
}
@ -3433,7 +3433,7 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
return;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
ATH10K_ITER_RESUME_FLAGS,
ath10k_mac_tx_unlock_iter,
ar);
@ -3522,7 +3522,7 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
spin_lock_bh(&ar->htt.tx_lock);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
ATH10K_ITER_RESUME_FLAGS,
ath10k_mac_handle_tx_pause_iter,
&arg);
spin_unlock_bh(&ar->htt.tx_lock);
@ -5457,7 +5457,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
/* Some firmware revisions don't wait for beacon tx completion before
* sending another SWBA event. This could lead to hardware using old
* (freed) beacon data in some cases, e.g. tx credit starvation
* combined with missed TBTT. This is very very rare.
* combined with missed TBTT. This is very rare.
*
* On non-IOMMU-enabled hosts this could be a possible security issue
* because hw could beacon some random data on the air. On
@ -8696,7 +8696,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
ieee80211_iterate_active_interfaces_atomic(
hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_mac_change_chanctx_cnt_iter,
&arg);
if (arg.n_vifs == 0)
@ -8709,7 +8709,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
ieee80211_iterate_active_interfaces_atomic(
hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_mac_change_chanctx_fill_iter,
&arg);
ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
@ -9169,10 +9169,11 @@ static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
goto exit;
}
ret = 0;
if (sta)
goto exit;
ret = 0;
arvif->tids_rst = 0;
data.curr_vif = vif;
data.ar = ar;
@ -9593,14 +9594,12 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_vif_iter arvif_iter;
u32 flags;
memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
flags,
ATH10K_ITER_RESUME_FLAGS,
ath10k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {


@ -139,7 +139,7 @@ void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
};
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_p2p_noa_update_vdev_iter,
&arg);
}


@ -3236,7 +3236,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
if (ret == 0)
return 0;
/* fall-through */
/* MHI failed, try legacy irq next */
}
/* Try legacy irq


@ -917,7 +917,7 @@ static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}
static struct qmi_msg_handler qmi_msg_handler[] = {
static const struct qmi_msg_handler qmi_msg_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_READY_IND_V01,
@ -981,7 +981,7 @@ static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
NULL);
}
static struct qmi_ops ath10k_qmi_ops = {
static const struct qmi_ops ath10k_qmi_ops = {
.new_server = ath10k_qmi_new_server,
.del_server = ath10k_qmi_del_server,
};


@ -68,7 +68,7 @@ struct rx_attention {
* first_msdu is set.
*
* peer_idx_invalid
* Indicates no matching entries within the the max search
* Indicates no matching entries within the max search
* count. Only set when first_msdu is set.
*
* peer_idx_timeout


@ -1248,7 +1248,7 @@ static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
* Wait for first 4 bytes to be in FIFO
* If CONSERVATIVE_BMI_READ is enabled, also wait for
* a BMI command credit, which indicates that the ENTIRE
* response is available in the the FIFO
* response is available in the FIFO
*
* CASE 3: length > 128
* Wait for the first 4 bytes to be in FIFO
@ -1962,9 +1962,15 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
struct ath10k_sdio_bus_request *req, *tmp_req;
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct sk_buff *skb;
ath10k_sdio_irq_disable(ar);
cancel_work_sync(&ar_sdio->async_work_rx);
while ((skb = skb_dequeue(&ar_sdio->rx_head)))
dev_kfree_skb_any(skb);
cancel_work_sync(&ar_sdio->wr_async_work);
spin_lock_bh(&ar_sdio->wr_async_lock);
@ -2307,8 +2313,8 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
}
count = 0;
for (i = 0; cur_section; i++) {
i = 0;
for (; cur_section; cur_section = next_section) {
section_size = cur_section->end - cur_section->start;
if (section_size <= 0) {
@ -2318,7 +2324,7 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
break;
}
if ((i + 1) == mem_region->section_table.size) {
if (++i == mem_region->section_table.size) {
/* last section */
next_section = NULL;
skip_size = 0;
@ -2361,12 +2367,6 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
}
count += skip_size;
if (!next_section)
/* this was the last section */
break;
cur_section = next_section;
}
return count;


@ -997,6 +997,8 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ar_usb = ath10k_usb_priv(ar);
ret = ath10k_usb_create(ar, interface);
if (ret)
goto err;
ar_usb->ar = ar;
ar->dev_id = product_id;
@ -1009,7 +1011,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
goto err;
goto err_usb_destroy;
}
/* TODO: remove this once USB support is fully implemented */
@ -1017,6 +1019,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return 0;
err_usb_destroy:
ath10k_usb_destroy(ar);
err:
ath10k_core_destroy(ar);


@ -1401,13 +1401,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
switch (tag) {
case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
arg->service_map_ext_valid = true;
arg->service_map_ext_len = *(__le32 *)ptr;
arg->service_map_ext = ptr + sizeof(__le32);
return 0;
default:
break;
}
return -EPROTO;
return 0;
}
static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,


@ -1894,7 +1894,7 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ATH10K_ITER_NORMAL_FLAGS,
ath10k_wmi_tx_beacons_iter,
NULL);
}
@ -5751,8 +5751,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
ret);
}
ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
__le32_to_cpu(arg.service_map_ext_len));
/*
* Initialization of "arg.service_map_ext_valid" to ZERO is necessary
* for the below logic to work.
*/
if (arg.service_map_ext_valid)
ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
__le32_to_cpu(arg.service_map_ext_len));
}
static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)


@ -3060,6 +3060,8 @@ struct host_memory_chunk {
__le32 size;
} __packed;
#define WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID 8
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
@ -3832,7 +3834,7 @@ enum wmi_pdev_param {
WMI_PDEV_PARAM_BEACON_TX_MODE,
/*
* Resource manager off chan mode .
* 0: turn off off chan mode. 1: turn on offchan mode
* 0: turn off offchan mode. 1: turn on offchan mode
*/
WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
/*
@ -3936,7 +3938,7 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
/*
* Resource manager off chan mode .
* 0: turn off off chan mode. 1: turn on offchan mode
* 0: turn off offchan mode. 1: turn on offchan mode
*/
WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
/*
@ -6917,6 +6919,7 @@ struct wmi_svc_rdy_ev_arg {
};
struct wmi_svc_avail_ev_arg {
bool service_map_ext_valid;
__le32 service_map_ext_len;
const __le32 *service_map_ext;
};


@ -340,6 +340,31 @@ static void ath11k_ahb_power_down(struct ath11k_base *ab)
rproc_shutdown(ab_ahb->tgt_rproc);
}
static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
{
int timeout;
if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
ab->hw_params.cold_boot_calib == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
ath11k_cold_boot_cal = 0;
ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
}
/* reset the firmware */
ath11k_ahb_power_down(ab);
ath11k_ahb_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
return 0;
}
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
@ -700,6 +725,8 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
goto err_ce_free;
}
ath11k_ahb_fwreset_from_cold_boot(ab);
return 0;
err_ce_free:


@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
@ -65,6 +66,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
.cold_boot_calib = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@ -102,6 +104,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
.cold_boot_calib = true,
},
{
.name = "qca6390 hw2.0",
@ -138,17 +141,48 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.cold_boot_calib = false,
},
};
int ath11k_core_check_dt(struct ath11k_base *ab)
{
size_t max_len = sizeof(ab->qmi.target.bdf_ext);
const char *variant = NULL;
struct device_node *node;
node = ab->dev->of_node;
if (!node)
return -ENOENT;
of_property_read_string(node, "qcom,ath11k-calibration-variant",
&variant);
if (!variant)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
variant);
return 0;
}
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
if (ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
scnprintf(name, name_len,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d",
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id);
ab->qmi.target.board_id, variant);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
@ -774,8 +808,10 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
complete(&ar->thermal.wmi_sync);
@ -923,6 +959,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
ab->dev = dev;


@ -75,12 +75,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
enum ath11k_skb_flags {
ATH11K_SKB_HW_80211_ENCAP = BIT(0),
ATH11K_SKB_CIPHER_SET = BIT(1),
};
struct ath11k_skb_cb {
dma_addr_t paddr;
u8 eid;
u8 flags;
u32 cipher;
struct ath11k *ar;
struct ieee80211_vif *vif;
} __packed;
@ -111,8 +113,13 @@ enum ath11k_firmware_mode {
/* factory tests etc */
ATH11K_FIRMWARE_MODE_FTM,
/* Cold boot calibration */
ATH11K_FIRMWARE_MODE_COLD_BOOT = 7,
};
extern bool ath11k_cold_boot_cal;
#define ATH11K_IRQ_NUM_MAX 52
#define ATH11K_EXT_IRQ_NUM_MAX 16
@ -425,11 +432,7 @@ struct ath11k_per_peer_tx_stats {
};
#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
struct ath11k_vdev_stop_status {
bool stop_in_progress;
u32 vdev_id;
};
#define ATH11K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
struct ath11k {
struct ath11k_base *ab;
@ -500,13 +503,14 @@ struct ath11k {
u8 lmac_id;
struct completion peer_assoc_done;
struct completion peer_delete_done;
int install_key_status;
struct completion install_key_done;
int last_wmi_vdev_start_status;
struct ath11k_vdev_stop_status vdev_stop_status;
struct completion vdev_setup_done;
struct completion vdev_delete_done;
int num_peers;
int max_num_peers;
@ -723,9 +727,6 @@ struct ath11k_base {
} stats;
u32 pktlog_defs_checksum;
/* Round robbin based TCL ring selector */
atomic_t tcl_ring_selector;
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
@ -883,6 +884,7 @@ void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
void ath11k_core_halt(struct ath11k *ar);
@ -907,6 +909,8 @@ static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath11k_skb_cb) >
IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}


@ -381,7 +381,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
HAL_WBM2SW_RELEASE, i, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
i, ret);
goto err;
}


@ -423,7 +423,7 @@ enum htt_srng_ring_id {
* Used only by Consumer ring to generate ring_sw_int_p.
* Ring entries low threshold water mark, that is used
* in combination with the interrupt timer as well as
* the the clearing of the level interrupt.
* the clearing of the level interrupt.
* b'16:18 - prefetch_timer_cfg:
* Used only by Consumer ring to set timer mode to
* support Application prefetch handling.


@ -377,7 +377,7 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
idr_remove(&rx_ring->bufs_idr, buf_id);
/* TODO: Understand where internal driver does this dma_unmap of
/* TODO: Understand where internal driver does this dma_unmap
* of rxdma_buffer.
*/
dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
@ -399,7 +399,7 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
idr_remove(&rx_ring->bufs_idr, buf_id);
/* XXX: Understand where internal driver does this dma_unmap of
/* XXX: Understand where internal driver does this dma_unmap
* of rxdma_buffer.
*/
dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
@ -960,7 +960,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
rx_tid->ba_win_sz = ba_win_sz;
/* TODO: Optimize the memory allocation for qos tid based on the
/* TODO: Optimize the memory allocation for qos tid based on
* the actual BA window size in REO tid update path.
*/
if (tid == HAL_DESC_REO_NON_QOS_TID)
@ -2715,7 +2715,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_BIDIRECTIONAL);
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr)))
goto fail_free_skb;
@ -2731,7 +2731,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_BIDIRECTIONAL);
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
fail_alloc_skb:
@ -2795,7 +2795,7 @@ fail_desc_get:
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_BIDIRECTIONAL);
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
@ -2856,13 +2856,9 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
rxcb = ATH11K_SKB_RXCB(skb);
dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_BIDIRECTIONAL);
DMA_FROM_DEVICE);
tlv = (struct hal_tlv_hdr *)skb->data;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=


@ -84,7 +84,6 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct ath11k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
@ -105,14 +104,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
/* Let the default ring selection be based on a round robin
* fashion where one of the 3 tcl rings are selected based on
* the tcl_ring_selector counter. In case that ring
/* Let the default ring selection be based on current processor
* number, where one of the 3 tcl rings are selected based on
* the smp_processor_id(). In case that ring
* is full/busy, we resort to other available rings.
* If all rings are full, we drop the packet.
* //TODO Add throttling logic when all rings are full
*/
ring_selector = atomic_inc_return(&ab->tcl_ring_selector);
ring_selector = smp_processor_id();
tcl_ring_sel:
tcl_ring_retry = false;
@ -149,9 +148,9 @@ tcl_ring_sel:
ti.meta_data_flags = arvif->tcl_metadata;
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
if (key) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(key->cipher);
ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
if (ieee80211_has_protected(hdr->frame_control))
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
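
The comment above describes the new TCL ring selection: start from the ring
implied by smp_processor_id() and fall back to the remaining rings when that
one is busy, dropping the packet only if all are full. A rough standalone
sketch of that idea, with a hypothetical helper (the driver does this inline
in ath11k_dp_tx() with a retry label and DP_TCL_NUM_RING_MAX rings):

#include <linux/errno.h>
#include <linux/smp.h>

#define EXAMPLE_NUM_TCL_RINGS 3

/* Return the first non-busy ring starting from the one mapped to the
 * current CPU, or -ENOBUFS when every ring is busy (caller drops).
 */
static int example_pick_tcl_ring(const bool *ring_busy)
{
	int start = smp_processor_id() % EXAMPLE_NUM_TCL_RINGS;
	int i;

	for (i = 0; i < EXAMPLE_NUM_TCL_RINGS; i++) {
		int ring = (start + i) % EXAMPLE_NUM_TCL_RINGS;

		if (!ring_busy[ring])
			return ring;
	}

	return -ENOBUFS;
}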


@ -716,7 +716,7 @@ struct hal_reo_dest_ring {
*
* rx_msdu_info
* General information related to the MSDU that is passed
* on from RXDMA all the way to to the REO destination ring.
* on from RXDMA all the way to the REO destination ring.
*
* queue_addr_lo
* Address (lower 32 bits) of the REO queue descriptor.
@ -1425,7 +1425,7 @@ struct hal_ce_srng_dest_desc {
#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(7, 0)
#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0)
#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
@ -1946,7 +1946,7 @@ enum hal_rx_reo_queue_pn_size {
#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 10)
#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
@ -2432,7 +2432,7 @@ struct hal_reo_flush_timeout_list_status {
#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(23, 0)
#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
struct hal_reo_desc_thresh_reached_status {
struct hal_reo_status_hdr hdr;


@ -127,7 +127,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config->rx_batchmode = TARGET_RX_BATCHMODE;
config->peer_map_unmap_v2_support = 1;
config->twt_ap_pdev_count = 2;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
}
@ -157,7 +157,7 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
};


@ -161,6 +161,7 @@ struct ath11k_hw_params {
bool supports_monitor;
bool supports_shadow_regs;
bool idle_ps;
bool cold_boot_calib;
};
struct ath11k_hw_ops {


@ -537,31 +537,6 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
return NULL;
}
struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
u32 vdev_id)
{
int i;
struct ath11k_pdev *pdev;
struct ath11k *ar;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
if (ar->vdev_stop_status.stop_in_progress &&
ar->vdev_stop_status.vdev_id == vdev_id) {
ar->vdev_stop_status.stop_in_progress = false;
spin_unlock_bh(&ar->data_lock);
return ar;
}
spin_unlock_bh(&ar->data_lock);
}
}
return NULL;
}
static void ath11k_pdev_caps_update(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@ -1850,6 +1825,52 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
struct sk_buff *tmpl;
int ret;
u32 interval;
bool unsol_bcast_probe_resp_enabled = false;
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
if (tmpl)
ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
} else if (info->unsol_bcast_probe_resp_interval) {
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
arvif->vif);
if (tmpl)
ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
tmpl);
} else { /* Disable */
return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
}
if (!tmpl) {
ath11k_warn(ar->ab,
"mac vdev %i failed to retrieve %s template\n",
arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" :
"FILS discovery"));
return -EPERM;
}
kfree_skb(tmpl);
if (!ret)
ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
unsol_bcast_probe_resp_enabled);
return ret;
}
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@ -2111,6 +2132,10 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_FILS_DISCOVERY ||
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
mutex_unlock(&ar->conf_mutex);
}
@ -3729,11 +3754,6 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
he_cap_elem->mac_cap_info[1] &=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
he_cap_elem->phy_cap_info[4] &=
~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK;
he_cap_elem->phy_cap_info[4] &=
~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK;
he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2;
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
@ -3977,21 +3997,20 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
info = IEEE80211_SKB_CB(skb);
if (!info->control.vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n",
info->control.flags);
skb_cb = ATH11K_SKB_CB(skb);
if (!skb_cb->vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
ieee80211_free_txskb(ar->hw, skb);
continue;
}
arvif = ath11k_vif_to_arvif(info->control.vif);
arvif = ath11k_vif_to_arvif(skb_cb->vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
@ -4004,8 +4023,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
} else {
ath11k_warn(ar->ab,
"dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n",
arvif->vdev_id, info->control.flags,
"dropping mgmt frame for vdev %d, is_started %d\n",
arvif->vdev_id,
arvif->is_started);
ieee80211_free_txskb(ar->hw, skb);
}
@ -4053,10 +4072,20 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
if (key) {
skb_cb->cipher = key->cipher;
skb_cb->flags |= ATH11K_SKB_CIPHER_SET;
}
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
@ -4094,7 +4123,8 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
if (ath11k_debugfs_rx_filter(ar))
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
@ -4578,8 +4608,22 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
arvif->vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arvif->vdev_id, vif->addr);
return ret;
}
ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (ret)
return ret;
ar->num_peers--;
ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id);
}
err_vdev_del:
@ -4614,6 +4658,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = ar->ab;
unsigned long time_left;
int ret;
int i;
@ -4622,10 +4667,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (ret)
@ -4633,16 +4674,33 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
reinit_completion(&ar->vdev_delete_done);
ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
if (ret) {
ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
ATH11K_VDEV_DELETE_TIMEOUT_HZ);
if (time_left == 0) {
ath11k_warn(ab, "Timeout in receiving vdev delete response\n");
goto err_vdev_del;
}
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ar->num_created_vdevs--;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
ath11k_peer_cleanup(ar, arvif->vdev_id);
@ -4946,13 +5004,6 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
reinit_completion(&ar->vdev_setup_done);
spin_lock_bh(&ar->data_lock);
ar->vdev_stop_status.stop_in_progress = true;
ar->vdev_stop_status.vdev_id = arvif->vdev_id;
spin_unlock_bh(&ar->data_lock);
ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
@ -4981,10 +5032,6 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
return 0;
err:
spin_lock_bh(&ar->data_lock);
ar->vdev_stop_status.stop_in_progress = false;
spin_unlock_bh(&ar->data_lock);
return ret;
}
@ -5225,20 +5272,26 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
mutex_unlock(&ar->conf_mutex);
return 0;
ret = 0;
goto out;
}
if (WARN_ON(arvif->is_started)) {
mutex_unlock(&ar->conf_mutex);
return -EBUSY;
ret = -EBUSY;
goto out;
}
if (ab->hw_params.vdev_start_delay) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
ret = ath11k_peer_create(ar, arvif, NULL, &param);
if (ret) {
ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
ret);
goto out;
}
}
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
@ -5246,23 +5299,21 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
ctx->def.chan->center_freq, ret);
goto err;
goto out;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
if (ret)
goto err;
goto out;
}
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
mutex_unlock(&ar->conf_mutex);
ret = 0;
return 0;
err:
out:
mutex_unlock(&ar->conf_mutex);
return ret;
@ -6258,6 +6309,13 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(ath11k_iftypes_ext_capa);
if (ar->supports_6ghz) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
}
ath11k_reg_init(ar);
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
@ -6397,7 +6455,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
INIT_LIST_HEAD(&ar->ppdu_stats_info);
mutex_init(&ar->conf_mutex);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->vdev_delete_done);
init_completion(&ar->peer_assoc_done);
init_completion(&ar->peer_delete_done);
init_completion(&ar->install_key_done);
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);


@ -137,8 +137,6 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
u32 vdev_id);
void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);


@ -24,7 +24,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = {
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.auto_start = false,
},
{
.num = 1,
@ -39,7 +38,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = {
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.auto_start = false,
},
{
.num = 20,
@ -54,7 +52,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = {
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.auto_start = true,
},
{
.num = 21,
@ -69,7 +66,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = {
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = true,
.auto_start = true,
},
};


@ -380,9 +380,9 @@ static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
}
}
static void ath11k_pci_ce_tasklet(unsigned long data)
static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
@ -581,8 +581,7 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet,
(unsigned long)ce_pipe);
tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
IRQF_SHARED, irq_name[irq_idx],


@ -177,12 +177,36 @@ static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8
return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr)
{
int ret;
unsigned long time_left;
ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
if (ret) {
ath11k_warn(ar->ab, "failed wait for peer deleted");
return ret;
}
time_left = wait_for_completion_timeout(&ar->peer_delete_done,
3 * HZ);
if (time_left == 0) {
ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
return -ETIMEDOUT;
}
return 0;
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ar->ab,
@ -191,7 +215,7 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
return ret;
}
ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@ -247,8 +271,22 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
spin_unlock_bh(&ar->ab->base_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
param->vdev_id);
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
param->vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
param->vdev_id, param->peer_addr);
return ret;
}
ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
param->peer_addr);
if (ret)
return ret;
return -ENOENT;
}


@ -41,5 +41,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
#endif /* _PEER_H_ */


@ -14,6 +14,12 @@
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
bool ath11k_cold_boot_cal = 1;
EXPORT_SYMBOL(ath11k_cold_boot_cal);
module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
MODULE_PARM_DESC(cold_boot_cal,
"Decrease the channel switch time but increase the driver load time (Default: true)");
static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
@ -1585,15 +1591,17 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
struct qmi_handle *handle = &ab->qmi.handle;
struct qmi_txn txn;
int ret = 0;
int ret;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp)
if (!resp) {
ret = -ENOMEM;
goto resp_out;
}
req->client_id_valid = 1;
req->client_id = QMI_WLANFW_CLIENT_ID;
@ -1769,9 +1777,16 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
ath11k_warn(ab, "qmi mem size is low to load caldata\n");
return -EINVAL;
}
/* TODO ath11k does not support cold boot calibration */
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
ab->qmi.target_mem[idx].vaddr =
(void *)ATH11K_QMI_CALDB_ADDRESS;
} else {
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@ -1793,6 +1808,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
struct qmi_wlanfw_cap_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0;
int r;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@ -1858,6 +1874,10 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ab->qmi.target.fw_build_timestamp,
ab->qmi.target.fw_build_id);
r = ath11k_core_check_dt(ab);
if (r)
ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n");
out:
return ret;
}
@ -2352,6 +2372,32 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
return 0;
}
static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
{
int timeout;
int ret;
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
ath11k_warn(ab, "Coldboot Calibration failed - wait ended\n");
return 0;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n");
return 0;
}
static int
ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
enum ath11k_qmi_event_type type,
@ -2501,11 +2547,18 @@ static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
}
static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi,
static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl,
struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ab->qmi.cal_done = 1;
wake_up(&ab->qmi.cold_boot_waitq);
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cold boot calibration done\n");
}
static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
@ -2618,9 +2671,16 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
break;
}
ath11k_core_qmi_firmware_ready(ab);
ab->qmi.cal_done = 1;
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
if (ath11k_cold_boot_cal && ab->qmi.cal_done == 0 &&
ab->hw_params.cold_boot_calib) {
ath11k_qmi_process_coldboot_calibration(ab);
} else {
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
ath11k_core_qmi_firmware_ready(ab);
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
break;
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:


@ -12,6 +12,7 @@
#define ATH11K_HOST_VERSION_STRING "WIN"
#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
@ -24,6 +25,7 @@
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
@ -33,6 +35,7 @@
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
struct ath11k_base;
@ -101,6 +104,7 @@ struct target_info {
u32 fw_version;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
};
struct m3_mem_region {
@ -125,6 +129,7 @@ struct ath11k_qmi {
struct target_info target;
struct m3_mem_region m3_mem;
unsigned int service_ins_id;
wait_queue_head_t cold_boot_waitq;
};
#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189


@ -80,6 +80,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
*/
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
init_country_param.cc_info.alpha2[2] = 0;
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
@ -277,6 +278,7 @@ ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
case ATH11K_DFS_REG_KR:
return NL80211_DFS_ETSI;
case ATH11K_DFS_REG_MKK:
case ATH11K_DFS_REG_MKK_N:
return NL80211_DFS_JP;
default:
return NL80211_DFS_UNSET;
@ -584,7 +586,6 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
if (!tmp_regd)
goto ret;
tmp_regd->n_reg_rules = num_rules;
memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
@ -597,7 +598,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order(2G rules first and then 5G)
*/
for (; i < tmp_regd->n_reg_rules; i++) {
for (; i < num_rules; i++) {
if (reg_info->num_2g_reg_rules &&
(i < reg_info->num_2g_reg_rules)) {
reg_rule = reg_info->reg_rules_2g_ptr + i;
@ -652,6 +653,8 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
flags);
}
tmp_regd->n_reg_rules = i;
if (intersect) {
default_regd = ab->default_regd[reg_info->phy_id];


@ -20,6 +20,7 @@ enum ath11k_dfs_region {
ATH11K_DFS_REG_MKK,
ATH11K_DFS_REG_CN,
ATH11K_DFS_REG_KR,
ATH11K_DFS_REG_MKK_N,
ATH11K_DFS_REG_UNDEF,
};


@ -170,7 +170,7 @@ struct rx_attention {
*
* ast_index_not_found
* Only valid when first_msdu is set. Indicates no AST matching
* entries within the the max search count.
* entries within the max search count.
*
* ast_index_timeout
* Only valid when first_msdu is set. Indicates an unsuccessful


@ -51,7 +51,7 @@ bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
if (ret) {
ath11k_warn(ar->ab,
"failed to to put testmode wmi event cmd attribute: %d\n",
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
@ -60,7 +60,7 @@ bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to to put testmode wmi even cmd_id: %d\n",
"failed to put testmode wmi even cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;


@ -122,6 +122,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_stats_event) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
.min_len = sizeof(struct wmi_fils_discovery_event) },
[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};
#define PRIMAP(_hw_mode_) \
@ -362,7 +368,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
* mac and the remaing 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value for
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
* mac80211.
*/
@ -1946,6 +1952,11 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_DEQUEUED;
arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
* ZEROs in probe request
*/
eth_broadcast_addr(arg->bssid_list[0].addr);
}
static inline void
@ -3064,6 +3075,137 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
return ret;
}
int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len;
struct wmi_fils_discovery_tmpl_cmd *cmd;
aligned_len = roundup(tmpl->len, 4);
len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev %i set FILS discovery template\n", vdev_id);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->buf_len = tmpl->len;
ptr = skb->data + sizeof(*cmd);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery template command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_probe_tmpl_cmd *cmd;
struct wmi_bcn_prb_info *probe_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len = roundup(tmpl->len, 4);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev %i set probe response template\n", vdev_id);
len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->buf_len = tmpl->len;
ptr = skb->data + sizeof(*cmd);
probe_info = ptr;
len = sizeof(*probe_info);
probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_BCN_PRB_INFO) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
probe_info->caps = 0;
probe_info->erp = 0;
ptr += sizeof(*probe_info);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send probe response template command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
bool unsol_bcast_probe_resp_enabled)
{
struct sk_buff *skb;
int ret, len;
struct wmi_fils_discovery_cmd *cmd;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev %i set %s interval to %u TU\n",
vdev_id, unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" : "FILS discovery",
interval);
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->interval = interval;
cmd->config = unsol_bcast_probe_resp_enabled;
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery enable/disable command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
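A condensed sketch of how a mac80211 driver drives the three helpers above from its BSS-config path (modelled loosely on ath11k's mac layer; the function name fils_discovery_config and the error handling details are illustrative, not the verbatim driver code):

static int fils_discovery_config(struct ath11k *ar, struct ath11k_vif *arvif,
				 struct ieee80211_bss_conf *info)
{
	struct sk_buff *tmpl;
	bool unsol = false;
	u32 interval;
	int ret;

	if (info->fils_discovery.max_interval) {
		interval = info->fils_discovery.max_interval;
		tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
		if (!tmpl)
			return -EPERM;
		ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, tmpl);
	} else if (info->unsol_bcast_probe_resp_interval) {
		unsol = true;
		interval = info->unsol_bcast_probe_resp_interval;
		tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
								 arvif->vif);
		if (!tmpl)
			return -EPERM;
		ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, tmpl);
	} else {
		return 0; /* neither mode requested; disable path omitted */
	}

	/* the template bytes were copied into the WMI buffer above */
	kfree_skb(tmpl);
	if (ret)
		return ret;

	return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval, unsol);
}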
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
@ -3351,9 +3493,6 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
init_param.mem_chunks = wmi_sc->mem_chunks;
if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
if (ab->hw_params.needs_band_to_mac) {
init_param.num_band_to_mac = ab->num_radios;
ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
@ -4242,6 +4381,34 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *
return 0;
}
static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
struct sk_buff *skb,
u32 *vdev_id)
{
const void **tb;
const struct wmi_vdev_delete_resp_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch vdev delete resp ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = ev->vdev_id;
kfree(tb);
return 0;
}
static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
u32 len, u32 *vdev_id,
u32 *tx_status)
@ -5563,15 +5730,54 @@ static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_delete_resp_event peer_del_resp;
struct ath11k *ar;
if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
ath11k_warn(ab, "failed to extract peer delete resp");
return;
}
/* TODO: Do we need to validate whether ath11k_peer_find() return NULL
* Why this is needed when there is HTT event for peer delete
*/
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
peer_del_resp.vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->peer_delete_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}
static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k *ar;
u32 vdev_id = 0;
if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
ath11k_warn(ab, "failed to extract vdev delete resp");
return;
}
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->vdev_delete_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n",
vdev_id);
}
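The completion signalled above pairs with a synchronous wait on the sending side. A condensed sketch of that pattern (function name and timeout value are illustrative):

static int vdev_delete_sync(struct ath11k *ar, u32 vdev_id)
{
	unsigned long time_left;
	int ret;

	reinit_completion(&ar->vdev_delete_done);

	ret = ath11k_wmi_vdev_delete(ar, vdev_id);
	if (ret)
		return ret;

	/* wait for WMI_VDEV_DELETE_RESP_EVENTID to fire the completion */
	time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
						msecs_to_jiffies(5000));
	return time_left ? 0 : -ETIMEDOUT;
}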
static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
@ -5650,7 +5856,7 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk
}
rcu_read_lock();
ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id);
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
vdev_id);
@ -6429,6 +6635,68 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
ath11k_thermal_event_temperature(ar, ev.temp);
}
static void ath11k_fils_discovery_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_fils_discovery_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse FILS discovery event tlv %d\n",
ret);
return;
}
ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch FILS discovery event\n");
kfree(tb);
return;
}
ath11k_warn(ab,
"FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
ev->vdev_id, ev->fils_tt, ev->tbtt);
kfree(tb);
}
static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse probe response transmission status event tlv: %d\n",
ret);
return;
}
ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
if (!ev) {
ath11k_warn(ab,
"failed to fetch probe response transmission status event");
kfree(tb);
return;
}
if (ev->tx_status)
ath11k_warn(ab,
"Probe response transmission failed for vdev_id %u, status %u\n",
ev->vdev_id, ev->tx_status);
kfree(tb);
}
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@ -6515,9 +6783,14 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
break;
case WMI_HOST_FILS_DISCOVERY_EVENTID:
ath11k_fils_discovery_event(ab, skb);
break;
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath11k_probe_resp_tx_status_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_VDEV_DELETE_RESP_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
@ -6528,6 +6801,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
case WMI_VDEV_DELETE_RESP_EVENTID:
ath11k_vdev_delete_resp_event(ab, skb);
break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);

@ -319,6 +319,7 @@ enum wmi_tlv_cmd_id {
WMI_BCN_OFFLOAD_CTRL_CMDID,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
WMI_FILS_DISCOVERY_TMPL_CMDID,
WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
WMI_ADDBA_SEND_CMDID,
WMI_ADDBA_STATUS_CMDID,
@ -351,6 +352,8 @@ enum wmi_tlv_cmd_id {
WMI_ROAM_CONFIGURE_MAWC_CMDID,
WMI_ROAM_SET_MBO_PARAM_CMDID,
WMI_ROAM_PER_CONFIG_CMDID,
WMI_ROAM_BTM_CONFIG_CMDID,
WMI_ENABLE_FILS_CMDID,
WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
WMI_OFL_SCAN_REMOVE_AP_PROFILE,
WMI_OFL_SCAN_PERIOD,
@ -642,6 +645,8 @@ enum wmi_tlv_event_id {
WMI_MGMT_TX_COMPLETION_EVENTID,
WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
WMI_HOST_FILS_DISCOVERY_EVENTID,
WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
WMI_TX_ADDBA_COMPLETE_EVENTID,
WMI_BA_RSP_SSN_EVENTID,
@ -1810,6 +1815,7 @@ enum wmi_tlv_tag {
/* TODO add all the missing cmds */
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
WMI_TAG_MAX
};
@ -3348,7 +3354,6 @@ struct wmi_mgmt_params {
void *pdata;
u16 desc_id;
u8 *macaddr;
void *qdf_ctx;
};
enum wmi_sta_ps_mode {
@ -4013,6 +4018,10 @@ struct wmi_regulatory_rule_struct {
u32 flag_info;
};
struct wmi_vdev_delete_resp_event {
u32 vdev_id;
} __packed;
struct wmi_peer_delete_resp_event {
u32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@ -4076,6 +4085,17 @@ struct wmi_peer_assoc_conf_arg {
const u8 *macaddr;
};
struct wmi_fils_discovery_event {
u32 vdev_id;
u32 fils_tt;
u32 tbtt;
} __packed;
struct wmi_probe_resp_tx_status_event {
u32 vdev_id;
u32 tx_status;
} __packed;
/*
* PDEV statistics
*/
@ -4908,6 +4928,30 @@ struct wmi_dma_buf_release_meta_data {
u32 ch_width;
} __packed;
enum wmi_fils_discovery_cmd_type {
WMI_FILS_DISCOVERY_CMD,
WMI_UNSOL_BCAST_PROBE_RESP,
};
struct wmi_fils_discovery_cmd {
u32 tlv_header;
u32 vdev_id;
u32 interval;
u32 config; /* enum wmi_fils_discovery_cmd_type */
} __packed;
struct wmi_fils_discovery_tmpl_cmd {
u32 tlv_header;
u32 vdev_id;
u32 buf_len;
} __packed;
struct wmi_probe_tmpl_cmd {
u32 tlv_header;
u32 vdev_id;
u32 buf_len;
} __packed;
struct target_resource_config {
u32 num_vdevs;
u32 num_peers;
@ -5121,4 +5165,10 @@ int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
u32 trigger, u32 enable);
int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
struct ath11k_wmi_vdev_spectral_conf_param *param);
int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl);
int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
bool unsol_bcast_probe_resp_enabled);
int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl);
#endif

@ -94,7 +94,6 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
return 0;
break;
case ATH6KL_TM_CMD_RX_REPORT:
default:
return -EOPNOTSUPP;

@ -1821,8 +1821,8 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
/* Only for OPT_TX_CMD, use BE endpoint. */
if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
false, false, 0, NULL, if_idx);
ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, false,
WMI_DATA_HDR_DATA_TYPE_802_3, 0, NULL, if_idx);
if (ret) {
dev_kfree_skb(skb);
return ret;

@ -102,13 +102,8 @@ static void ar5008_write_bank6(struct ath_hw *ah, unsigned int *writecnt)
REGWRITE_BUFFER_FLUSH(ah);
}
/**
/*
* ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
* @rfbuf:
* @reg32:
* @numBits:
* @firstBit:
* @column:
*
* Performs analog "swizzling" of parameters into their location.
* Used on external AR2133/AR5133 radios.
@ -198,10 +193,8 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
ar5008_write_bank6(ah, &reg_writes);
}
/**
/*
* ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
* @ah: atheros hardware structure
* @chan:
*
* For the external AR2133/AR5133 radios, takes the MHz channel value and set
* the channel value. Assumes writes enabled to analog bus and bank6 register
@ -430,10 +423,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
/**
/*
* ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
* @ah: atheros hardware structure
* @chan:
*
* For non single-chip solutions. Converts to baseband spur frequency given the
* input channel frequency and compute register settings below.

@ -1724,20 +1724,6 @@ static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
{0x00004044, 0x00000000},
};
static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
/* Addr allmodes */
{0x00004040, 0x0825365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
/* Addr allmodes */
{0x00004040, 0x0821365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = {
/* Addr allmodes */
{0x0000a398, 0x00000000},

@ -1010,11 +1010,4 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a1fc, 0x00000296},
};
static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
{0x00000000},
{0x00000003},
{0x00000000},
{0x00000000},
};
#endif /* INITVALS_9330_1P1_H */

@ -621,107 +621,6 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
};
static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
{0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
{0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
{0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
{0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
{0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
{0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
{0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
{0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
{0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
{0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
};
static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},

@ -1006,13 +1006,6 @@ static const u32 ar9485_1_1_soc_preamble[][2] = {
{0x00007048, 0x00000002},
};
static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x00009e00, 0x03721821, 0x03721821},
{0x0000a230, 0x0000400b, 0x00004016},
{0x0000a254, 0x00000898, 0x00001130},
};
static const u32 ar9485_1_1_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},

@ -44,9 +44,8 @@ static u32 ath_dynack_get_max_to(struct ath_hw *ah)
return 600;
}
/**
/*
* ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
*
*/
static inline int ath_dynack_ewma(int old, int new)
{
@ -247,8 +246,12 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
ridx = ts->ts_rateindex;
da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);
/* ether_addr_copy() gives a false warning on gcc-10 so use memcpy()
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97490
*/
memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1, ETH_ALEN);
memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2, ETH_ALEN);
if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
const struct ieee80211_rate *rate;

@ -2308,7 +2308,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
ath_dbg(ath9k_hw_common(ah), BEACON,
"%s: unsupported opmode: %d\n", __func__, ah->opmode);
return;
break;
}
REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);

@ -23,7 +23,7 @@
/**
* struct radar_types - contains array of patterns defined for one DFS domain
* @domain: DFS regulatory domain
* @region: regulatory DFS region
* @num_radar_types: number of radar types to follow
* @radar_types: radar types array
*/
@ -133,8 +133,9 @@ static const struct radar_types *dfs_domains[] = {
/**
* get_dfs_domain_radar_types() - get radar types for a given DFS domain
* @param domain DFS domain
* @return radar_types ptr on success, NULL if DFS domain is not supported
* @region: regulatory DFS region
*
* Return value: radar_types ptr on success, NULL if DFS domain is not supported
*/
static const struct radar_types *
get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
@ -227,9 +228,10 @@ fail:
/**
* channel_detector_get() - get channel detector for given frequency
* @param dpd instance pointer
* @param freq frequency in MHz
* @return pointer to channel detector on success, NULL otherwise
* @dpd: DPD instance pointer
* @freq: frequency in MHz
*
* Return value: pointer to channel detector on success, NULL otherwise
*
* Return existing channel detector for the given frequency or return a
* newly create one.

@ -29,18 +29,17 @@ struct ath_dfs_pool_stats global_dfs_pool_stats = {};
(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
MIN + PRI_TOLERANCE : RUNTIME)
/**
/*
* struct pulse_elem - elements in pulse queue
* @ts: time stamp in usecs
*/
struct pulse_elem {
struct list_head head;
u64 ts;
};
/**
/*
* pde_get_multiple() - get number of multiples considering a given tolerance
* @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
* Return value: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
*/
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
@ -70,7 +69,7 @@ static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
return factor;
}
/**
/*
* DOC: Singleton Pulse and Sequence Pools
*
* Instances of pri_sequence and pulse_elem are kept in singleton pools to

@ -360,6 +360,7 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
/**
* ath_reg_apply_ir_flags()
* @wiphy: the wiphy to use
* @reg: regulatory structure - used for country selection
* @initiator: the regulatory hint initiator
*
* If no country IE has been received always enable passive scan

@ -910,6 +910,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
* place where AID is available.
*/
wcn36xx_smd_config_sta(wcn, vif, sta);
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
} else {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"disassociated bss %pM vif %pM AID=%d\n",
@ -1246,6 +1247,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |

@ -78,6 +78,7 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */
WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000),
};
static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = {
@ -162,7 +163,7 @@ static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = {
WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000),
WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 200),
WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000),
WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0),
WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0),
WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0),
@ -2175,6 +2176,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
msg_body.bss_index = vif_priv->bss_index;
msg_body.send_data_null = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);

@ -262,7 +262,7 @@ struct fw_map *wil_find_fw_mapping(const char *section)
/**
* Check address validity for WMI buffer; remap if needed
* @wil: driver data
* @ptr: internal (linker) fw/ucode address
* @ptr_: internal (linker) fw/ucode address
* @size: if non zero, validate the block does not
* exceed the device memory (bar)
*

@ -367,7 +367,7 @@ static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
* @func: SDIO function
* @write: direction flag
* @addr: dongle memory address as source/destination
* @pkt: skb pointer
* @pktlist: skb buffer head pointer
*
* This function takes the responsibility as the interface function to MMC
* stack for block data access. It assumes that the skb passed down by the

@ -2137,7 +2137,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
BRCMF_WSEC_MAX_PSK_LEN);
else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
/* clean up user-space RSNE */
if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) {
err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0);
if (err) {
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}

@ -384,6 +384,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
* @drvr: driver information object.
* @event_packet: event packet to process.
* @packet_len: length of the packet
* @gfp: memory allocation flags.
*
* If the packet buffer contains a firmware event message it will
* dispatch the event to a registered handler (using worker).

@ -759,6 +759,7 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
/**
* brcmf_pcie_bus_console_read - reads firmware messages
*
* @devinfo: pointer to the device data structure
* @error: specifies if error has occurred (prints messages unconditionally)
*/
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
@ -1936,16 +1937,18 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
if (!fwreq) {
ret = -ENOMEM;
goto fail_bus;
goto fail_brcmf;
}
ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
if (ret < 0) {
kfree(fwreq);
goto fail_bus;
goto fail_brcmf;
}
return 0;
fail_brcmf:
brcmf_free(&devinfo->pdev->dev);
fail_bus:
kfree(bus->msgbuf);
kfree(bus);

@ -625,6 +625,10 @@ BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt");
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt");
static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0),
@ -1340,7 +1344,7 @@ static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
u32 hdrvalue;
hdrvalue = *(u32 *)swheader;
hdrvalue = le32_to_cpu(*(__le32 *)swheader);
return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
@ -1349,7 +1353,7 @@ static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
u32 hdrvalue;
u8 ret;
hdrvalue = *(u32 *)swheader;
hdrvalue = le32_to_cpu(*(__le32 *)swheader);
ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
return (ret == SDPCM_EVENT_CHANNEL);
@ -3517,6 +3521,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_core *core = bus->sdio_core;
u32 value;
__le32 iovar;
int err;
/* maxctl provided by common layer */
@ -3537,16 +3542,16 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
*/
if (core->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
sizeof(u32));
iovar = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &iovar,
sizeof(iovar));
} else {
/* otherwise, set txglomalign */
value = sdiodev->settings->bus.sdio.sd_sgentry_align;
/* SDIO ADMA requires at least 32 bit alignment */
value = max_t(u32, value, ALIGNMENT);
err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
sizeof(u32));
iovar = cpu_to_le32(max_t(u32, value, ALIGNMENT));
err = brcmf_iovar_data_set(dev, "bus:txglomalign", &iovar,
sizeof(iovar));
}
if (err < 0)
@ -3555,9 +3560,9 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
if (sdiodev->sg_support) {
bus->txglom = false;
value = 1;
iovar = cpu_to_le32(1);
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
&iovar, sizeof(iovar));
if (err < 0) {
/* bus:rxglom is allowed to fail */
err = 0;
@ -4541,6 +4546,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_sdiod_intr_unregister(bus->sdiodev);
brcmf_detach(bus->sdiodev->dev);
brcmf_free(bus->sdiodev->dev);
cancel_work_sync(&bus->datawork);
if (bus->brcmf_wq)

@ -942,14 +942,19 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
index = TX_SEQ_TO_INDEX(seq);
ack_recd = false;
if (ba_recd) {
int block_acked;
bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
if (bindex < AMPDU_TX_BA_MAX_WSIZE)
block_acked = isset(bitmap, bindex);
else
block_acked = 0;
brcms_dbg_ht(wlc->hw->d11core,
"tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
tid, seq, start_seq, bindex,
isset(bitmap, bindex), index);
block_acked, index);
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
if (block_acked) {
ini->txretry[index] = 0;
/*

@ -1115,7 +1115,8 @@ static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
static void enable_interrupts(struct airo_info*);
static void disable_interrupts(struct airo_info*);
static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp);
static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp,
bool may_sleep);
static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
int whichbap);
@ -1130,8 +1131,10 @@ static int PC4500_writerid(struct airo_info*, u16 rid, const void
static int do_writerid(struct airo_info*, u16 rid, const void *rid_data,
int len, int dummy);
static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);
static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket,
bool may_sleep);
static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket,
bool may_sleep);
static int mpi_send_packet(struct net_device *dev);
static void mpi_unmap_card(struct pci_dev *pci);
@ -1144,7 +1147,6 @@ static int airo_thread(void *data);
static void timer_func(struct net_device *dev);
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
static void airo_read_wireless_stats(struct airo_info *local);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
static int writerids(struct net_device *dev, aironet_ioctl *comp);
@ -1200,7 +1202,6 @@ struct airo_info {
#define JOB_MIC 5
#define JOB_EVENT 6
#define JOB_AUTOWEP 7
#define JOB_WSTATS 8
#define JOB_SCAN_RESULTS 9
unsigned long jobs;
int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen,
@ -1755,7 +1756,7 @@ static int readBSSListRid(struct airo_info *ai, int first,
if (down_interruptible(&ai->sem))
return -ERESTARTSYS;
ai->list_bss_task = current;
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, true);
up(&ai->sem);
/* Let the command take effect */
schedule_timeout_uninterruptible(3 * HZ);
@ -2098,7 +2099,7 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
}
}
static void airo_end_xmit(struct net_device *dev)
static void airo_end_xmit(struct net_device *dev, bool may_sleep)
{
u16 status;
int i;
@ -2109,7 +2110,7 @@ static void airo_end_xmit(struct net_device *dev)
clear_bit(JOB_XMIT, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT, &priv->flags);
status = transmit_802_3_packet (priv, fids[fid], skb->data);
status = transmit_802_3_packet(priv, fids[fid], skb->data, may_sleep);
up(&priv->sem);
i = 0;
@ -2166,11 +2167,11 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
set_bit(JOB_XMIT, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit(dev);
airo_end_xmit(dev, false);
return NETDEV_TX_OK;
}
static void airo_end_xmit11(struct net_device *dev)
static void airo_end_xmit11(struct net_device *dev, bool may_sleep)
{
u16 status;
int i;
@ -2181,7 +2182,7 @@ static void airo_end_xmit11(struct net_device *dev)
clear_bit(JOB_XMIT11, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
status = transmit_802_11_packet (priv, fids[fid], skb->data);
status = transmit_802_11_packet(priv, fids[fid], skb->data, may_sleep);
up(&priv->sem);
i = MAX_FIDS / 2;
@ -2245,7 +2246,7 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
set_bit(JOB_XMIT11, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit11(dev);
airo_end_xmit11(dev, false);
return NETDEV_TX_OK;
}
@ -2288,18 +2289,14 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
struct airo_info *local = dev->ml_priv;
if (!test_bit(JOB_STATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
set_bit(JOB_STATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_stats(dev);
set_bit(JOB_STATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
}
return &dev->stats;
}
static void airo_set_promisc(struct airo_info *ai)
static void airo_set_promisc(struct airo_info *ai, bool may_sleep)
{
Cmd cmd;
Resp rsp;
@ -2308,7 +2305,7 @@ static void airo_set_promisc(struct airo_info *ai)
cmd.cmd = CMD_SETMODE;
clear_bit(JOB_PROMISC, &ai->jobs);
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, may_sleep);
up(&ai->sem);
}
@ -2322,7 +2319,7 @@ static void airo_set_multicast_list(struct net_device *dev)
set_bit(JOB_PROMISC, &ai->jobs);
wake_up_interruptible(&ai->thr_wait);
} else
airo_set_promisc(ai);
airo_set_promisc(ai, false);
}
if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
@ -2482,7 +2479,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
cmd.parm0 = FID_RX;
cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
cmd.parm2 = MPI_MAX_FIDS;
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
return rc;
@ -2510,7 +2507,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
}
ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
return rc;
@ -2524,7 +2521,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
cmd.parm0 = RID_RW;
cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
cmd.parm2 = 1; /* Magic number... */
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RID");
return rc;
@ -3150,15 +3147,13 @@ static int airo_thread(void *data)
}
if (test_bit(JOB_XMIT, &ai->jobs))
airo_end_xmit(dev);
airo_end_xmit(dev, true);
else if (test_bit(JOB_XMIT11, &ai->jobs))
airo_end_xmit11(dev);
airo_end_xmit11(dev, true);
else if (test_bit(JOB_STATS, &ai->jobs))
airo_read_stats(dev);
else if (test_bit(JOB_WSTATS, &ai->jobs))
airo_read_wireless_stats(ai);
else if (test_bit(JOB_PROMISC, &ai->jobs))
airo_set_promisc(ai);
airo_set_promisc(ai, true);
else if (test_bit(JOB_MIC, &ai->jobs))
micinit(ai);
else if (test_bit(JOB_EVENT, &ai->jobs))
@ -3281,11 +3276,9 @@ static void airo_handle_link(struct airo_info *ai)
set_bit(FLAG_UPDATE_UNI, &ai->flags);
set_bit(FLAG_UPDATE_MULTI, &ai->flags);
if (down_trylock(&ai->sem) != 0) {
set_bit(JOB_EVENT, &ai->jobs);
wake_up_interruptible(&ai->thr_wait);
} else
airo_send_event(ai->dev);
set_bit(JOB_EVENT, &ai->jobs);
wake_up_interruptible(&ai->thr_wait);
netif_carrier_on(ai->dev);
} else if (!scan_forceloss) {
if (auto_wep && !ai->expires) {
@ -3609,7 +3602,7 @@ static int enable_MAC(struct airo_info *ai, int lock)
if (!test_bit(FLAG_ENABLED, &ai->flags)) {
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = MAC_ENABLE;
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if (rc == SUCCESS)
set_bit(FLAG_ENABLED, &ai->flags);
} else
@ -3641,7 +3634,7 @@ static void disable_MAC(struct airo_info *ai, int lock)
netif_carrier_off(ai->dev);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = MAC_DISABLE; // disable in case already enabled
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, true);
clear_bit(FLAG_ENABLED, &ai->flags);
}
if (lock == 1)
@ -3844,7 +3837,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
if (lock && down_interruptible(&ai->sem))
return ERROR;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
if (lock)
up(&ai->sem);
return ERROR;
@ -3854,7 +3847,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
// Let's figure out if we need to use the AUX port
if (!test_bit(FLAG_MPI,&ai->flags)) {
cmd.cmd = CMD_ENABLEAUX;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
if (lock)
up(&ai->sem);
airo_print_err(ai->dev->name, "Error checking for AUX port");
@ -3966,7 +3959,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
return SUCCESS;
}
static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp)
static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp,
bool may_sleep)
{
// Im really paranoid about letting it run forever!
int max_tries = 600000;
@ -3983,8 +3977,8 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp)
if ((IN4500(ai, COMMAND)) == pCmd->cmd)
// PC4500 didn't notice command, try again
OUT4500(ai, COMMAND, pCmd->cmd);
if (!in_atomic() && (max_tries & 255) == 0)
schedule();
if (may_sleep && (max_tries & 255) == 0)
cond_resched();
}
if (max_tries == -1) {
@ -4141,7 +4135,7 @@ static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = accmd;
cmd.parm0 = rid;
status = issuecommand(ai, &cmd, &rsp);
status = issuecommand(ai, &cmd, &rsp, true);
if (status != 0) return status;
if ((rsp.status & 0x7F00) != 0) {
return (accmd << 8) + (rsp.rsp0 & 0xFF);
@ -4177,7 +4171,7 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in
memcpy_toio(ai->config_desc.card_ram_off,
&ai->config_desc.rid_desc, sizeof(Rid));
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if (rsp.status & 0x7f00)
rc = rsp.rsp0;
@ -4256,7 +4250,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
memcpy(ai->config_desc.virtual_host_addr,
pBuf, len);
rc = issuecommand(ai, &cmd, &rsp);
rc = issuecommand(ai, &cmd, &rsp, true);
if ((rc & 0xff00) != 0) {
airo_print_err(ai->dev->name, "%s: Write rid Error %d",
__func__, rc);
@ -4302,7 +4296,7 @@ static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
cmd.parm0 = lenPayload;
if (down_interruptible(&ai->sem))
return ERROR;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
txFid = ERROR;
goto done;
}
@ -4348,7 +4342,8 @@ done:
/* In general BAP1 is dedicated to transmitting packets. However,
since we need a BAP when accessing RIDs, we also use BAP1 for that.
Make sure the BAP1 spinlock is held when this is called. */
static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket,
bool may_sleep)
{
__le16 payloadLen;
Cmd cmd;
@ -4386,12 +4381,14 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS)
return ERROR;
if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket,
bool may_sleep)
{
__le16 fc, payloadLen;
Cmd cmd;
@ -4426,7 +4423,8 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS)
return ERROR;
if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
@ -5490,7 +5488,7 @@ static int proc_BSSList_open(struct inode *inode, struct file *file)
kfree(file->private_data);
return -ERESTARTSYS;
}
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, true);
up(&ai->sem);
data->readlen = 0;
return 0;
@ -5627,7 +5625,7 @@ static int __maybe_unused airo_pci_suspend(struct device *dev_d)
netif_device_detach(dev);
ai->power = PMSG_SUSPEND;
cmd.cmd = HOSTSLEEP;
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, true);
device_wakeup_enable(dev_d);
return 0;
@ -5787,7 +5785,7 @@ static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
}
#define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0)
#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50);
#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50)
/*------------------------------------------------------------------*/
/*
@ -5970,7 +5968,7 @@ static int airo_set_wap(struct net_device *dev,
cmd.cmd = CMD_LOSE_SYNC;
if (down_interruptible(&local->sem))
return -ERESTARTSYS;
issuecommand(local, &cmd, &rsp);
issuecommand(local, &cmd, &rsp, true);
up(&local->sem);
} else {
memset(APList_rid, 0, sizeof(*APList_rid));
@ -7268,7 +7266,7 @@ static int airo_set_scan(struct net_device *dev,
ai->scan_timeout = RUN_AT(3*HZ);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_LISTBSS;
issuecommand(ai, &cmd, &rsp);
issuecommand(ai, &cmd, &rsp, true);
wake = 1;
out:
@ -7535,7 +7533,7 @@ static int airo_config_commit(struct net_device *dev,
writeConfigRid(local, 0);
enable_MAC(local, 0);
if (test_bit (FLAG_RESET, &local->flags))
airo_set_promisc(local);
airo_set_promisc(local, true);
else
up(&local->sem);
@ -7732,15 +7730,12 @@ static void airo_read_wireless_stats(struct airo_info *local)
__le32 *vals = stats_rid.vals;
/* Get stats out of the card */
clear_bit(JOB_WSTATS, &local->jobs);
if (local->power.event) {
up(&local->sem);
if (local->power.event)
return;
}
readCapabilityRid(local, &cap_rid, 0);
readStatusRid(local, &status_rid, 0);
readStatsRid(local, &stats_rid, RID_STATS, 0);
up(&local->sem);
/* The status */
local->wstats.status = le16_to_cpu(status_rid.mode);
@ -7783,15 +7778,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
{
struct airo_info *local = dev->ml_priv;
if (!test_bit(JOB_WSTATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
set_bit(JOB_WSTATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_wireless_stats(local);
if (!down_interruptible(&local->sem)) {
airo_read_wireless_stats(local);
up(&local->sem);
}
return &local->wstats;
}

@ -1202,13 +1202,11 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
force_assoc_off,
bssid_override);
break;
case NL80211_IFTYPE_AP:
if (!vif->p2p)
return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
else
return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
break;
case NL80211_IFTYPE_MONITOR:
return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
case NL80211_IFTYPE_P2P_DEVICE:

@ -3169,22 +3169,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
/* Initialize tasklets for handling hardware IRQ related operations
* outside hw IRQ handler */
#define HOSTAP_TASKLET_INIT(q, f, d) \
do { memset((q), 0, sizeof(*(q))); (q)->func = (void(*)(unsigned long))(f); } \
while (0)
HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
(unsigned long) local);
HOSTAP_TASKLET_INIT(&local->info_tasklet, hostap_info_tasklet,
(unsigned long) local);
tasklet_setup(&local->bap_tasklet, hostap_bap_tasklet);
tasklet_setup(&local->info_tasklet, hostap_info_tasklet);
hostap_info_init(local);
HOSTAP_TASKLET_INIT(&local->rx_tasklet,
hostap_rx_tasklet, (unsigned long) local);
tasklet_setup(&local->rx_tasklet, hostap_rx_tasklet);
skb_queue_head_init(&local->rx_list);
HOSTAP_TASKLET_INIT(&local->sta_tx_exc_tasklet,
hostap_sta_tx_exc_tasklet, (unsigned long) local);
tasklet_setup(&local->sta_tx_exc_tasklet,
hostap_sta_tx_exc_tasklet);
skb_queue_head_init(&local->sta_tx_exc_list);
INIT_LIST_HEAD(&local->cmd_queue);
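tasklet_setup() pairs with callbacks that take the tasklet pointer itself; the matching handler conversion (not shown in this hunk) follows the usual from_tasklet() pattern, roughly:

/* Sketch of a converted handler: only the prototype and the container
 * lookup change; the body keeps using 'local' as before.
 */
static void hostap_bap_tasklet(struct tasklet_struct *t)
{
	local_info_t *local = from_tasklet(local, t, bap_tasklet);

	/* ... existing BAP processing on 'local' ... */
}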

@ -44,19 +44,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
if (local->iw_mode != IW_MODE_MASTER &&
local->iw_mode != IW_MODE_REPEAT) {
int update = 1;
#ifdef in_atomic
/* RID reading might sleep and it must not be called in
* interrupt context or while atomic. However, this
* function seems to be called while atomic (at least in Linux
* 2.5.59). Update signal quality values only if in suitable
* context. Otherwise, previous values read from tick timer
* will be used. */
if (in_atomic())
update = 0;
#endif /* in_atomic */
if (update && prism2_update_comms_qual(dev) == 0)
if (prism2_update_comms_qual(dev) == 0)
wstats->qual.updated = IW_QUAL_ALL_UPDATED |
IW_QUAL_DBM;

@ -763,6 +763,7 @@ static const struct hermes_ops hermes_ops_local = {
.init_cmd_wait = hermes_doicmd_wait,
.allocate = hermes_allocate,
.read_ltv = hermes_read_ltv,
.read_ltv_pr = hermes_read_ltv,
.write_ltv = hermes_write_ltv,
.bap_pread = hermes_bap_pread,
.bap_pwrite = hermes_bap_pwrite,

@ -386,6 +386,8 @@ struct hermes_ops {
int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
u16 *length, void *buf);
int (*read_ltv_pr)(struct hermes *hw, int bap, u16 rid,
unsigned buflen, u16 *length, void *buf);
int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
u16 length, const void *value);
int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
@ -494,6 +496,8 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
#define HERMES_READ_RECORD(hw, bap, rid, buf) \
(hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
#define HERMES_READ_RECORD_PR(hw, bap, rid, buf) \
(hw->ops->read_ltv_pr((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
(hw->ops->write_ltv((hw), (bap), (rid), \
HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
@ -509,6 +513,17 @@ static inline int hermes_read_wordrec(struct hermes *hw, int bap, u16 rid,
return err;
}
static inline int hermes_read_wordrec_pr(struct hermes *hw, int bap, u16 rid,
u16 *word)
{
__le16 rec;
int err;
err = HERMES_READ_RECORD_PR(hw, bap, rid, &rec);
*word = le16_to_cpu(rec);
return err;
}
static inline int hermes_write_wordrec(struct hermes *hw, int bap, u16 rid,
u16 word)
{

@ -78,7 +78,7 @@ int determine_fw_capabilities(struct orinoco_private *priv,
char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2)));
/* Get the hardware version */
err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
if (err) {
dev_err(dev, "Cannot read hardware identity: error %d\n",
err);
@ -101,7 +101,7 @@ int determine_fw_capabilities(struct orinoco_private *priv,
priv->firmware_type = determine_firmware_type(&nic_id);
/* Get the firmware version */
err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
if (err) {
dev_err(dev, "Cannot read station identity: error %d\n",
err);
@ -177,7 +177,7 @@ int determine_fw_capabilities(struct orinoco_private *priv,
/* 3Com MAC : 00:50:DA:* */
memset(tmp, 0, sizeof(tmp));
/* Get the Symbol firmware version */
err = hw->ops->read_ltv(hw, USER_BAP,
err = hw->ops->read_ltv_pr(hw, USER_BAP,
HERMES_RID_SECONDARYVERSION_SYMBOL,
SYMBOL_MAX_VER_LEN, NULL, &tmp);
if (err) {
@ -286,7 +286,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
u16 reclen;
/* Get the MAC address */
err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
ETH_ALEN, NULL, dev_addr);
if (err) {
dev_warn(dev, "Failed to read MAC address!\n");
@ -296,7 +296,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
dev_dbg(dev, "MAC address %pM\n", dev_addr);
/* Get the station name */
err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
sizeof(nickbuf), &reclen, &nickbuf);
if (err) {
dev_err(dev, "failed to read station name\n");
@ -312,7 +312,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
dev_dbg(dev, "Station name \"%s\"\n", priv->nick);
/* Get allowed channels */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CHANNELLIST,
&priv->channel_mask);
if (err) {
dev_err(dev, "Failed to read channel list!\n");
@ -320,13 +320,13 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
}
/* Get initial AP density */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
&priv->ap_density);
if (err || priv->ap_density < 1 || priv->ap_density > 3)
priv->has_sensitivity = 0;
/* Get initial RTS threshold */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
&priv->rts_thresh);
if (err) {
dev_err(dev, "Failed to read RTS threshold!\n");
@ -335,11 +335,11 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
/* Get initial fragmentation settings */
if (priv->has_mwo)
err = hermes_read_wordrec(hw, USER_BAP,
err = hermes_read_wordrec_pr(hw, USER_BAP,
HERMES_RID_CNFMWOROBUST_AGERE,
&priv->mwo_robust);
else
err = hermes_read_wordrec(hw, USER_BAP,
err = hermes_read_wordrec_pr(hw, USER_BAP,
HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
&priv->frag_thresh);
if (err) {
@ -351,7 +351,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
if (priv->has_pm) {
priv->pm_on = 0;
priv->pm_mcast = 1;
err = hermes_read_wordrec(hw, USER_BAP,
err = hermes_read_wordrec_pr(hw, USER_BAP,
HERMES_RID_CNFMAXSLEEPDURATION,
&priv->pm_period);
if (err) {
@ -359,7 +359,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
"period!\n");
goto out;
}
err = hermes_read_wordrec(hw, USER_BAP,
err = hermes_read_wordrec_pr(hw, USER_BAP,
HERMES_RID_CNFPMHOLDOVERDURATION,
&priv->pm_timeout);
if (err) {
@ -371,7 +371,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
/* Preamble setup */
if (priv->has_preamble) {
err = hermes_read_wordrec(hw, USER_BAP,
err = hermes_read_wordrec_pr(hw, USER_BAP,
HERMES_RID_CNFPREAMBLE_SYMBOL,
&priv->preamble);
if (err) {
@ -381,21 +381,21 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
}
/* Retry settings */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
&priv->short_retry_limit);
if (err) {
dev_err(dev, "Failed to read short retry limit\n");
goto out;
}
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
&priv->long_retry_limit);
if (err) {
dev_err(dev, "Failed to read long retry limit\n");
goto out;
}
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
&priv->retry_lifetime);
if (err) {
dev_err(dev, "Failed to read max retry lifetime\n");

@ -665,26 +665,17 @@ static void ezusb_request_in_callback(struct ezusb_priv *upriv,
} /* switch */
}
typedef void (*ezusb_ctx_wait)(struct ezusb_priv *, struct request_context *);
static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
struct request_context *ctx)
static void ezusb_req_ctx_wait_compl(struct ezusb_priv *upriv,
struct request_context *ctx)
{
switch (ctx->state) {
case EZUSB_CTX_QUEUED:
case EZUSB_CTX_REQ_SUBMITTED:
case EZUSB_CTX_REQ_COMPLETE:
case EZUSB_CTX_RESP_RECEIVED:
if (in_softirq()) {
/* If we get called from a timer, timeout timers don't
* get the chance to run themselves. So we make sure
* that we don't sleep for ever */
int msecs = DEF_TIMEOUT * (1000 / HZ);
while (!try_wait_for_completion(&ctx->done) && msecs--)
udelay(1000);
} else {
wait_for_completion(&ctx->done);
}
wait_for_completion(&ctx->done);
break;
default:
/* Done or failed - nothing to wait for */
@ -692,6 +683,38 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
}
}
static void ezusb_req_ctx_wait_poll(struct ezusb_priv *upriv,
struct request_context *ctx)
{
int msecs;
switch (ctx->state) {
case EZUSB_CTX_QUEUED:
case EZUSB_CTX_REQ_SUBMITTED:
case EZUSB_CTX_REQ_COMPLETE:
case EZUSB_CTX_RESP_RECEIVED:
/* If we get called from a timer or with our lock acquired, then
* we can't wait for the completion and have to poll. This won't
* happen if the USB controller completes the URB requests in
* BH.
*/
msecs = DEF_TIMEOUT * (1000 / HZ);
while (!try_wait_for_completion(&ctx->done) && msecs--)
udelay(1000);
break;
default:
/* Done or failed - nothing to wait for */
break;
}
}
static void ezusb_req_ctx_wait_skip(struct ezusb_priv *upriv,
struct request_context *ctx)
{
WARN(1, "Shouldn't be invoked for in_rid\n");
}
static inline u16 build_crc(struct ezusb_packet *data)
{
u16 crc = 0;
@ -853,14 +876,13 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv,
static int ezusb_access_ltv(struct ezusb_priv *upriv,
struct request_context *ctx,
u16 length, const void *data, u16 frame_type,
void *ans_buff, unsigned ans_size, u16 *ans_length)
void *ans_buff, unsigned ans_size, u16 *ans_length,
ezusb_ctx_wait ezusb_ctx_wait_func)
{
int req_size;
int retval = 0;
enum ezusb_state state;
BUG_ON(in_irq());
if (!upriv->udev) {
retval = -ENODEV;
goto exit;
@ -885,7 +907,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
spin_unlock_bh(&upriv->reply_count_lock);
if (ctx->in_rid)
ezusb_req_ctx_wait(upriv, ctx);
ezusb_ctx_wait_func(upriv, ctx);
state = ctx->state;
switch (state) {
@ -946,8 +968,9 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
return retval;
}
static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *data)
static int __ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *data,
ezusb_ctx_wait ezusb_ctx_wait_func)
{
struct ezusb_priv *upriv = hw->priv;
u16 frame_type;
@ -973,11 +996,20 @@ static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
frame_type = EZUSB_FRAME_CONTROL;
return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
NULL, 0, NULL);
NULL, 0, NULL, ezusb_ctx_wait_func);
}
static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf)
static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *data)
{
return __ezusb_write_ltv(hw, bap, rid, length, data,
ezusb_req_ctx_wait_poll);
}
static int __ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf,
ezusb_ctx_wait ezusb_ctx_wait_func)
{
struct ezusb_priv *upriv = hw->priv;
struct request_context *ctx;
@ -990,34 +1022,33 @@ static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
buf, bufsize, length);
buf, bufsize, length, ezusb_req_ctx_wait_poll);
}
static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf)
{
return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf,
ezusb_req_ctx_wait_poll);
}
static int ezusb_read_ltv_preempt(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf)
{
return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf,
ezusb_req_ctx_wait_compl);
}
static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1,
u16 parm2, struct hermes_response *resp)
{
struct ezusb_priv *upriv = hw->priv;
struct request_context *ctx;
__le16 data[4] = {
cpu_to_le16(cmd),
cpu_to_le16(parm0),
cpu_to_le16(parm1),
cpu_to_le16(parm2),
};
netdev_dbg(upriv->dev,
"0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X\n", cmd,
parm0, parm1, parm2);
ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
if (!ctx)
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
WARN_ON_ONCE(1);
return -EINVAL;
}
static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp)
static int __ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp,
ezusb_ctx_wait ezusb_ctx_wait_func)
{
struct ezusb_priv *upriv = hw->priv;
struct request_context *ctx;
@ -1034,7 +1065,14 @@ static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
EZUSB_FRAME_CONTROL, NULL, 0, NULL,
ezusb_ctx_wait_func);
}
static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp)
{
return __ezusb_docmd_wait(hw, cmd, parm0, resp, ezusb_req_ctx_wait_poll);
}
static int ezusb_bap_pread(struct hermes *hw, int bap,
@ -1092,7 +1130,7 @@ static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
NULL);
NULL, ezusb_req_ctx_wait_compl);
}
static int ezusb_program_init(struct hermes *hw, u32 entry_point)
@ -1106,7 +1144,8 @@ static int ezusb_program_init(struct hermes *hw, u32 entry_point)
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
EZUSB_FRAME_CONTROL, NULL, 0, NULL,
ezusb_req_ctx_wait_compl);
}
static int ezusb_program_end(struct hermes *hw)
@ -1119,7 +1158,8 @@ static int ezusb_program_end(struct hermes *hw)
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, 0, NULL,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
EZUSB_FRAME_CONTROL, NULL, 0, NULL,
ezusb_req_ctx_wait_compl);
}
static int ezusb_program_bytes(struct hermes *hw, const char *buf,
@ -1135,7 +1175,8 @@ static int ezusb_program_bytes(struct hermes *hw, const char *buf,
return -ENOMEM;
err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
EZUSB_FRAME_CONTROL, NULL, 0, NULL,
ezusb_req_ctx_wait_compl);
if (err)
return err;
@ -1144,7 +1185,8 @@ static int ezusb_program_bytes(struct hermes *hw, const char *buf,
return -ENOMEM;
return ezusb_access_ltv(upriv, ctx, len, buf,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
EZUSB_FRAME_CONTROL, NULL, 0, NULL,
ezusb_req_ctx_wait_compl);
}
static int ezusb_program(struct hermes *hw, const char *buf,
@ -1223,13 +1265,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < ETH_HLEN)
goto drop;
ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
if (!ctx)
goto busy;
memset(ctx->buf, 0, BULK_BUF_SIZE);
buf = ctx->buf->data;
tx_control = 0;
err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
@ -1237,6 +1272,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
if (err)
goto drop;
ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
if (!ctx)
goto drop;
memset(ctx->buf, 0, BULK_BUF_SIZE);
buf = ctx->buf->data;
{
__le16 *tx_cntl = (__le16 *)buf;
*tx_cntl = cpu_to_le16(tx_control);
@ -1264,7 +1306,8 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
tx_size = ALIGN(buf - ctx->buf->data, 2);
err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
EZUSB_FRAME_DATA, NULL, 0, NULL);
EZUSB_FRAME_DATA, NULL, 0, NULL,
ezusb_req_ctx_wait_skip);
if (err) {
netif_start_queue(dev);
@ -1349,7 +1392,6 @@ static int ezusb_init(struct hermes *hw)
struct ezusb_priv *upriv = hw->priv;
int retval;
BUG_ON(in_interrupt());
if (!upriv)
return -EINVAL;
@ -1362,14 +1404,16 @@ static int ezusb_init(struct hermes *hw)
usb_kill_urb(upriv->read_urb);
ezusb_submit_in_urb(upriv);
retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
retval = __ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
HERMES_BYTES_TO_RECLEN(2), "\x10\x00",
ezusb_req_ctx_wait_compl);
if (retval < 0) {
printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
return retval;
}
retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
retval = __ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL,
ezusb_req_ctx_wait_compl);
if (retval < 0) {
printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
return retval;
@ -1448,7 +1492,6 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
struct list_head *tmp_item;
unsigned long flags;
BUG_ON(in_interrupt());
BUG_ON(!upriv);
mutex_lock(&upriv->mtx);
@ -1533,6 +1576,7 @@ static const struct hermes_ops ezusb_ops = {
.init_cmd_wait = ezusb_doicmd_wait,
.allocate = ezusb_allocate,
.read_ltv = ezusb_read_ltv,
.read_ltv_pr = ezusb_read_ltv_preempt,
.write_ltv = ezusb_write_ltv,
.bap_pread = ezusb_bap_pread,
.read_pda = ezusb_read_pda,


@ -54,7 +54,7 @@ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
/**
* prism54_mib_mode_helper - MIB change mode helper function
* @mib: the &struct islpci_mib object to modify
* @priv: the &struct islpci_private object to modify
* @iw_mode: new mode (%IW_MODE_*)
*
* This is a helper function, hence it does not lock. Make sure
@ -114,14 +114,13 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
return 0;
}
/**
/*
* prism54_mib_init - fill MIB cache with defaults
*
* this function initializes the struct given as @mib with defaults,
* of which many are retrieved from the global module parameter
* variables.
*/
void
prism54_mib_init(islpci_private *priv)
{


@ -1455,7 +1455,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
}
/*
* This function gets called during PCIe function level reset.
* This function can be used for shutting down the adapter SW.
*/
int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
{
@ -1471,6 +1471,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
mwifiex_uninit_sw(adapter);
adapter->is_up = false;
@ -1481,7 +1483,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
}
EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
/* This function gets called during PCIe function level reset. Required
/* This function can be used for reinitting the adapter SW. Required
* code is extracted from mwifiex_add_card()
*/
int


@ -429,7 +429,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
struct mwifiex_private *priv;
const struct mwifiex_pcie_card_reg *reg;
u32 fw_status;
int ret;
card = pci_get_drvdata(pdev);
@ -441,7 +440,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
reg = card->pcie.reg;
if (reg)
ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
else
fw_status = -1;
@ -526,6 +525,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
card->pci_reset_ongoing = true;
}
/*
@ -554,6 +555,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
dev_err(&pdev->dev, "reinit failed: %d\n", ret);
else
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
card->pci_reset_ongoing = false;
}
static const struct pci_error_handlers mwifiex_pcie_err_handler = {
@ -3139,12 +3142,23 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
struct pci_dev *pdev = card->dev;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int ret;
u32 fw_status;
cancel_work_sync(&card->work);
/* Perform the cancel_work_sync() only when we're not resetting
* the card. It's because that function never returns if we're
* in reset path. If we're here when resetting the card, it means
* that we failed to reset the card (reset failure path).
*/
if (!card->pci_reset_ongoing) {
mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
cancel_work_sync(&card->work);
mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
} else {
mwifiex_dbg(adapter, MSG,
"skipped cancel_work_sync() because we're in card reset failure path\n");
}
ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
if (fw_status == FIRMWARE_READY_PCIE) {
mwifiex_dbg(adapter, INFO,
"Clearing driver ready signature\n");


@ -242,6 +242,8 @@ struct pcie_service_card {
struct mwifiex_msix_context share_irq_ctx;
struct work_struct work;
unsigned long work_flags;
bool pci_reset_ongoing;
};
static inline int


@ -43,8 +43,6 @@
#define BLOCK_MODE 1
#define BYTE_MODE 0
#define REG_PORT 0
#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff
#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000


@ -201,6 +201,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO,
"info: SNMP_RESP: DTIM period=%u\n",
ul_temp);
break;
default:
break;
}
@ -1393,6 +1394,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_ret_tdls_oper(priv, resp);
break;
case HostCmd_CMD_MC_POLICY:
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:


@ -99,6 +99,7 @@ static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv,
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_4K;
break;
default:
break;
}


@ -108,6 +108,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
break;
default:
break;
}


@ -1396,6 +1396,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
break;
case 0:
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
default:
break;
}


@ -212,9 +212,9 @@ out:
spin_unlock_irqrestore(&dev->rx_lock, flags);
}
static void mt7601u_rx_tasklet(unsigned long data)
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
struct mt7601u_dma_buf_rx *e;
while ((e = mt7601u_rx_get_pending_entry(dev))) {
@ -266,9 +266,9 @@ out:
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
static void mt7601u_tx_tasklet(unsigned long data)
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
struct sk_buff_head skbs;
unsigned long flags;
@ -507,8 +507,8 @@ int mt7601u_dma_init(struct mt7601u_dev *dev)
{
int ret = -ENOMEM;
tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);
ret = mt7601u_alloc_tx(dev);
if (ret)
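The conversion to tasklet_setup()/from_tasklet() above means the handler receives the tasklet pointer and recovers its owning device with container_of() arithmetic rather than casting an unsigned long back to a pointer. A small standalone illustration of that recovery, with made-up types:

#include <stdio.h>
#include <stddef.h>

/* Recover the enclosing structure from a pointer to one of its members;
 * this is what the kernel's container_of(), and thus from_tasklet(), does. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_tasklet {
        void (*func)(struct fake_tasklet *t);
};

struct fake_dev {
        int id;
        struct fake_tasklet rx_tasklet;         /* embedded, like dev->rx_tasklet */
};

static void rx_handler(struct fake_tasklet *t)
{
        /* from_tasklet(dev, t, rx_tasklet) boils down to exactly this */
        struct fake_dev *dev = container_of(t, struct fake_dev, rx_tasklet);

        printf("rx tasklet ran for dev %d\n", dev->id);
}

int main(void)
{
        struct fake_dev dev = { .id = 7, .rx_tasklet = { .func = rx_handler } };

        dev.rx_tasklet.func(&dev.rx_tasklet);   /* what tasklet_setup() wires up */
        return 0;
}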


@ -1709,7 +1709,7 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
{
struct wilc *wl;
struct wilc_vif *vif;
int ret;
int ret, i;
wl = wilc_create_wiphy(dev);
if (!wl)
@ -1725,7 +1725,10 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
wl->io_type = io_type;
wl->hif_func = ops;
wl->chip_ps_state = WILC_CHIP_WAKEDUP;
INIT_LIST_HEAD(&wl->txq_head.list);
for (i = 0; i < NQUEUES; i++)
INIT_LIST_HEAD(&wl->txq[i].txq_head.list);
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);


@ -1276,6 +1276,23 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return result;
}
int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
{
struct wid wid;
int result;
wid.id = WID_MAC_ADDR;
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = mac_addr;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result)
netdev_err(vif->ndev, "Failed to get mac address\n");
return result;
}
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
size_t ies_len)
{


@ -168,6 +168,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 cipher_mode);
int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid);
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr);
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
size_t ies_len);
int wilc_disconnect(struct wilc_vif *vif);


@ -628,6 +628,43 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
return &vif->netstats;
}
static int wilc_set_mac_addr(struct net_device *dev, void *p)
{
int result;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
struct sockaddr *addr = (struct sockaddr *)p;
unsigned char mac_addr[ETH_ALEN];
struct wilc_vif *tmp_vif;
int srcu_idx;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
srcu_read_unlock(&wilc->srcu, srcu_idx);
return -EINVAL;
}
srcu_read_unlock(&wilc->srcu, srcu_idx);
return 0;
}
}
srcu_read_unlock(&wilc->srcu, srcu_idx);
result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
if (result)
return result;
ether_addr_copy(vif->bssid, addr->sa_data);
ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
return result;
}
static void wilc_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@ -813,6 +850,7 @@ static const struct net_device_ops wilc_netdev_ops = {
.ndo_init = mac_init_fn,
.ndo_open = wilc_mac_open,
.ndo_stop = wilc_mac_close,
.ndo_set_mac_address = wilc_set_mac_addr,
.ndo_start_xmit = wilc_mac_xmit,
.ndo_get_stats = mac_stats,
.ndo_set_rx_mode = wilc_set_multicast_list,


@ -197,6 +197,14 @@ struct wilc_vif {
struct cfg80211_bss *bss;
};
struct wilc_tx_queue_status {
u8 buffer[AC_BUFFER_SIZE];
u16 end_index;
u16 cnt[NQUEUES];
u16 sum;
bool initialized;
};
struct wilc {
struct wiphy *wiphy;
const struct wilc_hif_func *hif_func;
@ -245,9 +253,10 @@ struct wilc {
u32 rx_buffer_offset;
u8 *tx_buffer;
struct txq_entry_t txq_head;
struct txq_handle txq[NQUEUES];
int txq_entries;
struct wilc_tx_queue_status tx_q_limit;
struct rxq_entry_t rxq_head;
const struct firmware *firmware;


@ -6,6 +6,7 @@
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <net/dsfield.h>
#include "cfg80211.h"
#include "wlan_cfg.h"
@ -28,33 +29,34 @@ static inline void release_bus(struct wilc *wilc, enum bus_release release)
mutex_unlock(&wilc->hif_cs);
}
static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe)
static void wilc_wlan_txq_remove(struct wilc *wilc, u8 q_num,
struct txq_entry_t *tqe)
{
list_del(&tqe->list);
wilc->txq_entries -= 1;
wilc->txq[q_num].count--;
}
static struct txq_entry_t *
wilc_wlan_txq_remove_from_head(struct net_device *dev)
wilc_wlan_txq_remove_from_head(struct wilc *wilc, u8 q_num)
{
struct txq_entry_t *tqe = NULL;
unsigned long flags;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
if (!list_empty(&wilc->txq_head.list)) {
tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t,
list);
if (!list_empty(&wilc->txq[q_num].txq_head.list)) {
tqe = list_first_entry(&wilc->txq[q_num].txq_head.list,
struct txq_entry_t, list);
list_del(&tqe->list);
wilc->txq_entries -= 1;
wilc->txq[q_num].count--;
}
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
return tqe;
}
static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
static void wilc_wlan_txq_add_to_tail(struct net_device *dev, u8 q_num,
struct txq_entry_t *tqe)
{
unsigned long flags;
@ -63,15 +65,16 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
spin_lock_irqsave(&wilc->txq_spinlock, flags);
list_add_tail(&tqe->list, &wilc->txq_head.list);
list_add_tail(&tqe->list, &wilc->txq[q_num].txq_head.list);
wilc->txq_entries += 1;
wilc->txq[q_num].count++;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
complete(&wilc->txq_event);
}
static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif, u8 q_num,
struct txq_entry_t *tqe)
{
unsigned long flags;
@ -81,8 +84,9 @@ static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
spin_lock_irqsave(&wilc->txq_spinlock, flags);
list_add(&tqe->list, &wilc->txq_head.list);
list_add(&tqe->list, &wilc->txq[q_num].txq_head.list);
wilc->txq_entries += 1;
wilc->txq[q_num].count++;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
mutex_unlock(&wilc->txq_add_to_head_cs);
@ -212,7 +216,7 @@ static void wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
tqe = f->pending_acks[i].txqe;
if (tqe) {
wilc_wlan_txq_remove(wilc, tqe);
wilc_wlan_txq_remove(wilc, tqe->q_num, tqe);
tqe->status = 1;
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv,
@ -258,18 +262,148 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
}
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe)
if (!tqe) {
complete(&wilc->cfg_event);
return 0;
}
tqe->type = WILC_CFG_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
tqe->tx_complete_func = NULL;
tqe->priv = NULL;
tqe->q_num = AC_VO_Q;
tqe->ack_idx = NOT_TCP_ACK;
tqe->vif = vif;
wilc_wlan_txq_add_to_head(vif, tqe);
wilc_wlan_txq_add_to_head(vif, AC_VO_Q, tqe);
return 1;
}
static bool is_ac_q_limit(struct wilc *wl, u8 q_num)
{
u8 factors[NQUEUES] = {1, 1, 1, 1};
u16 i;
unsigned long flags;
struct wilc_tx_queue_status *q = &wl->tx_q_limit;
u8 end_index;
u8 q_limit;
bool ret = false;
spin_lock_irqsave(&wl->txq_spinlock, flags);
if (!q->initialized) {
for (i = 0; i < AC_BUFFER_SIZE; i++)
q->buffer[i] = i % NQUEUES;
for (i = 0; i < NQUEUES; i++) {
q->cnt[i] = AC_BUFFER_SIZE * factors[i] / NQUEUES;
q->sum += q->cnt[i];
}
q->end_index = AC_BUFFER_SIZE - 1;
q->initialized = 1;
}
end_index = q->end_index;
q->cnt[q->buffer[end_index]] -= factors[q->buffer[end_index]];
q->cnt[q_num] += factors[q_num];
q->sum += (factors[q_num] - factors[q->buffer[end_index]]);
q->buffer[end_index] = q_num;
if (end_index > 0)
q->end_index--;
else
q->end_index = AC_BUFFER_SIZE - 1;
if (!q->sum)
q_limit = 1;
else
q_limit = (q->cnt[q_num] * FLOW_CONTROL_UPPER_THRESHOLD / q->sum) + 1;
if (wl->txq[q_num].count <= q_limit)
ret = true;
spin_unlock_irqrestore(&wl->txq_spinlock, flags);
return ret;
}
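is_ac_q_limit() keeps a rolling record of the last AC_BUFFER_SIZE enqueued access categories and caps each queue roughly in proportion to its recent share of the traffic, so one class cannot monopolise the shared tx buffer. A loose userspace sketch of that proportional cap; the window size and threshold below are invented stand-ins, not the driver's constants:

#include <stdio.h>

#define NQUEUES      4
#define HISTORY      16         /* stand-in for AC_BUFFER_SIZE */
#define UPPER_THRESH 8          /* stand-in for FLOW_CONTROL_UPPER_THRESHOLD */

static unsigned char history[HISTORY];  /* last HISTORY enqueued AC numbers */
static unsigned int cnt[NQUEUES];       /* occurrences of each AC in the window */
static unsigned int pos;

/* Record one enqueue for 'ac' and return the current cap for its queue. */
static unsigned int ac_q_limit(unsigned int ac)
{
        unsigned int sum = 0, i;

        cnt[history[pos]]--;                    /* evict the oldest sample ... */
        history[pos] = (unsigned char)ac;       /* ... and record the newest */
        cnt[ac]++;
        pos = (pos + 1) % HISTORY;

        for (i = 0; i < NQUEUES; i++)
                sum += cnt[i];

        /* a queue's cap grows with its recent share of the traffic */
        return sum ? cnt[ac] * UPPER_THRESH / sum + 1 : 1;
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < HISTORY; i++) {         /* seed the window evenly */
                history[i] = i % NQUEUES;
                cnt[i % NQUEUES]++;
        }

        for (i = 0; i < 8; i++)                 /* a burst on AC 2 raises its cap */
                printf("cap for AC 2 after burst packet %u: %u\n",
                       i + 1, ac_q_limit(2));
        return 0;
}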
static inline u8 ac_classify(struct wilc *wilc, struct sk_buff *skb)
{
u8 q_num = AC_BE_Q;
u8 dscp;
switch (skb->protocol) {
case htons(ETH_P_IP):
dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
break;
case htons(ETH_P_IPV6):
dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
break;
default:
return q_num;
}
switch (dscp) {
case 0x08:
case 0x20:
case 0x40:
q_num = AC_BK_Q;
break;
case 0x80:
case 0xA0:
case 0x28:
q_num = AC_VI_Q;
break;
case 0xC0:
case 0xD0:
case 0xE0:
case 0x88:
case 0xB8:
q_num = AC_VO_Q;
break;
}
return q_num;
}
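ac_classify() takes the DSCP bits of the IPv4/IPv6 traffic-class field (the 0xfc mask strips the two ECN bits) and maps them onto the four WMM access categories, falling back to best effort. The same table as a standalone function, a sketch reusing the code points from the hunk above:

#include <stdio.h>

enum ac { AC_VO_Q, AC_VI_Q, AC_BE_Q, AC_BK_Q };

static enum ac classify_dscp(unsigned char dscp)
{
        switch (dscp) {
        case 0x08: case 0x20: case 0x40:        /* background */
                return AC_BK_Q;
        case 0x80: case 0xA0: case 0x28:        /* video */
                return AC_VI_Q;
        case 0xC0: case 0xD0: case 0xE0:
        case 0x88: case 0xB8:                   /* voice */
                return AC_VO_Q;
        default:                                /* best effort */
                return AC_BE_Q;
        }
}

int main(void)
{
        const unsigned char samples[] = { 0x00, 0x28, 0xB8, 0x20 };
        unsigned int i;

        for (i = 0; i < sizeof(samples); i++)
                printf("dscp 0x%02x -> AC %d\n", samples[i],
                       classify_dscp(samples[i]));
        return 0;
}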
static inline int ac_balance(struct wilc *wl, u8 *ratio)
{
u8 i, max_count = 0;
if (!ratio)
return -EINVAL;
for (i = 0; i < NQUEUES; i++)
if (wl->txq[i].fw.count > max_count)
max_count = wl->txq[i].fw.count;
for (i = 0; i < NQUEUES; i++)
ratio[i] = max_count - wl->txq[i].fw.count;
return 0;
}
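ac_balance() turns the firmware's per-AC backlog into desired drain ratios: each queue is offered the difference between the fullest firmware queue and its own count, so the emptiest class is given the most packets in the next batch. For example, firmware counts of BK 3, BE 1, VI 0 and VO 2 give a maximum of 3 and desired ratios of BK 0, BE 2, VI 3, VO 1.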
static inline void ac_update_fw_ac_pkt_info(struct wilc *wl, u32 reg)
{
wl->txq[AC_BK_Q].fw.count = FIELD_GET(BK_AC_COUNT_FIELD, reg);
wl->txq[AC_BE_Q].fw.count = FIELD_GET(BE_AC_COUNT_FIELD, reg);
wl->txq[AC_VI_Q].fw.count = FIELD_GET(VI_AC_COUNT_FIELD, reg);
wl->txq[AC_VO_Q].fw.count = FIELD_GET(VO_AC_COUNT_FIELD, reg);
wl->txq[AC_BK_Q].fw.acm = FIELD_GET(BK_AC_ACM_STAT_FIELD, reg);
wl->txq[AC_BE_Q].fw.acm = FIELD_GET(BE_AC_ACM_STAT_FIELD, reg);
wl->txq[AC_VI_Q].fw.acm = FIELD_GET(VI_AC_ACM_STAT_FIELD, reg);
wl->txq[AC_VO_Q].fw.acm = FIELD_GET(VO_AC_ACM_STAT_FIELD, reg);
}
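ac_update_fw_ac_pkt_info() unpacks the per-AC packet counts and ACM bits from a single 32-bit status word using GENMASK()/FIELD_GET() with the field layout defined later in this diff. A userspace sketch of the same extraction with plain shifts; get_field() is an invented helper, not a kernel API:

#include <stdio.h>
#include <stdint.h>

/* Extract bits [hi:lo] of reg -- what GENMASK(hi, lo) plus FIELD_GET() do. */
static uint32_t get_field(uint32_t reg, unsigned int hi, unsigned int lo)
{
        return (reg >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
        uint32_t reg = 0x12345678;      /* pretend firmware status word */

        /* bit positions follow the *_AC_COUNT/_ACM_STAT field definitions */
        printf("VO count %u acm %u\n", get_field(reg, 31, 25), get_field(reg, 24, 24));
        printf("VI count %u acm %u\n", get_field(reg, 23, 17), get_field(reg, 16, 16));
        printf("BE count %u acm %u\n", get_field(reg, 15,  9), get_field(reg,  8,  8));
        printf("BK count %u acm %u\n", get_field(reg,  7,  3), get_field(reg,  1,  1));
        return 0;
}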
static inline u8 ac_change(struct wilc *wilc, u8 *ac)
{
do {
if (wilc->txq[*ac].fw.acm == 0)
return 0;
(*ac)++;
} while (*ac < NQUEUES);
return 1;
}
@ -281,16 +415,21 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
struct txq_entry_t *tqe;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc;
u8 q_num;
wilc = vif->wilc;
if (wilc->quit)
if (wilc->quit) {
tx_complete_fn(priv, 0);
return 0;
}
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe)
if (!tqe) {
tx_complete_fn(priv, 0);
return 0;
}
tqe->type = WILC_NET_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
@ -298,10 +437,24 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
tqe->priv = priv;
tqe->vif = vif;
tqe->ack_idx = NOT_TCP_ACK;
if (vif->ack_filter.enabled)
tcp_process(dev, tqe);
wilc_wlan_txq_add_to_tail(dev, tqe);
q_num = ac_classify(wilc, priv);
tqe->q_num = q_num;
if (ac_change(wilc, &q_num)) {
tx_complete_fn(priv, 0);
kfree(tqe);
return 0;
}
if (is_ac_q_limit(wilc, q_num)) {
tqe->ack_idx = NOT_TCP_ACK;
if (vif->ack_filter.enabled)
tcp_process(dev, tqe);
wilc_wlan_txq_add_to_tail(dev, q_num, tqe);
} else {
tx_complete_fn(priv, 0);
kfree(tqe);
}
return wilc->txq_entries;
}
@ -315,34 +468,39 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
wilc = vif->wilc;
if (wilc->quit)
if (wilc->quit) {
tx_complete_fn(priv, 0);
return 0;
}
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe)
if (!tqe) {
tx_complete_fn(priv, 0);
return 0;
}
tqe->type = WILC_MGMT_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
tqe->tx_complete_func = tx_complete_fn;
tqe->priv = priv;
tqe->q_num = AC_BE_Q;
tqe->ack_idx = NOT_TCP_ACK;
tqe->vif = vif;
wilc_wlan_txq_add_to_tail(dev, tqe);
wilc_wlan_txq_add_to_tail(dev, AC_VO_Q, tqe);
return 1;
}
static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc)
static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc, u8 q_num)
{
struct txq_entry_t *tqe = NULL;
unsigned long flags;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
if (!list_empty(&wilc->txq_head.list))
tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t,
list);
if (!list_empty(&wilc->txq[q_num].txq_head.list))
tqe = list_first_entry(&wilc->txq[q_num].txq_head.list,
struct txq_entry_t, list);
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
@ -350,13 +508,14 @@ static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc)
}
static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc,
struct txq_entry_t *tqe)
struct txq_entry_t *tqe,
u8 q_num)
{
unsigned long flags;
spin_lock_irqsave(&wilc->txq_spinlock, flags);
if (!list_is_last(&tqe->list, &wilc->txq_head.list))
if (!list_is_last(&tqe->list, &wilc->txq[q_num].txq_head.list))
tqe = list_next_entry(tqe, list);
else
tqe = NULL;
@ -479,54 +638,92 @@ EXPORT_SYMBOL_GPL(host_sleep_notify);
int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
{
int i, entries = 0;
u8 k, ac;
u32 sum;
u32 reg;
u8 ac_desired_ratio[NQUEUES] = {0, 0, 0, 0};
u8 ac_preserve_ratio[NQUEUES] = {1, 1, 1, 1};
u8 *num_pkts_to_add;
u8 vmm_entries_ac[WILC_VMM_TBL_SIZE];
u32 offset = 0;
bool max_size_over = 0, ac_exist = 0;
int vmm_sz = 0;
struct txq_entry_t *tqe;
struct txq_entry_t *tqe_q[NQUEUES];
int ret = 0;
int counter;
int timeout;
u32 vmm_table[WILC_VMM_TBL_SIZE];
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
u8 *txb = wilc->tx_buffer;
struct net_device *dev;
struct wilc_vif *vif;
if (wilc->quit)
goto out_update_cnt;
if (ac_balance(wilc, ac_desired_ratio))
return -EINVAL;
mutex_lock(&wilc->txq_add_to_head_cs);
tqe = wilc_wlan_txq_get_first(wilc);
if (!tqe)
goto out_unlock;
dev = tqe->vif->ndev;
wilc_wlan_txq_filter_dup_tcp_ack(dev);
srcu_idx = srcu_read_lock(&wilc->srcu);
list_for_each_entry_rcu(vif, &wilc->vif_list, list)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
srcu_read_unlock(&wilc->srcu, srcu_idx);
for (ac = 0; ac < NQUEUES; ac++)
tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
i = 0;
sum = 0;
while (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
if (tqe->type == WILC_CFG_PKT)
vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
else if (tqe->type == WILC_NET_PKT)
vmm_sz = ETH_ETHERNET_HDR_OFFSET;
else
vmm_sz = HOST_HDR_OFFSET;
max_size_over = 0;
num_pkts_to_add = ac_desired_ratio;
do {
ac_exist = 0;
for (ac = 0; (ac < NQUEUES) && (!max_size_over); ac++) {
if (!tqe_q[ac])
continue;
vmm_sz += tqe->buffer_size;
vmm_sz = ALIGN(vmm_sz, 4);
vif = tqe_q[ac]->vif;
ac_exist = 1;
for (k = 0; (k < num_pkts_to_add[ac]) &&
(!max_size_over) && tqe_q[ac]; k++) {
if (i >= (WILC_VMM_TBL_SIZE - 1)) {
max_size_over = 1;
break;
}
if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
break;
if (tqe_q[ac]->type == WILC_CFG_PKT)
vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
else if (tqe_q[ac]->type == WILC_NET_PKT)
vmm_sz = ETH_ETHERNET_HDR_OFFSET;
else
vmm_sz = HOST_HDR_OFFSET;
vmm_table[i] = vmm_sz / 4;
if (tqe->type == WILC_CFG_PKT)
vmm_table[i] |= BIT(10);
cpu_to_le32s(&vmm_table[i]);
vmm_sz += tqe_q[ac]->buffer_size;
vmm_sz = ALIGN(vmm_sz, 4);
i++;
sum += vmm_sz;
tqe = wilc_wlan_txq_get_next(wilc, tqe);
}
if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE) {
max_size_over = 1;
break;
}
vmm_table[i] = vmm_sz / 4;
if (tqe_q[ac]->type == WILC_CFG_PKT)
vmm_table[i] |= BIT(10);
cpu_to_le32s(&vmm_table[i]);
vmm_entries_ac[i] = ac;
i++;
sum += vmm_sz;
tqe_q[ac] = wilc_wlan_txq_get_next(wilc,
tqe_q[ac],
ac);
}
}
num_pkts_to_add = ac_preserve_ratio;
} while (!max_size_over && ac_exist);
if (i == 0)
goto out_unlock;
@ -540,8 +737,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
if (ret)
break;
if ((reg & 0x1) == 0)
if ((reg & 0x1) == 0) {
ac_update_fw_ac_pkt_info(wilc, reg);
break;
}
counter++;
if (counter > 200) {
@ -610,11 +809,13 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
offset = 0;
i = 0;
do {
struct txq_entry_t *tqe;
u32 header, buffer_offset;
char *bssid;
u8 mgmt_ptk = 0;
tqe = wilc_wlan_txq_remove_from_head(dev);
tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]);
ac_pkt_num_to_chip[vmm_entries_ac[i]]++;
if (!tqe)
break;
@ -639,8 +840,11 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
if (tqe->type == WILC_CFG_PKT) {
buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET;
} else if (tqe->type == WILC_NET_PKT) {
int prio = tqe->q_num;
bssid = tqe->vif->bssid;
buffer_offset = ETH_ETHERNET_HDR_OFFSET;
memcpy(&txb[offset + 4], &prio, sizeof(prio));
memcpy(&txb[offset + 8], bssid, 6);
} else {
buffer_offset = HOST_HDR_OFFSET;
@ -658,6 +862,8 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
vif->ack_filter.pending_acks[tqe->ack_idx].txqe = NULL;
kfree(tqe);
} while (--entries);
for (i = 0; i < NQUEUES; i++)
wilc->txq[i].fw.count += ac_pkt_num_to_chip[i];
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
@ -956,14 +1162,17 @@ void wilc_wlan_cleanup(struct net_device *dev)
{
struct txq_entry_t *tqe;
struct rxq_entry_t *rqe;
u8 ac;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
wilc->quit = 1;
while ((tqe = wilc_wlan_txq_remove_from_head(dev))) {
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv, 0);
kfree(tqe);
for (ac = 0; ac < NQUEUES; ac++) {
while ((tqe = wilc_wlan_txq_remove_from_head(wilc, ac))) {
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv, 0);
kfree(tqe);
}
}
while ((rqe = wilc_wlan_rxq_remove(wilc)))


@ -207,6 +207,18 @@
#define MODALIAS "WILC_SPI"
#define NQUEUES 4
#define AC_BUFFER_SIZE 1000
#define VO_AC_COUNT_FIELD GENMASK(31, 25)
#define VO_AC_ACM_STAT_FIELD BIT(24)
#define VI_AC_COUNT_FIELD GENMASK(23, 17)
#define VI_AC_ACM_STAT_FIELD BIT(16)
#define BE_AC_COUNT_FIELD GENMASK(15, 9)
#define BE_AC_ACM_STAT_FIELD BIT(8)
#define BK_AC_COUNT_FIELD GENMASK(7, 3)
#define BK_AC_ACM_STAT_FIELD BIT(1)
#define WILC_PKT_HDR_CONFIG_FIELD BIT(31)
#define WILC_PKT_HDR_OFFSET_FIELD GENMASK(30, 22)
#define WILC_PKT_HDR_TOTAL_LEN_FIELD GENMASK(21, 11)
@ -295,10 +307,17 @@
* Tx/Rx Queue Structure
*
********************************************/
enum ip_pkt_priority {
AC_VO_Q = 0,
AC_VI_Q = 1,
AC_BE_Q = 2,
AC_BK_Q = 3
};
struct txq_entry_t {
struct list_head list;
int type;
u8 q_num;
int ack_idx;
u8 *buffer;
int buffer_size;
@ -308,6 +327,17 @@ struct txq_entry_t {
void (*tx_complete_func)(void *priv, int status);
};
struct txq_fw_recv_queue_stat {
u8 acm;
u8 count;
};
struct txq_handle {
struct txq_entry_t txq_head;
u16 count;
struct txq_fw_recv_queue_stat fw;
};
struct rxq_entry_t {
struct list_head list;
u8 *buffer;


@ -299,19 +299,19 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR);
if (IS_ERR(sysctl_bar)) {
pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
return ret;
return PTR_ERR(sysctl_bar);
}
dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR);
if (IS_ERR(dmareg_bar)) {
pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
return ret;
return PTR_ERR(dmareg_bar);
}
epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR);
if (IS_ERR(epmem_bar)) {
pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
return ret;
return PTR_ERR(epmem_bar);
}
chipid = qtnf_chip_id_get(sysctl_bar);
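The qtnf change returns PTR_ERR() of the failed BAR mapping instead of a stale ret, so a mapping failure propagates the real error code rather than whatever ret last held. A minimal userspace model of the error-pointer convention; the helpers below re-implement the idea only for this sketch:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static void *err_ptr(long err)
{
        return (void *)(intptr_t)err;
}

static int is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-4095;        /* last page == error */
}

static long ptr_err(const void *p)
{
        return (long)(intptr_t)p;
}

/* Pretend BAR mapping that fails with -ENOMEM. */
static void *map_bar(int bar)
{
        (void)bar;
        return err_ptr(-ENOMEM);
}

static int probe(void)
{
        void *bar = map_bar(0);

        if (is_err(bar))
                return ptr_err(bar);    /* propagate the real error, not 0 */
        return 0;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}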

View File

@ -1228,6 +1228,17 @@ static int rt2800_check_hung(struct data_queue *queue)
return queue->wd_count > 16;
}
static void rt2800_update_survey(struct rt2x00_dev *rt2x00dev)
{
struct ieee80211_channel *chan = rt2x00dev->hw->conf.chandef.chan;
struct rt2x00_chan_survey *chan_survey =
&rt2x00dev->chan_survey[chan->hw_value];
chan_survey->time_idle += rt2800_register_read(rt2x00dev, CH_IDLE_STA);
chan_survey->time_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA);
chan_survey->time_ext_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC);
}
void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@ -1237,6 +1248,8 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
return;
rt2800_update_survey(rt2x00dev);
queue_for_each(rt2x00dev, queue) {
switch (queue->qid) {
case QID_AC_VO:
@ -5553,6 +5566,12 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
rt2800_config_lna_gain(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
/*
* To provide correct survey data for survey-based ACS algorithm
* we have to save survey data for current channel before switching.
*/
rt2800_update_survey(rt2x00dev);
rt2800_config_channel(rt2x00dev, libconf->conf,
&libconf->rf, &libconf->channel);
rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
@ -10111,12 +10130,20 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
}
/*
* Create channel information array
* Create channel information and survey arrays
*/
info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
rt2x00dev->chan_survey =
kcalloc(spec->num_channels, sizeof(struct rt2x00_chan_survey),
GFP_KERNEL);
if (!rt2x00dev->chan_survey) {
kfree(info);
return -ENOMEM;
}
spec->channels_info = info;
default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
@ -10503,27 +10530,30 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
u32 idle, busy, busy_ext;
struct rt2x00_chan_survey *chan_survey =
&rt2x00dev->chan_survey[idx];
enum nl80211_band band = NL80211_BAND_2GHZ;
if (idx != 0)
if (idx >= rt2x00dev->bands[band].n_channels) {
idx -= rt2x00dev->bands[band].n_channels;
band = NL80211_BAND_5GHZ;
}
if (idx >= rt2x00dev->bands[band].n_channels)
return -ENOENT;
survey->channel = conf->chandef.chan;
if (idx == 0)
rt2800_update_survey(rt2x00dev);
idle = rt2800_register_read(rt2x00dev, CH_IDLE_STA);
busy = rt2800_register_read(rt2x00dev, CH_BUSY_STA);
busy_ext = rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC);
survey->channel = &rt2x00dev->bands[band].channels[idx];
if (idle || busy) {
survey->filled = SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_EXT_BUSY;
survey->filled = SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_EXT_BUSY;
survey->time = (idle + busy) / 1000;
survey->time_busy = busy / 1000;
survey->time_ext_busy = busy_ext / 1000;
}
survey->time = div_u64(chan_survey->time_idle + chan_survey->time_busy, 1000);
survey->time_busy = div_u64(chan_survey->time_busy, 1000);
survey->time_ext_busy = div_u64(chan_survey->time_ext_busy, 1000);
if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
survey->filled |= SURVEY_INFO_IN_USE;
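rt2800_get_survey() now accepts any index mac80211 passes: values beyond the 2 GHz channel list roll over into the 5 GHz band, so every configured channel gets a survey entry instead of only index 0. A compact sketch of that index translation, with invented channel counts:

#include <stdio.h>

#define N_2GHZ 14
#define N_5GHZ 25

static const int n_channels[2] = { N_2GHZ, N_5GHZ };

/* Map a flat survey index onto (band, channel); -1 means no more channels. */
static int survey_lookup(int idx, int *band, int *chan)
{
        int b = 0;

        if (idx >= n_channels[b]) {     /* fell off the 2 GHz list ... */
                idx -= n_channels[b];
                b = 1;                  /* ... continue in the 5 GHz list */
        }
        if (idx >= n_channels[b])
                return -1;              /* mac80211 stops on -ENOENT */

        *band = b;
        *chan = idx;
        return 0;
}

int main(void)
{
        const int samples[] = { 13, 14, 38, 39 };
        int band, chan;
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                if (survey_lookup(samples[i], &band, &chan))
                        printf("idx %d: no such channel\n", samples[i]);
                else
                        printf("idx %d -> band %d, channel %d\n",
                               samples[i], band, chan);
        }
        return 0;
}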


@ -181,6 +181,15 @@ struct rf_channel {
u32 rf4;
};
/*
* Information structure for channel survey.
*/
struct rt2x00_chan_survey {
u64 time_idle;
u64 time_busy;
u64 time_ext_busy;
};
/*
* Channel information structure
*/
@ -752,6 +761,7 @@ struct rt2x00_dev {
*/
struct ieee80211_hw *hw;
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct rt2x00_chan_survey *chan_survey;
enum nl80211_band curr_band;
int curr_freq;


@ -877,10 +877,10 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
switch (ccsindex = get_free_tx_ccs(local)) {
case ECCSBUSY:
pr_debug("ray_hw_xmit tx_ccs table busy\n");
/* fall through */
fallthrough;
case ECCSFULL:
pr_debug("ray_hw_xmit No free tx ccs\n");
/* fall through */
fallthrough;
case ECARDGONE:
netif_stop_queue(dev);
return XMIT_NO_CCS;
@ -1272,7 +1272,7 @@ static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
switch (wrqu->mode) {
case IW_MODE_ADHOC:
card_mode = 0;
/* Fall through */
fallthrough;
case IW_MODE_INFRA:
local->sparm.b5.a_network_type = card_mode;
break;


@ -1628,17 +1628,17 @@ static void btc8723b2ant_action_wifi_link_process(struct btc_coexist
static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1;
u8 ap_num = 0;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
@ -2764,10 +2764,10 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
/* SCO only or SCO+PAN(HS) */
static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, bt_rssi_state;
u8 bt_rssi_state;
u32 wifi_bw;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
bt_rssi_state = btc8723b2ant_bt_rssi_state(
btcoexist, 2, BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset,
@ -2807,12 +2807,12 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, bt_rssi_state;
u8 bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@ -2852,13 +2852,13 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 ap_num = 0;
u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0);
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
@ -2926,12 +2926,12 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
@ -2973,12 +2973,12 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
@ -3025,13 +3025,13 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
/* PAN(HS) only */
static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
@ -3063,12 +3063,12 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
/* PAN(EDR) + A2DP */
static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
@ -3118,12 +3118,12 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
@ -3182,12 +3182,12 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* HID + A2DP + PAN(EDR) */
static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
@ -3241,13 +3241,13 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 ap_num = 0;
u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
coex_dm->switch_thres_offset;
wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
tmp, 0);
tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -


@ -1901,7 +1901,6 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool increase_scan_dev_num = false;
bool bt_ctrl_agg_buf_size = false;
u8 agg_buf_size = 5;
u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
bool wifi_under_5g = false;
@ -1962,8 +1961,7 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
} else {
if (wifi_connected) {
wifi_rssi_state =
btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2,
btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2,
30, 0);
btc8821a1ant_limited_tx(btcoexist,
NORMAL_EXEC, 1, 1,


@ -1448,17 +1448,15 @@ static void btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
bool wifi_connected = false;
bool low_pwr_disable = true;
bool scan = false, link = false, roam = false;
wifi_rssi_state =
btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
btc8821a2ant_bt_rssi_state(btcoexist,
2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
@ -1516,15 +1514,14 @@ static void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state1;
u8 ap_num = 0;
wifi_rssi_state =
btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES - 20, 0);
bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
btc8821a2ant_bt_rssi_state(btcoexist,
2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
@ -2987,11 +2984,11 @@ static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
/* PAN(HS) only */
static void btc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state, bt_rssi_state;
u32 wifi_bw;
wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
@ -3274,11 +3271,11 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
u32 wifi_bw;
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u8 wifi_rssi_state, bt_rssi_state;
u8 ap_num = 0;
wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
3, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 37);


@ -47,30 +47,17 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct rtl_mac *mac = rtl_mac(rtlpriv);
struct rtl_sta_info *drv_priv;
u8 cnt = 0;
bool ret = false;
if (mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_MESH_POINT ||
mac->opmode == NL80211_IFTYPE_AP) {
if (in_interrupt() > 0) {
list_for_each_entry(drv_priv, &rtlpriv->entry_list,
list) {
cnt++;
}
} else {
spin_lock_bh(&rtlpriv->locks.entry_list_lock);
list_for_each_entry(drv_priv, &rtlpriv->entry_list,
list) {
cnt++;
}
spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
}
spin_lock_bh(&rtlpriv->locks.entry_list_lock);
if (!list_empty(&rtlpriv->entry_list))
ret = true;
spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
}
if (cnt > 0)
return true;
else
return false;
return ret;
}
static bool halbtc_legacy(struct rtl_priv *adapter)
@ -253,9 +240,6 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
if (in_interrupt())
return false;
if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
msecs_to_jiffies(wait_ms)) == 0) {
rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,


@ -1226,7 +1226,6 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
default:
pr_err("Network type %d not support!\n", type);
return 1;
break;
}
/* MSR_INFRA == Link in infrastructure network;


@ -1348,7 +1348,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
u32 reg_eac, reg_e94, reg_e9c;
u8 result = 0x00;
rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
@ -1365,7 +1365,7 @@ static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
rtl_get_bbreg(hw, 0xea4, MASKDWORD);
if (!(reg_eac & BIT(28)) &&
(((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&


@ -674,12 +674,12 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error\n");


@ -527,12 +527,12 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error\n");


@ -113,7 +113,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
/**
* writeLLT - LLT table write access
* @io: io callback
* @hw: Pointer to the ieee80211_hw structure.
* @address: LLT logical address.
* @data: LLT data content
*
@ -145,11 +145,10 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
/**
* rtl92c_init_LLT_table - Init LLT table
* @io: io callback
* @boundary:
* @hw: Pointer to the ieee80211_hw structure.
* @boundary: Page boundary.
*
* Realtek hardware access function.
*
*/
bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
{

Some files were not shown because too many files have changed in this diff.