Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.17. Major changes:

ath11k

* support PCI devices with 1 MSI vector

* WCN6855 hw2.1 support

* 11d scan offload support

* full monitor mode, only supported on QCN9074

* scan MAC address randomization support

* support for reserved host DDR addresses from DT for PCI devices

ath9k

* switch to rate table based lookup

ath

* extend South Korea regulatory domain support

wcn36xx

* beacon filter support
Kalle Valo 2021-12-16 20:12:58 +02:00
commit fd5e3c4ab9
47 changed files with 2102 additions and 324 deletions


@ -150,6 +150,12 @@ properties:
string to uniquely identify variant of the calibration data in the
board-2.bin for designs with colliding bus and device specific ids
memory-region:
maxItems: 1
description:
phandle to a node describing reserved memory (System RAM memory)
used by ath11k firmware (see bindings/reserved-memory/reserved-memory.txt)
required:
- compatible
- reg
@ -279,3 +285,27 @@ examples:
"tcl2host-status-ring";
qcom,rproc = <&q6v5_wcss>;
};
  - |
    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;

        qcn9074_0: qcn9074_0@51100000 {
            no-map;
            reg = <0x0 0x51100000 0x0 0x03500000>;
        };
    };

    pci {
        pcie0 {
            #size-cells = <2>;
            #address-cells = <3>;

            wifi_0: wifi@0 {
                reg = <0 0 0 0 0>;
                memory-region = <&qcn9074_0>;
            };
        };
    };


@ -90,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
@ -125,6 +126,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
@ -161,6 +163,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -191,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
.credit_size_workaround = false,
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
@ -227,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -262,6 +267,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -297,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -335,6 +342,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
@ -377,6 +385,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -425,6 +434,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -470,6 +480,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -505,6 +516,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -542,6 +554,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -571,6 +584,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.credit_size_workaround = true,
.dynamic_sar_support = false,
},
{
@ -612,6 +626,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@ -640,6 +655,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = true,
.hw_filter_reset_required = false,
.fw_diag_ce_download = false,
.credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
},
@ -715,6 +731,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
bool mtu_workaround = ar->hw_params.credit_size_workaround;
int ret;
u32 param = 0;
@ -732,7 +749,7 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround)
param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
else
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
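For clarity, a sketch of the resulting logic in ath10k_init_sdio() after this change (using only the fields shown in this hunk): firmware that needs the credit-size workaround is never offered the alternate data credit size.

    bool mtu_workaround = ar->hw_params.credit_size_workaround;

    if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround)
        param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
    else
        param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;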


@ -1400,115 +1400,6 @@ enum htt_dbg_stats_status {
HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};
/*
* target -> host statistics upload
*
* The following field definitions describe the format of the HTT target
* to host stats upload confirmation message.
* The message contains a cookie echoed from the HTT host->target stats
* upload request, which identifies which request the confirmation is
* for, and a series of tag-length-value stats information elements.
* The tag-length header for each stats info element also includes a
* status field, to indicate whether the request for the stat type in
* question was fully met, partially met, unable to be met, or invalid
* (if the stat type in question is disabled in the target).
* A special value of all 1's in this status field is used to indicate
* the end of the series of stats info elements.
*
*
* |31 16|15 8|7 5|4 0|
* |------------------------------------------------------------|
* | reserved | msg type |
* |------------------------------------------------------------|
* | cookie LSBs |
* |------------------------------------------------------------|
* | cookie MSBs |
* |------------------------------------------------------------|
* | stats entry length | reserved | S |stat type|
* |------------------------------------------------------------|
* | |
* | type-specific stats info |
* | |
* |------------------------------------------------------------|
* | stats entry length | reserved | S |stat type|
* |------------------------------------------------------------|
* | |
* | type-specific stats info |
* | |
* |------------------------------------------------------------|
* | n/a | reserved | 111 | n/a |
* |------------------------------------------------------------|
* Header fields:
* - MSG_TYPE
* Bits 7:0
* Purpose: identifies this is a statistics upload confirmation message
* Value: 0x9
* - COOKIE_LSBS
* Bits 31:0
* Purpose: Provide a mechanism to match a target->host stats confirmation
* message with its preceding host->target stats request message.
* Value: LSBs of the opaque cookie specified by the host-side requestor
* - COOKIE_MSBS
* Bits 31:0
* Purpose: Provide a mechanism to match a target->host stats confirmation
* message with its preceding host->target stats request message.
* Value: MSBs of the opaque cookie specified by the host-side requestor
*
* Stats Information Element tag-length header fields:
* - STAT_TYPE
* Bits 4:0
* Purpose: identifies the type of statistics info held in the
* following information element
* Value: htt_dbg_stats_type
* - STATUS
* Bits 7:5
* Purpose: indicate whether the requested stats are present
* Value: htt_dbg_stats_status, including a special value (0x7) to mark
* the completion of the stats entry series
* - LENGTH
* Bits 31:16
* Purpose: indicate the stats information size
* Value: This field specifies the number of bytes of stats information
* that follows the element tag-length header.
* It is expected but not required that this length is a multiple of
* 4 bytes. Even if the length is not an integer multiple of 4, the
* subsequent stats entry header will begin on a 4-byte aligned
* boundary.
*/
#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
struct htt_stats_conf_item {
union {
u8 info;
struct {
u8 stat_type:5; /* %HTT_DBG_STATS_ */
u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
} __packed;
} __packed;
u8 pad;
__le16 length;
u8 payload[]; /* roundup(length, 4) long */
} __packed;
struct htt_stats_conf {
u8 pad[3];
__le32 cookie_lsb;
__le32 cookie_msb;
/* each item has variable length! */
struct htt_stats_conf_item items[];
} __packed;
static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
const struct htt_stats_conf_item *item)
{
return (void *)item + sizeof(*item) + roundup(item->length, 4);
}
/*
* host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
*
@ -1828,7 +1719,6 @@ struct htt_resp {
struct htt_rc_update rc_update;
struct htt_rx_test rx_test;
struct htt_pktlog_msg pktlog_msg;
struct htt_stats_conf stats_conf;
struct htt_rx_pn_ind rx_pn_ind;
struct htt_rx_offload_ind rx_offload_ind;
struct htt_rx_in_ord_ind rx_in_ord_ind;


@ -147,6 +147,9 @@ void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
}
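Moving the wake-up here means it runs under the same htt->tx_lock that protects num_pending_tx; the old call site outside the lock is removed in the txrx.c hunk below. A sketch of a flush-side waiter (the exact caller and timeout name are assumptions, not part of this diff):

    /* sketch: wait until all pending tx frames have completed */
    wait_event_timeout(htt->empty_tx_wq,
                       READ_ONCE(htt->num_pending_tx) == 0,
                       ATH10K_FLUSH_TIMEOUT_HZ);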
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)


@ -618,6 +618,9 @@ struct ath10k_hw_params {
*/
bool uart_pin_workaround;
/* Workaround for the credit size calculation */
bool credit_size_workaround;
/* tx stats support over pktlog */
bool tx_stats_over_pktlog;


@ -6380,13 +6380,14 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
scan_timeout = min_t(u32, arg.max_rest_time *
(arg.n_channels - 1) + (req->duration +
ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
arg.n_channels, arg.max_scan_time + 200);
arg.n_channels, arg.max_scan_time);
} else {
/* Add a 200ms margin to account for event/command processing */
scan_timeout = arg.max_scan_time + 200;
scan_timeout = arg.max_scan_time;
}
/* Add a 200ms margin to account for event/command processing */
scan_timeout += 200;
ret = ath10k_start_scan(ar, &arg);
if (ret) {
ath10k_warn(ar, "failed to start hw scan: %d\n", ret);


@ -82,8 +82,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
rcu_read_lock();


@ -2611,9 +2611,30 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
ieee80211_is_probe_resp(hdr->frame_control)) {
struct ieee80211_mgmt *mgmt = (void *)skb->data;
u8 *ies;
int ies_ch;
status->boottime_ns = ktime_get_boottime_ns();
if (!ar->scan_channel)
goto drop;
ies = mgmt->u.beacon.variable;
ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
skb_tail_pointer(skb) - ies,
sband->band);
if (ies_ch > 0 && ies_ch != channel) {
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"channel mismatched ds channel %d scan channel %d\n",
ies_ch, channel);
goto drop;
}
}
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
@ -2627,6 +2648,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ieee80211_rx_ni(ar->hw, skb);
return 0;
drop:
dev_kfree_skb(skb);
return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)


@ -3478,7 +3478,9 @@ struct wmi_phyerr_event {
__le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
struct wmi_phyerr phyerrs[];
/* array of struct wmi_phyerr */
u8 phyerrs[];
} __packed;
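Turning phyerrs[] into raw bytes reflects that each record carries a variable-length payload, so the array cannot be indexed directly. A sketch of walking the records (assuming an event pointer ev and the buf_len member of struct wmi_phyerr as used elsewhere in this driver):

    const u8 *ptr = ev->phyerrs;
    u32 i;

    for (i = 0; i < __le32_to_cpu(ev->num_phyerrs); i++) {
        const struct wmi_phyerr *phyerr = (const void *)ptr;

        /* one header plus buf_len bytes of payload per record */
        ptr += sizeof(*phyerr) + __le32_to_cpu(phyerr->buf_len);
    }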
struct wmi_10_4_phyerr_event {


@ -206,13 +206,13 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
const struct ce_attr *ce_attr;
ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
if (ce_attr->dest_nentries) {
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);
@ -221,13 +221,13 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
const struct ce_attr *ce_attr;
ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
if (ce_attr->dest_nentries) {
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);


@ -74,10 +74,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
@ -128,10 +132,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
@ -181,10 +189,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
@ -234,10 +246,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = true,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = false,
.fw_mem_mode = 2,
.num_vdevs = 8,
.num_peers = 128,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.fix_l1ss = true,
@ -287,10 +303,70 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.wakeup_mhi = true,
},
{
.name = "wcn6855 hw2.1",
.hw_rev = ATH11K_HW_WCN6855_HW21,
.fw = {
.dir = "WCN6855/hw2.1",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6855_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &wcn6855_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.fix_l1ss = false,
@ -1009,7 +1085,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
ath11k_dp_free(ab);
ath11k_hal_srng_deinit(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_hal_srng_init(ab);
if (ret)
@ -1043,6 +1119,7 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
@ -1050,6 +1127,34 @@ void ath11k_core_halt(struct ath11k *ar)
idr_init(&ar->txmgmt_idr);
}
static void ath11k_update_11d(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct wmi_set_current_country_params set_current_param = {};
int ret, i;
spin_lock_bh(&ab->base_lock);
memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
spin_unlock_bh(&ab->base_lock);
ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
set_current_param.alpha2[0],
set_current_param.alpha2[1]);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
"pdev id %d failed set current country code: %d\n",
i, ret);
}
}
static void ath11k_core_restart(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
@ -1083,6 +1188,7 @@ static void ath11k_core_restart(struct work_struct *work)
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@ -1219,12 +1325,14 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
mutex_init(&ab->vdev_id_11d_lock);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);


@ -117,6 +117,7 @@ enum ath11k_hw_rev {
ATH11K_HW_IPQ6018_HW10,
ATH11K_HW_QCN9074_HW10,
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
};
enum ath11k_firmware_mode {
@ -199,6 +200,9 @@ enum ath11k_dev_flags {
ATH11K_FLAG_REGISTERED,
ATH11K_FLAG_QMI_FAIL,
ATH11K_FLAG_HTC_SUSPEND_COMPLETE,
ATH11K_FLAG_CE_IRQ_ENABLED,
ATH11K_FLAG_EXT_IRQ_ENABLED,
ATH11K_FLAG_FIXED_MEM_RGN,
};
enum ath11k_monitor_flags {
@ -547,6 +551,7 @@ struct ath11k {
/* protects txmgmt_idr data */
spinlock_t txmgmt_idr_lock;
atomic_t num_pending_mgmt_tx;
wait_queue_head_t txmgmt_empty_waitq;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
@ -585,6 +590,11 @@ struct ath11k {
#endif
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
u32 vdev_id_11d_scan;
struct completion finish_11d_scan;
struct completion finish_11d_ch_list;
bool pending_11d;
bool regdom_set_by_user;
};
struct ath11k_band_cap {
@ -711,6 +721,11 @@ struct ath11k_base {
/* Protects data like peers */
spinlock_t base_lock;
struct ath11k_pdev pdevs[MAX_RADIOS];
struct {
enum WMI_HOST_WLAN_BAND supported_bands;
u32 pdev_id;
} target_pdev_ids[MAX_RADIOS];
u8 target_pdev_count;
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
@ -754,6 +769,8 @@ struct ath11k_base {
struct completion driver_recovery;
struct workqueue_struct *workqueue;
struct work_struct restart_work;
struct work_struct update_11d_work;
u8 new_alpha2[3];
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@ -763,6 +780,8 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
/* To synchronize 11d scan vdev id */
struct mutex vdev_id_11d_lock;
struct timer_list mon_reap_timer;
struct completion htc_suspend;


@ -6,6 +6,35 @@
#include "core.h"
#include "debug.h"
#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
u32 *temp;
int idx;
size = size >> 2;
for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
if (*temp == ATH11K_DB_MAGIC_VALUE)
return -EINVAL;
}
return 0;
}
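A sketch of how a consumer would use the validator on a DMA-filled buffer (the actual call sites are added elsewhere in this series; names here are illustrative):

    /* the magic pattern still being intact means the device never
     * wrote this buffer, so drop it instead of parsing stale data
     */
    if (ath11k_dbring_validate_buffer(ar, buf_addr, buf_size)) {
        ath11k_warn(ar->ab, "db ring buffer is still empty\n");
        return -EINVAL;
    }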
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
void *buffer, u32 size)
{
u32 *temp;
int idx;
size = size >> 2;
for (idx = 0, temp = buffer; idx < size; idx++, temp++)
*temp = ATH11K_DB_MAGIC_VALUE;
}
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff)
@ -26,6 +55,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ptr_unaligned = buff->payload;
ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
DMA_FROM_DEVICE);


@ -76,4 +76,6 @@ int ath11k_dbring_get_cap(struct ath11k_base *ab,
struct ath11k_dbring_cap *db_cap);
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *data, u32 size);
#endif /* ATH11K_DBRING_H */


@ -1051,6 +1051,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;


@ -89,6 +89,19 @@ struct dp_tx_ring {
int tx_status_tail;
};
enum dp_mon_status_buf_state {
/* PPDU id matches in dst ring and status ring */
DP_MON_STATUS_MATCH,
/* status ring dma is not done */
DP_MON_STATUS_NO_DMA,
/* status ring is lagging, reap status ring */
DP_MON_STATUS_LAG,
/* status ring is leading, reap dst ring and drop */
DP_MON_STATUS_LEAD,
/* replenish monitor status ring */
DP_MON_STATUS_REPLINISH,
};
struct ath11k_pdev_mon_stats {
u32 status_ppdu_state;
u32 status_ppdu_start;
@ -104,6 +117,12 @@ struct ath11k_pdev_mon_stats {
u32 dup_mon_buf_cnt;
};
struct dp_full_mon_mpdu {
struct list_head list;
struct sk_buff *head;
struct sk_buff *tail;
};
struct dp_link_desc_bank {
void *vaddr_unaligned;
void *vaddr;
@ -135,7 +154,11 @@ struct ath11k_mon_data {
u32 mon_last_buf_cookie;
u64 mon_last_linkdesc_paddr;
u16 chan_noise_floor;
bool hold_mon_dst_ring;
enum dp_mon_status_buf_state buf_state;
dma_addr_t mon_status_paddr;
struct dp_full_mon_mpdu *mon_mpdu;
struct hal_sw_mon_ring_entries sw_mon_entries;
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
@ -245,6 +268,7 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
struct list_head dp_full_mon_mpdu_list;
u32 reo_cmd_cache_flush_count;
/**
* protects access to below fields,
@ -292,6 +316,7 @@ enum htt_h2t_msg_type {
HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17,
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
@ -957,6 +982,33 @@ struct htt_rx_ring_tlv_filter {
u32 pkt_filter_flags3; /* DATA */
};
#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE BIT(0)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END BIT(1)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
/**
* Enumeration for full monitor mode destination ring select
* 0 - REO destination ring select
* 1 - FW destination ring select
* 2 - SW destination ring select
* 3 - Release destination ring select
*/
enum htt_rx_full_mon_release_ring {
HTT_RX_MON_RING_REO,
HTT_RX_MON_RING_FW,
HTT_RX_MON_RING_SW,
HTT_RX_MON_RING_RELEASE,
};
struct htt_rx_full_monitor_mode_cfg_cmd {
u32 info0;
u32 cfg;
} __packed;
/* HTT message target->host */
enum htt_t2h_msg_type {


@ -2942,6 +2942,43 @@ fail_desc_get:
return req_entries - num_remain;
}
#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
static void
ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
struct hal_tlv_hdr *tlv)
{
struct hal_rx_ppdu_start *ppdu_start;
u16 ppdu_id_diff, ppdu_id, tlv_len;
u8 *ptr;
/* PPDU id is part of second tlv, move ptr to second tlv */
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
ptr = (u8 *)tlv;
ptr += sizeof(*tlv) + tlv_len;
tlv = (struct hal_tlv_hdr *)ptr;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
return;
ptr += sizeof(*tlv);
ppdu_start = (struct hal_rx_ppdu_start *)ptr;
ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
__le32_to_cpu(ppdu_start->info0));
if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
pmon->buf_state = DP_MON_STATUS_LEAD;
ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
pmon->buf_state = DP_MON_STATUS_LAG;
} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
pmon->buf_state = DP_MON_STATUS_LAG;
ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
pmon->buf_state = DP_MON_STATUS_LEAD;
}
}
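A worked example of the wrap handling above, with hypothetical values:

    /* The PPDU id eventually wraps around. With
     * sw_mon_entries.ppdu_id = 10 (post-wrap) and a status-buffer
     * ppdu_id of 65530 (pre-wrap), the first branch initially picks
     * DP_MON_STATUS_LEAD, but the difference 65520 exceeds
     * ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP (32535), so the state is
     * corrected to DP_MON_STATUS_LAG: the status buffer is in fact
     * older than the destination-ring entry.
     */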
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
@ -2949,6 +2986,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct ath11k_mon_data *pmon;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
@ -2962,6 +3000,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
dp = &ar->dp;
pmon = &dp->mon_data;
srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
@ -2974,8 +3013,10 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
*budget -= 1;
rx_mon_status_desc =
ath11k_hal_srng_src_peek(ab, srng);
if (!rx_mon_status_desc)
if (!rx_mon_status_desc) {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
break;
}
ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
&cookie, &rbm);
@ -2988,6 +3029,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
@ -3007,10 +3049,18 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl));
dev_kfree_skb_any(skb);
pmon->buf_state = DP_MON_STATUS_NO_DMA;
goto move_next;
}
if (ab->hw_params.full_monitor_mode) {
ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
if (paddr == pmon->mon_status_paddr)
pmon->buf_state = DP_MON_STATUS_MATCH;
}
__skb_queue_tail(skb_list, skb);
} else {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
@ -3061,10 +3111,10 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (!num_buffs_reaped)
goto exit;
while ((skb = __skb_dequeue(&skb_list))) {
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
while ((skb = __skb_dequeue(&skb_list))) {
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
@ -3092,10 +3142,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dbg(ab, ATH11K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info.peer_id);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
continue;
goto next_skb;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
@ -3104,10 +3151,13 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
next_skb:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
}
exit:
return num_buffs_reaped;
@ -5098,6 +5148,357 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
}
}
static u32
ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu,
struct hal_sw_mon_ring_entries *sw_mon_entries)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
struct sk_buff *msdu = NULL, *last = NULL;
struct hal_sw_monitor_ring *sw_desc = ring_entry;
struct hal_rx_msdu_list msdu_list;
struct hal_rx_desc *rx_desc;
struct ath11k_skb_rxcb *rxcb;
void *rx_msdu_link_desc;
void *p_buf_addr_info, *p_last_buf_addr_info;
int buf_id, i = 0;
u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
u32 rx_bufs_used = 0, msdu_cnt = 0;
u32 total_len = 0, frag_len = 0, sw_cookie;
u16 num_msdus = 0;
u8 rxdma_err, rbm;
bool is_frag, is_first_msdu;
bool drop_mpdu = false;
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
sw_mon_entries->end_of_ppdu = false;
sw_mon_entries->drop_ppdu = false;
p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
msdu_cnt = sw_mon_entries->msdu_cnt;
sw_mon_entries->end_of_ppdu =
FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
if (sw_mon_entries->end_of_ppdu)
return rx_bufs_used;
if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
sw_desc->info0) ==
HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
rxdma_err =
FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
sw_desc->info0);
if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
pmon->rx_mon_stats.dest_mpdu_drop++;
drop_mpdu = true;
}
}
is_frag = false;
is_first_msdu = true;
do {
rx_msdu_link_desc =
(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
(sw_mon_entries->mon_dst_paddr -
pmon->link_desc_banks[sw_cookie].paddr);
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
&num_msdus);
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
msdu_list.sw_cookie[i]);
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
break;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
dma_unmap_single(ar->ab->dev, rxcb->paddr,
msdu->len +
skb_tailroom(msdu),
DMA_FROM_DEVICE);
rxcb->unmapped = 1;
}
if (drop_mpdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon: i %d drop msdu %p *ppdu_id %x\n",
i, msdu, sw_mon_entries->ppdu_id);
dev_kfree_skb_any(msdu);
msdu_cnt--;
goto next_msdu;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
rx_pkt_offset = sizeof(struct hal_rx_desc);
l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
if (is_first_msdu) {
if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
drop_mpdu = true;
dev_kfree_skb_any(msdu);
msdu = NULL;
goto next_msdu;
}
is_first_msdu = false;
}
ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
&is_frag, &total_len,
&frag_len, &msdu_cnt);
rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
if (!(*head_msdu))
*head_msdu = msdu;
else if (last)
last->next = msdu;
last = msdu;
next_msdu:
rx_bufs_used++;
}
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
&sw_mon_entries->mon_dst_paddr,
&sw_mon_entries->mon_dst_sw_cookie,
&rbm,
&p_buf_addr_info);
if (ath11k_dp_rx_monitor_link_desc_return(ar,
p_last_buf_addr_info,
dp->mac_id))
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon: dp_rx_monitor_link_desc_return failed\n");
p_last_buf_addr_info = p_buf_addr_info;
} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
if (last)
last->next = NULL;
*tail_msdu = msdu;
return rx_bufs_used;
}
static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
struct dp_full_mon_mpdu *mon_mpdu,
struct sk_buff *head,
struct sk_buff *tail)
{
mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
if (!mon_mpdu)
return -ENOMEM;
list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
mon_mpdu->head = head;
mon_mpdu->tail = tail;
return 0;
}
static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
struct dp_full_mon_mpdu *mon_mpdu)
{
struct dp_full_mon_mpdu *tmp;
struct sk_buff *tmp_msdu, *skb_next;
if (list_empty(&dp->dp_full_mon_mpdu_list))
return;
list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
tmp_msdu = mon_mpdu->head;
while (tmp_msdu) {
skb_next = tmp_msdu->next;
dev_kfree_skb_any(tmp_msdu);
tmp_msdu = skb_next;
}
kfree(mon_mpdu);
}
}
static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
int mac_id,
struct ath11k_mon_data *pmon,
struct napi_struct *napi)
{
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct dp_full_mon_mpdu *tmp;
struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct sk_buff *head_msdu, *tail_msdu;
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
int ret = 0;
rx_mon_stats = &pmon->rx_mon_stats;
list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
}
kfree(mon_mpdu);
}
return ret;
}
static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct hal_sw_mon_ring_entries *sw_mon_entries;
int quota = 0, work = 0, count;
sw_mon_entries = &pmon->sw_mon_entries;
while (pmon->hold_mon_dst_ring) {
quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
napi, 1);
if (pmon->buf_state == DP_MON_STATUS_MATCH) {
count = sw_mon_entries->status_buf_count;
if (count > 1) {
quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
napi, count);
}
ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
pmon, napi);
pmon->hold_mon_dst_ring = false;
} else if (!pmon->mon_status_paddr ||
pmon->buf_state == DP_MON_STATUS_LEAD) {
sw_mon_entries->drop_ppdu = true;
pmon->hold_mon_dst_ring = false;
}
if (!quota)
break;
work += quota;
}
if (sw_mon_entries->drop_ppdu)
ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
return work;
}
static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
bool break_dst_ring = false;
spin_lock_bh(&pmon->mon_lock);
sw_mon_entries = &pmon->sw_mon_entries;
rx_mon_stats = &pmon->rx_mon_stats;
if (pmon->hold_mon_dst_ring) {
spin_unlock_bh(&pmon->mon_lock);
goto reap_status_ring;
}
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
tail_msdu = NULL;
mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
&head_msdu,
&tail_msdu,
sw_mon_entries);
rx_bufs_used += mpdu_rx_bufs_used;
if (!sw_mon_entries->end_of_ppdu) {
if (head_msdu) {
ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
pmon->mon_mpdu,
head_msdu,
tail_msdu);
if (ret)
break_dst_ring = true;
}
goto next_entry;
} else {
if (!sw_mon_entries->ppdu_id &&
!sw_mon_entries->mon_status_paddr) {
break_dst_ring = true;
goto next_entry;
}
}
rx_mon_stats->dest_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
pmon->buf_state = DP_MON_STATUS_LAG;
pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
pmon->hold_mon_dst_ring = true;
next_entry:
ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
mon_dst_srng);
if (break_dst_ring)
break;
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rxdma_mon_buf_ring,
rx_bufs_used,
HAL_RX_BUF_RBM_SW3_BM);
}
reap_status_ring:
quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
napi, budget);
return quota;
}
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@ -5120,10 +5521,14 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
return ret;
}


@ -9,6 +9,7 @@
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
@ -985,6 +986,7 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ext_stats_cfg_cmd *cmd;
u32 pdev_id;
int len = sizeof(*cmd);
int ret;
@ -998,7 +1000,12 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
if (ab->hw_params.single_pdev_only)
pdev_id = ath11k_mac_get_target_pdev_id(ar);
else
pdev_id = ar->pdev->pdev_id;
cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cfg_params->cfg0;
@ -1026,6 +1033,15 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret = 0, ring_id = 0, i;
if (ab->hw_params.full_monitor_mode) {
ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
dp->mac_id, !reset);
if (ret < 0) {
ath11k_err(ab, "failed to setup full monitor %d\n", ret);
return ret;
}
}
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
if (!reset) {
@ -1091,3 +1107,42 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
return ret;
}
int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
bool config)
{
struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
struct sk_buff *skb;
int ret, len = sizeof(*cmd);
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
memset(cmd, 0, sizeof(*cmd));
cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
HTT_RX_MON_RING_SW);
if (config) {
cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
}
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}


@ -37,4 +37,6 @@ int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter);
int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
bool config);
#endif


@ -974,6 +974,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
srng->msi_data = params->msi_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
srng->hwreg_base[i] = srng_config->reg_start[i] +
@ -1260,6 +1261,24 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
return 0;
}
static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_register_key(hal->srng_key + ring_id);
}
static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_unregister_key(hal->srng_key + ring_id);
}
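Registering one lock_class_key per ring id gives every SRNG lock its own lockdep class; a note on the pattern being addressed (not from this patch):

    /* With a single shared class, nesting two different srng locks,
     * e.g. taking a destination-ring lock while holding a status-ring
     * lock, looks like recursive locking to lockdep. Per-ring classes
     * make such nesting distinguishable, so only genuine
     * self-deadlocks are reported.
     */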
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
@ -1279,6 +1298,8 @@ int ath11k_hal_srng_init(struct ath11k_base *ab)
if (ret)
goto err_free_cont_rdp;
ath11k_hal_register_srng_key(ab);
return 0;
err_free_cont_rdp:
@ -1293,6 +1314,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
ath11k_hal_unregister_srng_key(ab);
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);


@ -902,6 +902,8 @@ struct ath11k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
};
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);


@ -858,6 +858,25 @@ struct hal_reo_entrance_ring {
* this ring has looped around the ring.
*/
#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7)
#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR BIT(11)
#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT GENMASK(15, 12)
#define HAL_SW_MON_RING_INFO0_END_OF_PPDU BIT(16)
#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
#define HAL_SW_MON_RING_INFO1_RING_ID GENMASK(27, 20)
#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
struct hal_sw_monitor_ring {
struct ath11k_buffer_addr buf_addr_info;
struct rx_mpdu_desc rx_mpdu_info;
struct ath11k_buffer_addr status_buf_addr_info;
u32 info0;
u32 info1;
} __packed;
#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)


@ -29,8 +29,7 @@ static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset(&desc->queue_addr_lo, 0,
(sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
@ -62,8 +61,7 @@ static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_flush_cache *)tlv->value;
memset(&desc->cache_addr_lo, 0,
(sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
memset_startat(desc, 0, cache_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
@ -101,8 +99,7 @@ static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset(&desc->queue_addr_lo, 0,
(sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
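memset_startat() zeroes from the named member through the end of the object, which is exactly what the removed open-coded memset() calls computed by hand. For reference, its definition in include/linux/string.h is along these lines:

    #define memset_startat(obj, v, member)                              \
    ({                                                                  \
        u8 *__ptr = (u8 *)(obj);                                        \
        typeof(v) __val = (v);                                          \
        memset(__ptr + offsetof(typeof(*(obj)), member), __val,         \
               sizeof(*(obj)) - offsetof(typeof(*(obj)), member));      \
    })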
@ -764,15 +761,17 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
* size changes and also send WMI message to FW to change the REO
* queue descriptor in Rx peer entry as part of dp_rx_tid_update.
*/
memset(ext_desc, 0, 3 * sizeof(*ext_desc));
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
ext_desc++;
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
ext_desc++;
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
@ -1186,3 +1185,47 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
*pp_buf_addr = (void *)buf_addr_info;
}
void
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
struct hal_sw_mon_ring_entries *sw_mon_entries)
{
struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
struct ath11k_buffer_addr *buf_addr_info;
struct ath11k_buffer_addr *status_buf_addr_info;
struct rx_mpdu_desc *rx_mpdu_desc_info_details;
rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
rx_mpdu_desc_info_details->info0);
buf_addr_info = &sw_mon_ring->buf_addr_info;
status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
buf_addr_info->info0);
sw_mon_entries->mon_status_paddr =
(((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
status_buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
status_buf_addr_info->info0);
sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
status_buf_addr_info->info1);
sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
sw_mon_ring->info0);
sw_mon_entries->dst_buf_addr_info = buf_addr_info;
sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
sw_mon_entries->ppdu_id =
FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
}


@ -77,6 +77,20 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
struct hal_sw_mon_ring_entries {
dma_addr_t mon_dst_paddr;
dma_addr_t mon_status_paddr;
u32 mon_dst_sw_cookie;
u32 mon_status_sw_cookie;
void *dst_buf_addr_info;
void *status_buf_addr_info;
u16 ppdu_id;
u8 status_buf_count;
u8 msdu_cnt;
bool end_of_ppdu;
bool drop_ppdu;
};
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 ppdu_ts;
@ -331,6 +345,9 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
dma_addr_t *paddr, u32 *sw_cookie,
void **pp_buf_addr_info, u8 *rbm,
u32 *msdu_cnt);
void
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
struct hal_sw_mon_ring_entries *sw_mon_ent);
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,


@ -150,18 +150,18 @@ static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
struct target_resource_config *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
if (ab->num_radios == 2) {
config->num_peers = TARGET_NUM_PEERS(DBS);
config->num_tids = TARGET_NUM_TIDS(DBS);
config->num_peers = TARGET_NUM_PEERS(ab, DBS);
config->num_tids = TARGET_NUM_TIDS(ab, DBS);
} else if (ab->num_radios == 3) {
config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS);
config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS);
} else {
/* Control should not reach here */
config->num_peers = TARGET_NUM_PEERS(SINGLE);
config->num_tids = TARGET_NUM_TIDS(SINGLE);
config->num_peers = TARGET_NUM_PEERS(ab, SINGLE);
config->num_tids = TARGET_NUM_TIDS(ab, SINGLE);
}
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;


@ -12,26 +12,26 @@
/* Target configuration defines */
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
#define TARGET_NUM_VDEVS(ab) (ab->hw_params.num_vdevs)
#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
#define TARGET_NUM_PEERS_PDEV(ab) (ab->hw_params.num_peers + TARGET_NUM_VDEVS(ab))
/* Num of peers for Single Radio mode */
#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_SINGLE(ab) (TARGET_NUM_PEERS_PDEV(ab))
/* Num of peers for DBS */
#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_DBS(ab) (2 * TARGET_NUM_PEERS_PDEV(ab))
/* Num of peers for DBS_SBS */
#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
#define TARGET_NUM_PEERS_DBS_SBS(ab) (3 * TARGET_NUM_PEERS_PDEV(ab))
/* Max num of stations (per radio) */
#define TARGET_NUM_STATIONS 512
#define TARGET_NUM_STATIONS(ab) (ab->hw_params.num_peers)
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
#define TARGET_NUM_PEERS(ab, x) TARGET_NUM_PEERS_##x(ab)
#define TARGET_NUM_PEER_KEYS 2
#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
4 * TARGET_NUM_VDEVS + 8)
#define TARGET_NUM_TIDS(ab, x) (2 * TARGET_NUM_PEERS(ab, x) + \
4 * TARGET_NUM_VDEVS(ab) + 8)
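As a worked example, with the ipq8074 values added in core.c above (num_vdevs = 16 + 1, num_peers = 512) the DBS case expands to:

    TARGET_NUM_PEERS_PDEV(ab) = 512 + 17               = 529
    TARGET_NUM_PEERS(ab, DBS) = 2 * 529                = 1058
    TARGET_NUM_TIDS(ab, DBS)  = 2 * 1058 + 4 * 17 + 8  = 2192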
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_OFFLD_PEERS 4
@ -168,10 +168,14 @@ struct ath11k_hw_params {
u16 interface_modes;
bool supports_monitor;
bool full_monitor_mode;
bool supports_shadow_regs;
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
int fw_mem_mode;
u32 num_vdevs;
u32 num_peers;
bool supports_suspend;
u32 hal_desc_sz;
bool fix_l1ss;


@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@ -553,6 +554,67 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
return NULL;
}
struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct ath11k_vif *arvif;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up)
return arvif;
}
}
return NULL;
}
static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
{
return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) ||
(((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) &&
(band2 & WMI_HOST_WLAN_5G_CAP)));
}
u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ieee80211_vif *vif = arvif->vif;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 pdev_id = ab->target_pdev_ids[0].pdev_id;
int i;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return pdev_id;
band = def.chan->band;
for (i = 0; i < ab->target_pdev_count; i++) {
if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands))
return ab->target_pdev_ids[i].pdev_id;
}
return pdev_id;
}
u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar)
{
struct ath11k_vif *arvif;
arvif = ath11k_mac_get_vif_up(ar->ab);
if (arvif)
return ath11k_mac_get_target_pdev_id_from_vif(arvif);
else
return ar->ab->target_pdev_ids[0].pdev_id;
}
static void ath11k_pdev_caps_update(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@ -1920,7 +1982,6 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 ampdu_factor;
enum nl80211_band band;
u16 *he_mcs_mask;
u8 max_nss, he_mcs;
@ -1928,6 +1989,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
int i, he_nss, nss_idx;
bool user_rate_valid = true;
u32 rx_nss, tx_nss, nss_160;
u8 ampdu_factor, rx_mcs_80, rx_mcs_160;
u16 mcs_160_map, mcs_80_map;
bool support_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
@ -1942,6 +2006,39 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
return;
arg->he_flag = true;
support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
/* The supported HE-MCS and NSS set of the peer's he_cap is the intersection with our own he_cap */
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
rx_mcs_160 = i + 1;
break;
}
}
}
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
rx_mcs_80 = i + 1;
break;
}
}
if (support_160)
max_nss = min(rx_mcs_80, rx_mcs_160);
else
max_nss = rx_mcs_80;
arg->peer_nss = min(sta->rx_nss, max_nss);
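/* Worked example (hypothetical map): rx_mcs_80 = 0xfffa encodes, two
* bits per NSS starting at the LSB, NSS1 = 2 (MCS 0-11) and NSS2 = 2
* (MCS 0-11), with the remaining fields 3 (not supported). The loop
* above then stops at i = 1, rx_mcs_80 becomes 2, and peer_nss is
* min(sta->rx_nss, 2).
*/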
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
@ -2584,6 +2681,15 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_POLICY,
WMI_DTIM_POLICY_STICK);
if (ret)
ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n",
arvif->vdev_id, ret);
ath11k_mac_11d_scan_stop_all(ar->ab);
}
static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
@ -3313,6 +3419,7 @@ static int ath11k_start_scan(struct ath11k *ar,
struct scan_req_params *arg)
{
int ret;
unsigned long timeout = 1 * HZ;
lockdep_assert_held(&ar->conf_mutex);
@ -3323,7 +3430,14 @@ static int ath11k_start_scan(struct ath11k *ar,
if (ret)
return ret;
ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) {
timeout = 5 * HZ;
if (ar->supports_6ghz)
timeout += 5 * HZ;
}
ret = wait_for_completion_timeout(&ar->scan.started, timeout);
if (ret == 0) {
ret = ath11k_scan_stop(ar);
if (ret)
@ -3380,15 +3494,38 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
/* pending_11d is set to true only once, when the wlan interface comes
 * up and ath11k_mac_11d_scan_start() is called from
 * ath11k_mac_op_add_interface(); after that it stays false.
 * Without the wait below, a scan issued right after interface up would
 * always fail, taking the connect attempt down with it, because an 11d
 * scan is still running in the firmware at that point.
 */
if (ar->pending_11d) {
long time_left;
unsigned long timeout = 5 * HZ;
if (ar->supports_6ghz)
timeout += 5 * HZ;
time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac wait 11d channel list time left %ld\n", time_left);
}
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH11K_SCAN_ID;
if (req->ie_len) {
arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
if (!arg.extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
arg.extraie.len = req->ie_len;
arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
memcpy(arg.extraie.ptr, req->ie, req->ie_len);
}
if (req->n_ssids) {
@ -3404,10 +3541,24 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (req->n_channels) {
arg.num_chan = req->n_channels;
arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
GFP_KERNEL);
if (!arg.chan_list) {
ret = -ENOMEM;
goto exit;
}
for (i = 0; i < arg.num_chan; i++)
arg.chan_list[i] = req->channels[i]->center_freq;
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
arg.scan_f_add_spoofed_mac_in_probe = 1;
ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
}
ret = ath11k_start_scan(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
@ -3422,6 +3573,8 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
ATH11K_MAC_SCAN_TIMEOUT_MSECS));
exit:
kfree(arg.chan_list);
if (req->ie_len)
kfree(arg.extraie.ptr);
@ -3597,7 +3750,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* flush the fragments cache during key (re)install to
* ensure all frags in the new frag list belong to the same key.
*/
if (peer && cmd == SET_KEY)
if (peer && sta && cmd == SET_KEY)
ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
@ -5128,23 +5281,47 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
return 0;
}
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb)
{
struct sk_buff *msdu = skb;
int num_mgmt;
ieee80211_free_txskb(ar->hw, skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
if (num_mgmt < 0)
WARN_ON_ONCE(1);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
}
static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k *ar = ctx;
struct ath11k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
if (!msdu)
return;
dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
ieee80211_free_txskb(ar->hw, msdu);
ath11k_mgmt_over_wmi_tx_drop(ar, msdu);
}
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
struct ath11k *ar = ctx;
ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@ -5153,17 +5330,10 @@ static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
struct sk_buff *msdu = skb;
struct ath11k *ar = skb_cb->ar;
struct ath11k_base *ab = ar->ab;
if (skb_cb->vif == vif) {
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
}
if (skb_cb->vif == vif)
ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@ -5178,10 +5348,16 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
int buf_id;
int ret;
ATH11K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
spin_unlock_bh(&ar->txmgmt_idr_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac tx mgmt frame, buf id %d\n", buf_id);
if (buf_id < 0)
return -ENOSPC;
@ -5228,7 +5404,7 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
struct sk_buff *skb;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
ieee80211_free_txskb(ar->hw, skb);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
@ -5243,29 +5419,29 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
skb_cb = ATH11K_SKB_CB(skb);
if (!skb_cb->vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
ieee80211_free_txskb(ar->hw, skb);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
continue;
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
atomic_inc(&ar->num_pending_mgmt_tx);
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
WARN_ON_ONCE(1);
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
ieee80211_free_txskb(ar->hw, skb);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
} else {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac tx mgmt frame, vdev_id %d\n",
arvif->vdev_id);
}
} else {
ath11k_warn(ar->ab,
"dropping mgmt frame for vdev %d, is_started %d\n",
arvif->vdev_id,
arvif->is_started);
ieee80211_free_txskb(ar->hw, skb);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
}
}
@ -5296,6 +5472,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
}
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
return 0;
@ -5426,6 +5603,14 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
if (ret) {
ath11k_err(ab, "failed to set prob req oui: %i\n", ret);
goto err;
}
}
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
0, pdev->pdev_id);
if (ret) {
@ -5524,6 +5709,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
@ -5677,6 +5863,122 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct ath11k_vif *arvif;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP)
return true;
}
}
return false;
}
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
{
struct wmi_11d_scan_start_params param;
int ret;
mutex_lock(&ar->ab->vdev_id_11d_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev id for 11d scan %d\n",
ar->vdev_id_11d_scan);
if (ar->regdom_set_by_user)
goto fin;
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID)
goto fin;
if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
goto fin;
if (ath11k_mac_vif_ap_active_any(ar->ab))
goto fin;
param.vdev_id = vdev_id;
param.start_interval_msec = 0;
param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
if (wait)
reinit_completion(&ar->finish_11d_scan);
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = vdev_id;
if (wait) {
ar->pending_11d = true;
ret = wait_for_completion_timeout(&ar->finish_11d_scan,
5 * HZ);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac 11d scan left time %d\n", ret);
if (!ret)
ar->pending_11d = false;
}
}
fin:
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
void ath11k_mac_11d_scan_stop(struct ath11k *ar)
{
int ret;
u32 vdev_id;
if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
return;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d scan\n");
mutex_lock(&ar->ab->vdev_id_11d_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n",
ar->vdev_id_11d_scan);
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
if (ret)
ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret);
else
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac stop soc 11d scan\n");
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
ath11k_mac_11d_scan_stop(ar);
}
}
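/* Lifecycle sketch (illustrative): a STA vdev kicks off the offloaded
 * 11d scan when the interface is added (wait == true on that first
 * pass), the firmware answers with WMI_11D_NEW_COUNTRY_EVENTID once it
 * detects a country IE, and the scan is stopped again when an AP vdev
 * comes up, when the STA vdev is removed, or when the user overrides
 * the regdomain.
 */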
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@ -5702,9 +6004,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
ar->num_created_vdevs, TARGET_NUM_VDEVS);
ar->num_created_vdevs, TARGET_NUM_VDEVS(ab));
ret = -EBUSY;
goto err;
}
@ -5810,6 +6112,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto err_peer_del;
}
ath11k_mac_11d_scan_stop_all(ar->ab);
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
@ -5849,6 +6153,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto err_peer_del;
}
ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@ -5950,6 +6257,9 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath11k_mac_11d_scan_stop(ar);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (ret)
@ -6363,37 +6673,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
vifs[i].old_ctx->def.width,
vifs[i].new_ctx->def.width);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret) {
ath11k_warn(ab, "failed to down vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ar->num_started_vdevs--;
}
/* All relevant vdevs are downed and associated channel resources
/* Associated channel resources of all relevant vdevs
* should be available for the channel switch now.
*/
@ -6698,6 +6978,9 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ret);
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
mutex_unlock(&ar->conf_mutex);
}
@ -6784,6 +7067,17 @@ static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
ATH11K_FLUSH_TIMEOUT);
if (time_left == 0)
ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH11K_FLUSH_TIMEOUT);
if (time_left == 0)
ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
time_left);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac mgmt tx flush mgmt pending %d\n",
atomic_read(&ar->num_pending_mgmt_tx));
}
static int
@ -7849,6 +8143,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@ -7905,11 +8202,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
ar->max_num_stations = TARGET_NUM_STATIONS(ab);
ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
@ -8004,7 +8306,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
@ -8022,6 +8324,8 @@ int ath11k_mac_register(struct ath11k_base *ab)
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
init_waitqueue_head(&ar->txmgmt_empty_waitq);
}
return 0;
@ -8099,6 +8403,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
init_completion(&ar->finish_11d_scan);
init_completion(&ar->finish_11d_ch_list);
}
return 0;


@ -127,6 +127,13 @@ struct ath11k_generic_iter {
extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
void ath11k_mac_destroy(struct ath11k_base *ab);
void ath11k_mac_unregister(struct ath11k_base *ab);
int ath11k_mac_register(struct ath11k_base *ab);
@ -144,6 +151,10 @@ void ath11k_mac_scan_finish(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
u32 vdev_id);
u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar);
u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif);
struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab);
struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);


@ -3,6 +3,9 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include "core.h"
#include "debug.h"
@ -248,6 +251,7 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
u32 user_base_data, base_vector;
int ret, num_vectors, i;
int *irq;
unsigned int msi_data;
ret = ath11k_pci_get_user_msi_assignment(ab_pci,
"MHI", &num_vectors,
@ -262,9 +266,15 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
if (!irq)
return -ENOMEM;
for (i = 0; i < num_vectors; i++)
for (i = 0; i < num_vectors; i++) {
msi_data = base_vector;
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
msi_data += i;
irq[i] = ath11k_pci_get_msi_irq(ab->dev,
base_vector + i);
msi_data);
}
ab_pci->mhi_ctrl->irq = irq;
ab_pci->mhi_ctrl->nr_irqs = num_vectors;
@ -311,6 +321,26 @@ static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
writel(val, addr);
}
static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
{
struct device_node *np;
struct resource res;
int ret;
np = of_find_node_by_type(NULL, "memory");
if (!np)
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
mhi_ctrl->iova_start = res.start + 0x1000000;
mhi_ctrl->iova_stop = res.end;
return 0;
}
int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
@ -339,8 +369,18 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
return ret;
}
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xffffffff;
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
return ret;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@ -356,6 +396,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
break;
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:


@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include "pci.h"
#include "core.h"
@ -16,7 +17,8 @@
#define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
#define WINDOW_ENABLE_BIT 0x40000000
#define WINDOW_REG_ADDRESS 0x310c
@ -25,7 +27,7 @@
#define WINDOW_RANGE_MASK GENMASK(18, 0)
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
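/* Decoding sketch (illustrative): with the narrowed major mask, a raw
 * TCSR_SOC_HW_VERSION value of 0x0211 splits into
 * major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, 0x0211) = 2 and
 * minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, 0x0211) = 0x11,
 * which the probe path below maps to ATH11K_HW_WCN6855_HW21.
 */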
/* BAR0 + 4k is always accessible, and no
@ -76,6 +78,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
};
static const struct ath11k_msi_config msi_config_one_msi = {
.total_vectors = 1,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
},
};
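/* Usage sketch (illustrative): with msi_config_one_msi every user shares
 * vector 0, so ath11k_pci_get_user_msi_assignment(ab_pci, "DP",
 * &num_vectors, &user_base_data, &base_vector) returns num_vectors = 1,
 * base_vector = 0 and user_base_data = ab_pci->msi_ep_base_data; CE, DP
 * and MHI then land on the same interrupt and rely on the IRQF_SHARED
 * handlers below for demultiplexing.
 */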
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
@ -485,11 +498,11 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*user_base_data = msi_config->users[idx].base_vector
+ ab_pci->msi_ep_base_data;
*base_vector = msi_config->users[idx].base_vector;
*base_vector = msi_config->users[idx].base_vector;
*user_base_data = *base_vector + ab_pci->msi_ep_base_data;
ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
ath11k_dbg(ab, ATH11K_DBG_PCI,
"Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
@ -560,16 +573,30 @@ static void ath11k_pci_free_irq(struct ath11k_base *ab)
static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
@ -578,6 +605,8 @@ static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@ -602,20 +631,27 @@ static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
struct ath11k_base *ab = ce_pipe->ab;
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
@ -623,8 +659,15 @@ static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@ -633,6 +676,8 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
{
int i;
clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
@ -645,8 +690,15 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@ -655,6 +707,8 @@ static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@ -690,11 +744,13 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
ath11k_pci_ext_grp_enable(irq_grp);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
@ -706,13 +762,19 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
struct ath11k_base *ab = irq_grp->ab;
int i;
if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
ath11k_pci_ext_grp_disable(irq_grp);
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
@ -721,10 +783,10 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
u32 user_base_data = 0, base_vector = 0;
base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
&num_vectors,
&user_base_data,
@ -754,7 +816,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = base_idx + i;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
@ -768,23 +830,32 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
IRQF_SHARED,
ab_pci->irq_flags,
"DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
disable_irq_nosync(ab->irq_num[irq_idx]);
}
ath11k_pci_ext_grp_disable(irq_grp);
}
return 0;
}
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
const struct cpumask *m)
{
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
@ -798,6 +869,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
if (ret)
return ret;
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret);
return ret;
}
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
@ -812,12 +889,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
IRQF_SHARED, irq_name[irq_idx],
ab_pci->irq_flags, irq_name[irq_idx],
ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
return ret;
goto err_irq_affinity_cleanup;
}
ab->irq_num[irq_idx] = irq;
@ -828,9 +905,13 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
ret = ath11k_pci_ext_irq_config(ab);
if (ret)
return ret;
goto err_irq_affinity_cleanup;
return 0;
err_irq_affinity_cleanup:
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
return ret;
}
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
@ -852,6 +933,8 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@ -896,15 +979,25 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
if (num_vectors != msi_config->total_vectors) {
ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
msi_config->total_vectors, num_vectors);
if (num_vectors >= 0)
return -EINVAL;
else
return num_vectors;
if (num_vectors == msi_config->total_vectors) {
set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
ab_pci->irq_flags = IRQF_SHARED;
} else {
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
1,
1,
PCI_IRQ_MSI);
if (num_vectors < 0) {
ret = -EINVAL;
goto reset_msi_config;
}
clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
ab_pci->msi_config = &msi_config_one_msi;
ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n");
}
ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
ath11k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
@ -925,6 +1018,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
free_msi_vector:
pci_free_irq_vectors(ab_pci->pdev);
reset_msi_config:
return ret;
}
@ -933,6 +1027,25 @@ static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
pci_free_irq_vectors(ab_pci->pdev);
}
static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
{
struct msi_desc *msi_desc;
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
if (!msi_desc) {
ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
pci_free_irq_vectors(ab_pci->pdev);
return -EINVAL;
}
ab_pci->msi_ep_base_data = msi_desc->msg.data;
ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
ab_pci->msi_ep_base_data);
return 0;
}
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
struct ath11k_base *ab = ab_pci->ab;
@ -1130,7 +1243,13 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_aspm_restore(ab_pci);
/* TODO: for now don't restore ASPM in case of single MSI
* vector as MHI register reading in M2 causes system hang.
*/
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
ath11k_pci_aspm_restore(ab_pci);
else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
@ -1229,7 +1348,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
@ -1249,6 +1368,14 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
/* Set fixed_mem_region to true for platforms that support reserved
 * memory from DT. If memory is reserved from DT for the FW, the ath11k
 * driver need not allocate memory itself.
 */
ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
if (!ret)
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev);
if (ret) {
ath11k_err(ab, "failed to claim device: %d\n", ret);
@ -1291,9 +1418,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
ab->hw_rev = ATH11K_HW_WCN6855_HW20;
switch (soc_hw_version_minor) {
case 0x00:
case 0x01:
ab->hw_rev = ATH11K_HW_WCN6855_HW20;
break;
case 0x10:
case 0x11:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
break;
default:
goto unsupported_wcn6855_soc;
}
break;
default:
unsupported_wcn6855_soc:
dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
soc_hw_version_major, soc_hw_version_minor);
ret = -EOPNOTSUPP;
@ -1342,6 +1481,17 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_ce_free;
}
/* The kernel may allocate a dummy vector before request_irq and
 * then allocate the real vector when request_irq is called.
 * So get msi_data here again to avoid a spurious interrupt,
 * as msi_data will be configured into the srngs.
 */
ret = ath11k_pci_config_msi_data(ab_pci);
if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret);
goto err_free_irq;
}
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
@ -1378,6 +1528,8 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
struct ath11k_base *ab = pci_get_drvdata(pdev);
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_pci_power_down(ab);
ath11k_debugfs_soc_destroy(ab);


@ -68,6 +68,7 @@ enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE,
ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath11k_pci {
@ -87,6 +88,8 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
unsigned long irq_flags;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)


@ -9,6 +9,8 @@
#include "core.h"
#include "debug.h"
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
@ -1751,7 +1753,9 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
if (!(ab->bus_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem_delayed) {
delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
@ -1818,10 +1822,12 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
{
int i;
if (ab->bus_params.fixed_mem_region)
return;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if ((ab->bus_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem[i].iaddr)
iounmap(ab->qmi.target_mem[i].iaddr);
if (!ab->qmi.target_mem[i].vaddr)
continue;
@ -1869,10 +1875,44 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
int i, idx;
struct device *dev = ab->dev;
struct device_node *hremote_node = NULL;
struct resource res;
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get hremote_node\n");
return -ENOENT;
}
ret = of_address_to_resource(hremote_node, 0, &res);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get reg from hremote\n");
return ret;
}
if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to assign memory of sz\n");
return -EINVAL;
}
ab->qmi.target_mem[idx].paddr = res.start;
ab->qmi.target_mem[idx].iaddr =
ioremap(ab->qmi.target_mem[idx].paddr,
ab->qmi.target_mem[i].size);
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
host_ddr_sz = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
ab->qmi.target_mem[idx].vaddr = NULL;
@ -1887,10 +1927,16 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
ab->qmi.target_mem[idx].vaddr =
(void *)ATH11K_QMI_CALDB_ADDRESS;
if (hremote_node) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
ioremap(ab->qmi.target_mem[idx].paddr,
ab->qmi.target_mem[i].size);
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
@ -2621,7 +2667,8 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
if (ab->bus_params.fixed_mem_region) {
if (ab->bus_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
@ -2830,7 +2877,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
ab->qmi.ab = ab;
ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode;
ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
&ath11k_qmi_ops, ath11k_qmi_msg_handlers);
if (ret < 0) {


@ -34,14 +34,13 @@
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
ATH11K_QMI_FILE_TYPE_CALDATA,
ATH11K_QMI_FILE_TYPE_CALDATA = 2,
ATH11K_QMI_FILE_TYPE_EEPROM,
ATH11K_QMI_MAX_FILE_TYPE,
};
@ -95,6 +94,7 @@ struct target_mem_chunk {
u32 type;
dma_addr_t paddr;
u32 *vaddr;
void __iomem *iaddr;
};
struct target_info {


@ -86,6 +86,9 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
}
int ath11k_reg_update_chan_list(struct ath11k *ar)
@ -179,6 +182,11 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
if (ar->pending_11d) {
complete(&ar->finish_11d_ch_list);
ar->pending_11d = false;
}
return ret;
}
@ -244,8 +252,15 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
if (ar->pending_11d)
complete(&ar->finish_11d_scan);
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
if (ar->pending_11d)
reinit_completion(&ar->finish_11d_ch_list);
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();


@ -581,6 +581,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u16 length, freq;
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
lockdep_assert_held(&ar->spectral.lock);
@ -614,6 +615,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
return -EINVAL;
}
check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz);
ret = ath11k_dbring_validate_buffer(ar, data, check_length);
if (ret) {
ath11k_warn(ar->ab, "found magic value in fft data, dropping\n");
return ret;
}
ret = ath11k_spectral_pull_search(ar, data, &search);
if (ret) {
ath11k_warn(ab, "failed to pull search report %d\n", ret);
@ -747,6 +755,12 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
goto err;
}
ret = ath11k_dbring_validate_buffer(ar, data, tlv_len);
if (ret) {
ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n");
goto err;
}
summary = (struct spectral_summary_fft_report *)tlv;
ath11k_spectral_pull_summary(ar, &param->meta,
summary, &summ_rpt);


@ -130,6 +130,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
.min_len = sizeof(struct wmi_obss_color_collision_event) },
[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
};
#define PRIMAP(_hw_mode_) \
@ -337,6 +339,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
struct ath11k_pdev *pdev)
{
struct wmi_mac_phy_capabilities *mac_phy_caps;
struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
struct ath11k_band_cap *cap_band;
struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
u32 phy_map;
@ -368,6 +371,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev->pdev_id = mac_phy_caps->pdev_id;
pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
mac_phy_caps->supported_bands;
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
ab->target_pdev_count++;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
@ -2106,7 +2113,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
void *ptr;
int i, ret, len;
u32 *tmp_ptr;
u8 extraie_len_with_pad = 0;
u16 extraie_len_with_pad = 0;
struct hint_short_ssid *s_ssid = NULL;
struct hint_bssid *hint_bssid = NULL;
@ -2125,7 +2132,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
len += sizeof(*bssid) * params->num_bssid;
len += TLV_HDR_SIZE;
if (params->extraie.len)
if (params->extraie.len && params->extraie.len <= 0xFFFF)
extraie_len_with_pad =
roundup(params->extraie.len, sizeof(u32));
len += extraie_len_with_pad;
@ -2174,6 +2181,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
cmd->num_ssids = params->num_ssids;
cmd->ie_len = params->extraie.len;
cmd->n_probes = params->n_probes;
ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
ptr += sizeof(*cmd);
@ -2232,7 +2241,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
if (params->extraie.len)
if (extraie_len_with_pad)
memcpy(ptr, params->extraie.ptr,
params->extraie.len);
@ -2793,6 +2802,42 @@ out:
return ret;
}
int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
struct wmi_set_current_country_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_set_current_country_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_set_current_country_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(&cmd->new_alpha2, &param->alpha2, 3);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"set current country pdev id %d alpha2 %c%c\n",
ar->pdev->pdev_id,
param->alpha2[0],
param->alpha2[1]);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param)
@ -2857,6 +2902,75 @@ ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
return ret;
}
int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
struct wmi_11d_scan_start_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_11d_scan_start_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->vdev_id;
cmd->scan_period_msec = param->scan_period_msec;
cmd->start_interval_msec = param->start_interval_msec;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"send 11d scan start vdev id %d period %d ms internal %d ms\n",
cmd->vdev_id,
cmd->scan_period_msec,
cmd->start_interval_msec);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_11d_scan_stop_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"send 11d scan stop vdev id %d\n",
cmd->vdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@ -3506,7 +3620,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
break;
default:
ath11k_warn(ab, "received unknown obss color collision detetction event\n");
ath11k_warn(ab, "received unknown obss color collision detection event\n");
}
exit:
@ -4230,6 +4344,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
soc->num_radios = 0;
soc->target_pdev_count = 0;
phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
while (phy_id_map && soc->num_radios < MAX_RADIOS) {
@ -4867,6 +4982,7 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
int num_mgmt;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_find(&ar->txmgmt_idr, desc_id);
@ -4890,10 +5006,19 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
/* WARN when we received this event without doing any mgmt tx */
if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
if (num_mgmt < 0)
WARN_ON_ONCE(1);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmi mgmt tx comp pending %d desc id %d\n",
num_mgmt, desc_id);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
return 0;
}
@ -5896,6 +6021,41 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
}
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
const void **tb;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
if (!ev) {
kfree(tb);
ath11k_warn(ab, "failed to fetch 11d new cc ev");
return -EPROTO;
}
spin_lock_bh(&ab->base_lock);
memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
spin_unlock_bh(&ab->base_lock);
ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n",
ab->new_alpha2[0],
ab->new_alpha2[1]);
kfree(tb);
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
}
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
@ -5927,7 +6087,13 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
static bool ath11k_reg_is_world_alpha(char *alpha)
{
return alpha[0] == '0' && alpha[1] == '0';
if (alpha[0] == '0' && alpha[1] == '0')
return true;
if (alpha[0] == 'n' && alpha[1] == 'a')
return true;
return false;
}
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
@ -6020,7 +6186,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* This regd would be applied during mac registration and is
* held constant throughout for regd intersection purpose
@ -7285,6 +7451,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_WOW_WAKEUP_HOST_EVENTID:
ath11k_wmi_event_wow_wakeup_host(ab, skb);
break;
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@ -7543,3 +7712,31 @@ int ath11k_wmi_wow_enable(struct ath11k *ar)
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN])
{
struct sk_buff *skb;
struct wmi_scan_prob_req_oui_cmd *cmd;
u32 prob_req_oui;
int len;
prob_req_oui = (((u32)mac_addr[0]) << 16) |
(((u32)mac_addr[1]) << 8) | mac_addr[2];
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->prob_req_oui = prob_req_oui;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n",
prob_req_oui);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}
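/* Worked example (illustrative): for a randomized scan address starting
 * 00:03:7f, the packing above gives
 * prob_req_oui = (0x00 << 16) | (0x03 << 8) | 0x7f = 0x00037f,
 * i.e. only the OUI half of the MAC address is pinned in firmware while
 * the NIC-specific low three bytes stay randomized per probe request.
 */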


@ -113,10 +113,10 @@ enum wmi_host_hw_mode_priority {
WMI_HOST_HW_MODE_MAX_PRI
};
enum {
enum WMI_HOST_WLAN_BAND {
WMI_HOST_WLAN_2G_CAP = 0x1,
WMI_HOST_WLAN_5G_CAP = 0x2,
WMI_HOST_WLAN_2G_5G_CAP = 0x3,
WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP,
};
/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
@ -2169,6 +2169,13 @@ enum wmi_nss_ratio {
WMI_NSS_RATIO_2_NSS = 0x3,
};
enum wmi_dtim_policy {
WMI_DTIM_POLICY_IGNORE = 1,
WMI_DTIM_POLICY_NORMAL = 2,
WMI_DTIM_POLICY_STICK = 3,
WMI_DTIM_POLICY_AUTO = 4,
};
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@ -3082,7 +3089,6 @@ enum scan_dwelltime_adaptive_mode {
#define WLAN_SCAN_MAX_NUM_SSID 10
#define WLAN_SCAN_MAX_NUM_BSSID 10
#define WLAN_SCAN_MAX_NUM_CHANNELS 40
#define WLAN_SSID_MAX_LEN 32
@ -3303,7 +3309,7 @@ struct scan_req_params {
u32 num_bssid;
u32 num_ssids;
u32 n_probes;
u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
u32 *chan_list;
u32 notify_scan_events;
struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
@ -3314,6 +3320,8 @@ struct scan_req_params {
u32 num_hint_bssid;
struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
struct wmi_mac_addr mac_addr;
struct wmi_mac_addr mac_mask;
};
struct wmi_ssid_arg {
@ -3677,6 +3685,11 @@ struct wmi_scan_chan_list_cmd {
u32 pdev_id;
} __packed;
struct wmi_scan_prob_req_oui_cmd {
u32 tlv_header;
u32 prob_req_oui;
} __packed;
#define WMI_MGMT_SEND_DOWNLD_LEN 64
#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
@ -3770,6 +3783,16 @@ struct stats_request_params {
u32 pdev_id;
};
struct wmi_set_current_country_params {
u8 alpha2[3];
};
struct wmi_set_current_country_cmd {
u32 tlv_header;
u32 pdev_id;
u32 new_alpha2;
} __packed;
enum set_init_cc_type {
WMI_COUNTRY_INFO_TYPE_ALPHA,
WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
@ -3803,6 +3826,28 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
struct wmi_11d_scan_start_params {
u32 vdev_id;
u32 scan_period_msec;
u32 start_interval_msec;
};
struct wmi_11d_scan_start_cmd {
u32 tlv_header;
u32 vdev_id;
u32 scan_period_msec;
u32 start_interval_msec;
} __packed;
struct wmi_11d_scan_stop_cmd {
u32 tlv_header;
u32 vdev_id;
} __packed;
struct wmi_11d_new_cc_ev {
u32 new_alpha2;
} __packed;
#define THERMAL_LEVELS 1
struct tt_level_config {
u32 tmplwm;
@ -5433,9 +5478,16 @@ int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason);
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
struct wmi_set_current_country_params *param);
int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
struct wmi_init_country_params init_cc_param);
int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
struct wmi_11d_scan_start_params *param);
int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id);
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param);
@ -5492,5 +5544,6 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
enum wmi_host_hw_mode_config_type mode);
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
#endif


@ -153,12 +153,19 @@
* implementations.
*/
struct htc_frame_hdr {
u8 eid;
u8 flags;
struct_group_tagged(htc_frame_look_ahead, header,
union {
struct {
u8 eid;
u8 flags;
/* length of data (including trailer) that follows the header */
__le16 payld_len;
/* length of data (including trailer) that follows the header */
__le16 payld_len;
};
u32 word;
};
);
/* end of 4-byte lookahead */
u8 ctrl[2];


@ -2260,19 +2260,16 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
struct htc_packet *packet = NULL;
struct htc_frame_hdr *htc_hdr;
u32 look_ahead;
struct htc_frame_look_ahead look_ahead;
if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word,
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
"htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
if (htc_hdr->eid != ENDPOINT_0)
if (look_ahead.eid != ENDPOINT_0)
return NULL;
packet = htc_get_control_buf(target, false);
@ -2281,8 +2278,8 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
return NULL;
packet->info.rx.rx_flags = 0;
packet->info.rx.exp_hdr = look_ahead;
packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
packet->info.rx.exp_hdr = look_ahead.word;
packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH;
if (packet->act_len > packet->buf_len)
goto fail_ctrl_rx;


@ -154,11 +154,52 @@ static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
static bool ath_merge_ratetbl(struct ieee80211_sta *sta, struct ath_buf *bf,
struct ieee80211_tx_info *tx_info)
{
struct ieee80211_sta_rates *ratetbl;
u8 i;
if (!sta)
return false;
ratetbl = rcu_dereference(sta->rates);
if (!ratetbl)
return false;
if (tx_info->control.rates[0].idx < 0 ||
tx_info->control.rates[0].count == 0) {
i = 0;
} else {
bf->rates[0] = tx_info->control.rates[0];
i = 1;
}
for ( ; i < IEEE80211_TX_MAX_RATES; i++) {
bf->rates[i].idx = ratetbl->rate[i].idx;
bf->rates[i].flags = ratetbl->rate[i].flags;
if (tx_info->control.use_rts)
bf->rates[i].count = ratetbl->rate[i].count_rts;
else if (tx_info->control.use_cts_prot)
bf->rates[i].count = ratetbl->rate[i].count_cts;
else
bf->rates[i].count = ratetbl->rate[i].count;
}
return true;
}
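/* Net effect (illustrative): when rate control supplies a probe rate in
 * control.rates[0], the merged table becomes { probe, ratetbl->rate[1..3] };
 * otherwise all entries come from the station's rate table, substituting
 * count_rts/count_cts for count whenever RTS or CTS protection is in
 * force.
 */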
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ath_buf *bf)
{
ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
ARRAY_SIZE(bf->rates));
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
if (!ath_merge_ratetbl(sta, bf, tx_info))
ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
ARRAY_SIZE(bf->rates));
}
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,


@ -307,8 +307,7 @@ static void carl9170_zap_queues(struct ar9170 *ar)
for (i = 0; i < ar->hw->queues; i++)
ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
ar->mem_bitmap[i] = 0;
bitmap_zero(ar->mem_bitmap, ar->fw.mem_blocks);
rcu_read_lock();
list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
@ -1968,9 +1967,7 @@ int carl9170_register(struct ar9170 *ar)
if (WARN_ON(ar->mem_bitmap))
return -EINVAL;
ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
sizeof(unsigned long),
GFP_KERNEL);
ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@ -2085,7 +2082,7 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
kfree(ar->mem_bitmap);
bitmap_free(ar->mem_bitmap);
ar->mem_bitmap = NULL;
kfree(ar->survey);


@ -126,6 +126,7 @@ enum CountryCode {
CTRY_KOREA_ROC = 410,
CTRY_KOREA_ROC2 = 411,
CTRY_KOREA_ROC3 = 412,
CTRY_KOREA_ROC4 = 413,
CTRY_KUWAIT = 414,
CTRY_LATVIA = 428,
CTRY_LEBANON = 422,


@ -76,6 +76,7 @@ enum EnumRd {
APL7_FCCA = 0x5C,
APL8_WORLD = 0x5D,
APL9_WORLD = 0x5E,
APL10_WORLD = 0x5F,
WOR0_WORLD = 0x60,
WOR1_WORLD = 0x61,
@ -204,6 +205,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{APL6_WORLD, CTL_ETSI, CTL_ETSI},
{APL8_WORLD, CTL_ETSI, CTL_ETSI},
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
{APL10_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
{APL7_FCCA, CTL_FCC, CTL_FCC},
@ -426,6 +428,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_KOREA_ROC, APL9_WORLD, "KR"},
{CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
{CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
{CTRY_KOREA_ROC4, APL10_WORLD, "K4"},
{CTRY_KUWAIT, ETSI3_WORLD, "KW"},
{CTRY_LATVIA, ETSI1_WORLD, "LV"},
{CTRY_LEBANON, NULL1_WORLD, "LB"},
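
The regulatory tables above connect in two steps: an alpha2-style country string maps to a regdomain enum in allCountries[], and that enum selects conformance-test-limit classes in regDomainPairs[] — the South Korea extension adds one row to each plus the APL10_WORLD enum value. A sketch of that two-step lookup with simplified stand-in types (the *_demo names and CTL value are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct country_demo { const char *iso; int rd; };
struct pair_demo    { int rd; int ctl_5g, ctl_2g; };

enum { APL10_WORLD_DEMO = 0x5F, CTL_ETSI_DEMO = 1 };

static const struct country_demo countries[] = {
	{ "K4", APL10_WORLD_DEMO },
};
static const struct pair_demo pairs[] = {
	{ APL10_WORLD_DEMO, CTL_ETSI_DEMO, CTL_ETSI_DEMO },
};

int main(void)
{
	const char *iso = "K4";
	size_t i, j;

	/* Step 1: country string -> regulatory domain enum. */
	for (i = 0; i < sizeof(countries) / sizeof(countries[0]); i++) {
		if (strcmp(countries[i].iso, iso))
			continue;
		/* Step 2: regulatory domain -> conformance test limits. */
		for (j = 0; j < sizeof(pairs) / sizeof(pairs[0]); j++)
			if (pairs[j].rd == countries[i].rd)
				printf("%s -> rd 0x%x, 5 GHz CTL %d\n",
				       iso, pairs[j].rd, pairs[j].ctl_5g);
	}
	return 0;
}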

@ -3459,9 +3459,6 @@ struct wcn36xx_hal_missed_beacon_ind_msg {
/* Beacon Filtering data structures */
/* The above structure would be followed by multiple of below mentioned
* structure
*/
struct beacon_filter_ie {
u8 element_id;
u8 check_ie_presence;
@ -3469,7 +3466,27 @@ struct beacon_filter_ie {
u8 value;
u8 bitmask;
u8 ref;
};
} __packed;
#define WCN36XX_FILTER_CAPABILITY_MASK 0x73cf
#define WCN36XX_FILTER_IE_DS_CHANNEL_MASK 0x00
#define WCN36XX_FILTER_IE_ERP_FILTER_MASK 0xF8
#define WCN36XX_FILTER_IE_EDCA_FILTER_MASK 0xF0
#define WCN36XX_FILTER_IE_QOS_FILTER_MASK 0xF0
#define WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK 0x00
#define WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK 0x00
#define WCN36XX_FILTER_IE_HT_BYTE1_FILTER_MASK 0xF8
#define WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK 0xEB
#define WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK 0xFD
#define WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK 0x00
#define WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK 0x00
#define WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK 0xFC
#define WCN36XX_FILTER_IE_RSN_MASK 0x00
#define WCN36XX_FILTER_IE_VENDOR_MASK 0x00
/* The above structure would be followed by multiple of below mentioned
* structure
*/
struct wcn36xx_hal_add_bcn_filter_req_msg {
struct wcn36xx_hal_msg_header header;
@ -3480,14 +3497,14 @@ struct wcn36xx_hal_add_bcn_filter_req_msg {
u16 ie_num;
u8 bss_index;
u8 reserved;
};
} __packed;
struct wcn36xx_hal_rem_bcn_filter_req {
struct wcn36xx_hal_msg_header header;
u8 ie_Count;
u8 rem_ie_id[1];
};
} __packed;
#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
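
Marking these firmware message structs __packed keeps the compiler from inserting padding that would shift the byte layout the firmware parses. A minimal, self-contained illustration — the field layout below is hypothetical, chosen only so a padding byte appears:

#include <stdint.h>
#include <stdio.h>

struct msg_unpacked { uint8_t bss_index; uint16_t capability_info; };
struct msg_packed {
	uint8_t  bss_index;
	uint16_t capability_info;
} __attribute__((packed));

int main(void)
{
	/* Typically prints 4 vs 3: the packed variant matches the wire
	 * bytes; the padded one would not. */
	printf("unpacked=%zu packed=%zu\n",
	       sizeof(struct msg_unpacked), sizeof(struct msg_packed));
	return 0;
}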

@ -934,6 +934,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
* place where AID is available.
*/
wcn36xx_smd_config_sta(wcn, vif, sta);
if (vif->type == NL80211_IFTYPE_STATION)
wcn36xx_smd_add_beacon_filter(wcn, vif);
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
} else {
wcn36xx_dbg(WCN36XX_DBG_MAC,
@ -1220,7 +1222,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
u16 tid = params->tid;
u16 *ssn = &params->ssn;
int ret = 0;
u8 session;
int session;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@ -1232,9 +1234,11 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->tid = tid;
session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
get_sta_index(vif, sta_priv));
if (session < 0) {
ret = session;
goto out;
}
wcn36xx_smd_add_ba(wcn, session);
wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid,
session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv));
@ -1244,6 +1248,18 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
/* Replace the mac80211 ssn with the firmware one */
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu ssn = %u\n", *ssn);
wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid, ssn);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu fw-ssn = %u\n", *ssn);
/* Start BA session */
session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
get_sta_index(vif, sta_priv));
if (session < 0) {
ret = session;
goto out;
}
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@ -1251,8 +1267,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
spin_unlock_bh(&sta_priv->ampdu_lock);
wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@ -1268,6 +1282,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
wcn36xx_err("Unknown AMPDU action\n");
}
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
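
The AMPDU rework above makes the firmware-assigned BA session id an int so failures propagate as negative errnos instead of being truncated into a u8, and moves session setup into the IEEE80211_AMPDU_TX_START case, returning IEEE80211_AMPDU_TX_START_IMMEDIATE so mac80211 treats the session as ready without waiting for a separate start callback. A sketch of the negative-errno-or-id convention; add_ba_session_demo() is a stand-in, not the driver function:

#include <errno.h>
#include <stdio.h>

/* Stand-in: non-negative session id on success, negative errno on
 * failure, so callers can propagate the value directly. */
static int add_ba_session_demo(int fw_rejects)
{
	if (fw_rejects)
		return -EINVAL;
	return 3;       /* session id assigned by firmware */
}

int main(void)
{
	int session = add_ba_session_demo(0);

	if (session < 0) {
		fprintf(stderr, "BA setup failed: %d\n", session);
		return 1;   /* propagate instead of using a bogus id */
	}
	printf("using BA session %d\n", session);
	return 0;
}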

@ -944,7 +944,7 @@ int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_re
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ);
msg_body->num_channel = min_t(u8, req->n_channels, sizeof(msg_body->channels));
msg_body->num_channel = min_t(u8, req->n_channels, ARRAY_SIZE(msg_body->channels));
for (i = 0; i < msg_body->num_channel; i++) {
struct wcn36xx_hal_channel_param *param = &msg_body->channels[i];
u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER;
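
The one-line change above fixes the channel-count clamp: sizeof() on the channels array yields its size in bytes, while ARRAY_SIZE() yields the number of elements, and for an array of multi-byte structs the two differ. A minimal demonstration (element layout and count are illustrative):

#include <stdio.h>

#define ARRAY_SIZE_DEMO(a) (sizeof(a) / sizeof((a)[0]))

struct chan_demo { int num; int tx_power; };   /* multi-byte element */

int main(void)
{
	struct chan_demo channels[26];

	printf("sizeof     = %zu\n", sizeof(channels));          /* bytes */
	printf("ARRAY_SIZE = %zu\n", ARRAY_SIZE_DEMO(channels)); /* elements */
	return 0;
}
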
@ -2561,6 +2561,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
&session_id);
if (ret) {
wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
ret = -EINVAL;
goto out;
}
@ -2626,27 +2627,43 @@ out:
return ret;
}
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{
struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
int i;
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
if (rsp->candidate_cnt < 1)
return rsp->status ? rsp->status : -EINVAL;
candidate = (struct wcn36xx_hal_trigger_ba_rsp_candidate *)(buf + sizeof(*rsp));
for (i = 0; i < STACFG_MAX_TC; i++) {
ba_info[i] = candidate->ba_info[i];
}
return rsp->status;
}
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id)
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
struct add_ba_info ba_info[STACFG_MAX_TC];
int ret;
if (tid >= STACFG_MAX_TC)
return -EINVAL;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
msg_body.session_id = session_id;
msg_body.session_id = 0; /* not really used */
msg_body.candidate_cnt = 1;
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@ -2661,13 +2678,17 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 sessio
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len, ba_info);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
if (ssn)
*ssn = ba_info[tid].starting_seq_num;
return ret;
}
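
The reworked trigger-BA helper above validates the TID before indexing per-TC state and hands the firmware's starting sequence number back through an optional out pointer. A sketch of that contract; trigger_ba_demo() and its fake firmware table are illustrative, not the driver's code:

#include <errno.h>
#include <stdio.h>

#define MAX_TC_DEMO 8

static int trigger_ba_demo(unsigned int tid, unsigned short *ssn)
{
	unsigned short fw_ssn[MAX_TC_DEMO] = { 100, 200, 300 }; /* fake fw data */

	if (tid >= MAX_TC_DEMO)
		return -EINVAL;     /* reject before touching fw_ssn */
	if (ssn)
		*ssn = fw_ssn[tid]; /* out parameter is optional */
	return 0;
}

int main(void)
{
	unsigned short ssn;

	if (!trigger_ba_demo(1, &ssn))
		printf("fw ssn = %u\n", ssn);
	return 0;
}
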
@ -3172,6 +3193,91 @@ out:
return ret;
}
#define BEACON_FILTER(eid, presence, offs, val, mask, ref_val) \
{ \
.element_id = eid, \
.check_ie_presence = presence, \
.offset = offs, \
.value = val, \
.bitmask = mask, \
.ref = ref_val, \
}
static const struct beacon_filter_ie bcn_filter_ies[] = {
BEACON_FILTER(WLAN_EID_DS_PARAMS, 0, 0, 0,
WCN36XX_FILTER_IE_DS_CHANNEL_MASK, 0),
BEACON_FILTER(WLAN_EID_ERP_INFO, 0, 0, 0,
WCN36XX_FILTER_IE_ERP_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_EDCA_PARAM_SET, 0, 0, 0,
WCN36XX_FILTER_IE_EDCA_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_QOS_CAPA, 0, 0, 0,
WCN36XX_FILTER_IE_QOS_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_CHANNEL_SWITCH, 1, 0, 0,
WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 0, 0,
WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 2, 0,
WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 5, 0,
WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_PWR_CONSTRAINT, 0, 0, 0,
WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK, 0),
BEACON_FILTER(WLAN_EID_OPMODE_NOTIF, 0, 0, 0,
WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK, 0),
BEACON_FILTER(WLAN_EID_VHT_OPERATION, 0, 0, 0,
WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK, 0),
BEACON_FILTER(WLAN_EID_RSN, 1, 0, 0,
WCN36XX_FILTER_IE_RSN_MASK, 0),
BEACON_FILTER(WLAN_EID_VENDOR_SPECIFIC, 1, 0, 0,
WCN36XX_FILTER_IE_VENDOR_MASK, 0),
};
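
For reference, BEACON_FILTER() is just designated-initializer shorthand; the first table entry above expands to:

{
	.element_id        = WLAN_EID_DS_PARAMS,
	.check_ie_presence = 0,
	.offset            = 0,
	.value             = 0,
	.bitmask           = WCN36XX_FILTER_IE_DS_CHANNEL_MASK,
	.ref               = 0,
},
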
int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_bcn_filter_req_msg msg_body, *body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
u8 *payload;
size_t payload_size;
int ret;
if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
return -EOPNOTSUPP;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BCN_FILTER_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
body = (struct wcn36xx_hal_add_bcn_filter_req_msg *)wcn->hal_buf;
body->capability_info = vif->bss_conf.assoc_capability;
body->capability_mask = WCN36XX_FILTER_CAPABILITY_MASK;
body->beacon_interval = vif->bss_conf.beacon_int;
body->ie_num = ARRAY_SIZE(bcn_filter_ies);
body->bss_index = vif_priv->bss_index;
payload = ((u8 *)body) + body->header.len;
payload_size = sizeof(bcn_filter_ies);
memcpy(payload, &bcn_filter_ies, payload_size);
body->header.len += payload_size;
ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
if (ret) {
wcn36xx_err("Sending add bcn_filter failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("add bcn filter response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
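
wcn36xx_smd_add_beacon_filter() above follows the driver's usual message-building shape: stage a fixed request struct in the HAL buffer, append a variable payload at the header.len offset, then grow header.len to cover it before sending. A user-space sketch of that append-and-extend pattern, with invented types and values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hdr { uint16_t type; uint16_t len; };

int main(void)
{
	uint8_t buf[64];
	struct demo_hdr hdr = { .type = 1, .len = sizeof(hdr) };
	const uint8_t payload[3] = { 0xAA, 0xBB, 0xCC };

	memcpy(buf, &hdr, sizeof(hdr));            /* fixed request part */
	memcpy(buf + hdr.len, payload, sizeof(payload));
	hdr.len += sizeof(payload);                /* extend total length */
	memcpy(buf, &hdr, sizeof(hdr));            /* write back header */

	printf("message length = %u\n", hdr.len);
	return 0;
}
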
int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 addr)
{
@ -3227,6 +3333,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_ENTER_IMPS_RSP:
case WCN36XX_HAL_EXIT_IMPS_RSP:
case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP:
case WCN36XX_HAL_ADD_BCN_FILTER_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);

@ -137,7 +137,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index);
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
@ -167,4 +167,7 @@ int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
int wcn36xx_smd_enter_imps(struct wcn36xx *wcn);
int wcn36xx_smd_exit_imps(struct wcn36xx *wcn);
int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
struct ieee80211_vif *vif);
#endif /* _SMD_H_ */