
Merge tag 'iwlwifi-next-for-kalle-2016-07-06' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* work on DQA continued
* SAR BIOS implementation
* some work on debugging capabilities
* added support for GCMP encryption
* data path rework in preparation for new HW
* some cleanup to remove transport dependency on mac80211
* support for MSI-X in preparation for new HW
* lots of work in preparation for HW support (9000 and a000 series)
* general cleanups
* general bugfixes
Kalle Valo 2016-07-08 12:20:30 +03:00
commit 26124f4b77
50 changed files with 2095 additions and 728 deletions

View file

@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)

View file

@ -205,23 +205,6 @@ static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
cpu_to_le32(0xf0005000),
};
/* Loose Coex */
static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaeaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xcc00ff28),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0xcc00aaaa),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0xf0005000),
cpu_to_le32(0xf0005000),
};
/* Full concurrency */
static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
cpu_to_le32(0xaaaaaaaa),

View file

@ -1337,6 +1337,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
priv->cfg->base_params->num_of_queues);

View file

@ -523,11 +523,6 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
return ret;
}
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
priv->cfg->ht_params->smps_mode);
return 0;
}

View file

@ -0,0 +1,131 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2015-2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL_A000_UCODE_API_MAX 24
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
/* NVM versions */
#define IWL_A000_NVM_VERSION 0x0a1d
#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
#define IWL_A000_DCCM_OFFSET 0x800000
#define IWL_A000_DCCM_LEN 0x18000
#define IWL_A000_DCCM2_OFFSET 0x880000
#define IWL_A000_DCCM2_LEN 0x8000
#define IWL_A000_SMEM_OFFSET 0x400000
#define IWL_A000_SMEM_LEN 0x68000
#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_A000_MODULE_FIRMWARE(api) \
IWL_A000_FW_PRE "-" __stringify(api) ".ucode"
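Editor's note: a quick expansion of the macro above (a sketch, not part of the patch). Because IWL_A000_FW_PRE already ends in '-', the extra "-" literal yields a doubled dash in the requested firmware name:
/* IWL_A000_MODULE_FIRMWARE(24)
 *   => IWL_A000_FW_PRE "-" __stringify(24) ".ucode"
 *   => "iwlwifi-Qu-a0-jf-b0-" "-" "24" ".ucode"
 *   => "iwlwifi-Qu-a0-jf-b0--24.ucode"   (note the double dash; presumably unintended)
 */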
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
static const struct iwl_base_params iwl_a000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
.num_of_queues = 31,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
};
static const struct iwl_ht_params iwl_a000_ht_params = {
.stbc = true,
.ldpc = true,
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
#define IWL_DEVICE_A000 \
.ucode_api_max = IWL_A000_UCODE_API_MAX, \
.ucode_api_min = IWL_A000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl_a000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL_A000_DCCM_OFFSET, \
.dccm_len = IWL_A000_DCCM_LEN, \
.dccm2_offset = IWL_A000_DCCM2_OFFSET, \
.dccm2_len = IWL_A000_DCCM2_LEN, \
.smem_offset = IWL_A000_SMEM_OFFSET, \
.smem_len = IWL_A000_SMEM_LEN, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.apmg_not_supported = true, \
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.use_tfh = true
const struct iwl_cfg iwla000_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_FW_PRE,
IWL_DEVICE_A000,
.ht_params = &iwl_a000_ht_params,
.nvm_ver = IWL_A000_NVM_VERSION,
.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
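Editor's sketch of how such a config is typically hooked up: entries like iwla000_2ac_cfg are selected through the PCI ID table in pcie/drv.c. The entry below is purely illustrative; the device and subsystem IDs are placeholders, not values from this patch:
/* Hypothetical example - IDs are placeholders, not real a000 PCI IDs */
static const struct pci_device_id iwl_a000_ids_example[] = {
	{IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ac_cfg)},
	{0},
};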

View file

@ -66,8 +66,9 @@
#define __IWL_CONFIG_H__
#include <linux/types.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@ -192,7 +193,6 @@ struct iwl_base_params {
* @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
u8 ht_greenfield_support:1,
stbc:1,
ldpc:1,
@ -261,6 +261,7 @@ struct iwl_tt_params {
#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
@ -364,7 +365,8 @@ struct iwl_cfg {
mq_rx_supported:1,
vht_mu_mimo_supported:1,
rf_id:1,
integrated:1;
integrated:1,
use_tfh:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@ -450,6 +452,7 @@ extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */

View file

@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -83,6 +84,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
__get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite64,
TP_PROTO(const struct device *dev, u64 offs, u64 val),
TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u64, offs)
__field(u64, val)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%s] write io[%llu] = %llu)",
__get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
TP_PROTO(const struct device *dev, u32 offs, u32 val),
TP_ARGS(dev, offs, val),
@ -100,6 +118,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
__get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_iowrite_prph64,
TP_PROTO(const struct device *dev, u64 offs, u64 val),
TP_ARGS(dev, offs, val),
TP_STRUCT__entry(
DEV_ENTRY
__field(u64, offs)
__field(u64, val)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%s] write PRPH[%llu] = %llu)",
__get_str(dev), __entry->offs, __entry->val)
);
TRACE_EVENT(iwlwifi_dev_ioread_prph32,
TP_PROTO(const struct device *dev, u32 offs, u32 val),
TP_ARGS(dev, offs, val),

View file

@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -33,11 +34,29 @@
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
__le16 fc = hdr->frame_control;
int offs = 24; /* start with normal header length */
if (!ieee80211_is_data(hdr->frame_control))
if (!ieee80211_is_data(fc))
return false;
return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
/* Try to determine if the frame is EAPOL. This might have false
* positives (if there's no RFC 1042 header and we compare to some
* payload instead) but since we're only doing tracing that's not
* a problem.
*/
if (ieee80211_has_a4(fc))
offs += 6;
if (ieee80211_is_data_qos(fc))
offs += 2;
/* don't account for crypto - these are unencrypted */
/* also account for the RFC 1042 header, of course */
offs += 6;
return skb->len > offs + 2 &&
*(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
}
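Editor's worked example of the offset arithmetic above, assuming a QoS data frame without a 4-address header: offs = 24 (base header) + 2 (QoS control) + 6 (LLC/SNAP portion of the RFC 1042 header) = 32, so the function compares the two EtherType bytes at skb->data[32..33] against cpu_to_be16(ETH_P_PAE), i.e. 0x888E, and requires skb->len > 34 before reading them.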
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,

View file

@ -129,8 +129,8 @@ struct iwl_drv {
};
enum {
-DVM_OP_MODE = 0,
-MVM_OP_MODE = 1,
+DVM_OP_MODE,
+MVM_OP_MODE,
};
/* Protects the table contents, i.e. the ops pointer & drv list */
@ -326,8 +326,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
int i, j;
struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
struct iwl_fw_cipher_scheme *fwcs;
struct ieee80211_cipher_scheme *cs;
u32 cipher;
if (len < sizeof(*l) ||
len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
@ -335,22 +333,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
fwcs = &l->cs[j];
-cipher = le32_to_cpu(fwcs->cipher);
/* we skip schemes with zero cipher suite selector */
-if (!cipher)
+if (!fwcs->cipher)
continue;
-cs = &fw->cs[j++];
-cs->cipher = cipher;
-cs->iftype = BIT(NL80211_IFTYPE_STATION);
-cs->hdr_len = fwcs->hdr_len;
-cs->pn_len = fwcs->pn_len;
-cs->pn_off = fwcs->pn_off;
-cs->key_idx_off = fwcs->key_idx_off;
-cs->key_idx_mask = fwcs->key_idx_mask;
-cs->key_idx_shift = fwcs->key_idx_shift;
-cs->mic_len = fwcs->mic_len;
+fw->cs[j++] = *fwcs;
}
return 0;
@ -795,17 +783,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_DEF_CALIB:
if (tlv_len != sizeof(struct iwl_tlv_calib_data))
@ -827,17 +815,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_SECURE_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
-drv->fw.mvm_fw = true;
+drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
if (tlv_len != sizeof(u32))
@ -1275,7 +1263,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
-if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
+if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@ -1403,10 +1391,16 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
-if (fw->mvm_fw)
-op = &iwlwifi_opmode_table[MVM_OP_MODE];
-else
+switch (fw->type) {
+case IWL_FW_DVM:
op = &iwlwifi_opmode_table[DVM_OP_MODE];
+break;
+default:
+WARN(1, "Invalid fw type %d\n", fw->type);
+case IWL_FW_MVM:
+op = &iwlwifi_opmode_table[MVM_OP_MODE];
+break;
+}
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);

View file

@ -66,6 +66,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "iwl-trans.h"
struct iwl_nvm_data {

View file

@ -77,6 +77,7 @@
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
#define TFH_MEM_LOWER_BOUND (0xA06000)
/**
* Keep-Warm (KW) buffer base address.
@ -118,10 +119,17 @@
#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
/* a000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00)
/* Find TFD CB base pointer for given queue */
-static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
+static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
+unsigned int chnl)
{
if (trans->cfg->use_tfh) {
WARN_ON_ONCE(chnl >= 64);
return TFH_TFDQ_CBB_TABLE + 8 * chnl;
}
if (chnl < 16)
return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
if (chnl < 20)
@ -130,6 +138,65 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}
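A minimal usage sketch (editor's illustration; txq_id and dma_addr are hypothetical, and the >> 8 byte-address shift mirrors the legacy FH programming convention used elsewhere in the driver):
/* Sketch: program the TFD circular-buffer base address for one queue.
 * With use_tfh the helper returns an 8-byte-stride TFH register; on
 * older devices it returns the matching 4-byte FH register.
 */
u32 reg = FH_MEM_CBBC_QUEUE(trans, txq_id);
iwl_write_direct32(trans, reg, dma_addr >> 8);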
/* a000 configuration registers */
/*
* TFH Configuration register.
*
* BIT fields:
*
* Bits 3:0:
* Define the maximum number of pending read requests.
* Maximum configuration value allowed is 0xC
* Bits 9:8:
* Define the maximum transfer size. (64 / 128 / 256)
* Bit 10:
* When bit is set and transfer size is set to 128B, the TFH will enable
* reading chunks of more than 64B only if the read address is aligned to 128B.
* In case of DRAM read address which is not aligned to 128B, the TFH will
* enable transfer size which doesn't cross 64B DRAM address boundary.
*/
#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ 0xc
#define TFH_CHUNK_SIZE_128 BIT(8)
#define TFH_CHUNK_SPLIT_MODE BIT(10)
/*
* Defines the offset address in dwords referring from the beginning of the
* Tx CMD which will be updated in DRAM.
* Note that the TFH offset address for Tx CMD update is always referring to
* the start of the TFD first TB.
* In case of a DRAM Tx CMD update the TFH will update PN and Key ID
*/
#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48)
/*
* Controls TX DMA operation
*
* BIT fields:
*
* Bits 31:30: Enable the SRAM DMA channel.
* Turning on bit 31 will kick the SRAM2DRAM DMA.
* Note that the sram2dram may be enabled only after configuring the DRAM and
* SRAM addresses registers and the byte count register.
* Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
* set to 1 - interrupt is sent to the driver
* Bit 0: Indicates the snoop configuration
*/
#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
#define TFH_SRV_DMA_SNOOP BIT(0)
#define TFH_SRV_DMA_TO_DRIVER BIT(24)
#define TFH_SRV_DMA_START BIT(31)
/* Defines the DMA SRAM write start address to transfer a data block */
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64)
/* Defines the 64bits DRAM start address to read the DMA data block from */
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68)
/*
* Defines the number of bytes to transfer from DRAM to SRAM.
* Note that this register may be configured with non-dword aligned size.
*/
#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70)
/**
* Rx SRAM Control and Status Registers (RSCSR)
@ -344,6 +411,32 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
/**
* RFH Status Register
*
* Bit fields:
*
* Bit 29: RBD_FETCH_IDLE
* This status flag is set by the RFH when there is no active RBD fetch from
* DRAM.
* Once the RFH RBD controller starts fetching (or when there is a pending
* RBD read response from DRAM), this flag is immediately turned off.
*
* Bit 30: SRAM_DMA_IDLE
* This status flag is set by the RFH when there is no active transaction from
* SRAM to DRAM.
* Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
*
* Bit 31: RXF_DMA_IDLE
* This status flag is set by the RFH when there is no active transaction from
* RXF to DRAM.
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
#define RFH_GEN_STATUS 0xA09808
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
#define RFH_RXF_DMA_CFG 0xA09820
/* RB size */

View file

@ -89,6 +89,9 @@
* @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@ -106,6 +109,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_PAGING = 12,
IWL_FW_ERROR_DUMP_RADIO_REG = 13,
IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MAX,
};

View file

@ -330,6 +330,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
* memory addresses from the firmware.
* @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@ -370,6 +373,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__

View file

@ -67,7 +67,6 @@
#ifndef __iwl_fw_h__
#define __iwl_fw_h__
#include <linux/types.h>
#include <net/mac80211.h>
#include "iwl-fw-file.h"
#include "iwl-fw-error-dump.h"
@ -230,6 +229,16 @@ struct iwl_gscan_capabilities {
u32 max_number_of_black_listed_ssid;
};
/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
*/
enum iwl_fw_type {
IWL_FW_DVM,
IWL_FW_MVM,
};
/**
* struct iwl_fw - variables associated with the firmware
*
@ -244,7 +253,7 @@ struct iwl_gscan_capabilities {
* @inst_evtlog_ptr: event log offset for runtime ucode.
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
-* @mvm_fw: indicates this is MVM firmware
+* @type: firmware type (&enum iwl_fw_type)
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@ -275,9 +284,9 @@ struct iwl_fw {
u8 valid_tx_ant;
u8 valid_rx_ant;
-bool mvm_fw;
+enum iwl_fw_type type;
-struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
+struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
u32 sdio_adma_addr;

View file

@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
-* Copyright(c) 2015 Intel Deutschland GmbH
+* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project.
*
@ -51,6 +51,14 @@ void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write32);
void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
{
trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
iwl_trans_write32(trans, ofs, val & 0xffffffff);
iwl_trans_write32(trans, ofs + 4, val >> 32);
}
IWL_EXPORT_SYMBOL(iwl_write64);
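Editor's sketch of what the new helper does with a 64-bit register pair: the low dword lands at ofs and the high dword at ofs + 4, matching register pairs such as the a000 TFD table ('queue' below is a hypothetical index):
/* iwl_write64(trans, TFH_TFDQ_CBB_TABLE + 8 * queue, 0x0000000123456780ULL);
 * is roughly equivalent to:
 *   iwl_trans_write32(trans, base,     0x23456780);  // low dword
 *   iwl_trans_write32(trans, base + 4, 0x00000001);  // high dword
 * where base = TFH_TFDQ_CBB_TABLE + 8 * queue.
 */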
u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read32(trans, ofs);
@ -102,6 +110,17 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write64(trans, reg, value);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct64);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
{
@ -133,6 +152,14 @@ void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val)
{
trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val);
iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
@ -228,9 +255,117 @@ void iwl_force_nmi(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
-static const char *get_fh_string(int cmd)
+static const char *get_rfh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
#define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; }
int i;
for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) {
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
};
switch (cmd) {
IWL_CMD(RFH_RXF_DMA_CFG);
IWL_CMD(RFH_GEN_CFG);
IWL_CMD(RFH_GEN_STATUS);
IWL_CMD(FH_TSSR_TX_STATUS_REG);
IWL_CMD(FH_TSSR_TX_ERROR_REG);
default:
return "UNKNOWN";
}
#undef IWL_CMD_MQ
}
struct reg {
u32 addr;
bool is64;
};
static int iwl_dump_rfh(struct iwl_trans *trans, char **buf)
{
int i, q;
int num_q = trans->num_rx_queues;
static const u32 rfh_tbl[] = {
RFH_RXF_DMA_CFG,
RFH_GEN_CFG,
RFH_GEN_STATUS,
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG,
};
static const struct reg rfh_mq_tbl[] = {
{ RFH_Q0_FRBDCB_BA_LSB, true },
{ RFH_Q0_FRBDCB_WIDX, false },
{ RFH_Q0_FRBDCB_RIDX, false },
{ RFH_Q0_URBD_STTS_WPTR_LSB, true },
};
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (buf) {
int pos = 0;
/*
* Register (up to 34 for name + 8 blank/q for MQ): 40 chars
* Colon + space: 2 characters
* 0X%08x: 10 characters
* New line: 1 character
* Total of 53 characters
*/
size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 +
ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40;
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;
pos += scnprintf(*buf + pos, bufsz - pos,
"RFH register values:\n");
for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
"%40s: 0X%08x\n",
get_rfh_string(rfh_tbl[i]),
iwl_read_prph(trans, rfh_tbl[i]));
for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
for (q = 0; q < num_q; q++) {
u32 addr = rfh_mq_tbl[i].addr;
addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
pos += scnprintf(*buf + pos, bufsz - pos,
"%34s(q %2d): 0X%08x\n",
get_rfh_string(addr), q,
iwl_read_prph(trans, addr));
}
return pos;
}
#endif
IWL_ERR(trans, "RFH register values:\n");
for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_rfh_string(rfh_tbl[i]),
iwl_read_prph(trans, rfh_tbl[i]));
for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
for (q = 0; q < num_q; q++) {
u32 addr = rfh_mq_tbl[i].addr;
addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
IWL_ERR(trans, " %34s(q %d): 0X%08x\n",
get_rfh_string(addr), q,
iwl_read_prph(trans, addr));
}
return 0;
}
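Editor's worked example of the buffer sizing above: with ARRAY_SIZE(rfh_tbl) = 5, ARRAY_SIZE(rfh_mq_tbl) = 4 and num_q = 2 RX queues, bufsz = 5 * 53 + 4 * 53 * 2 + 40 = 265 + 424 + 40 = 729 bytes, comfortably above the worst-case formatted output.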
static const char *get_fh_string(int cmd)
{
switch (cmd) {
IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
@ -262,6 +397,9 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
FH_TSSR_TX_ERROR_REG
};
if (trans->cfg->mq_rx_supported)
return iwl_dump_rfh(trans, buf);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (buf) {
int pos = 0;

View file

@ -34,6 +34,7 @@
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val);
u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
@ -53,11 +54,13 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value);
u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);

View file

@ -66,7 +66,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <net/mac80211.h>
extern struct iwl_mod_params iwlwifi_mod_params;

View file

@ -417,5 +417,6 @@ enum {
};
#define UREG_CHICK (0xA05C00)
#define UREG_CHICK_MSI_ENABLE BIT(24)
#define UREG_CHICK_MSIX_ENABLE BIT(25)
#endif /* __iwl_prph_h__ */
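Editor's sketch of how a transport might use these bits (the actual wiring belongs to the PCIe transport patches, not this hunk; msix_enabled is a hypothetical flag):
/* Sketch only: pick the interrupt mode the platform actually granted */
if (msix_enabled)
	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
else
	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE);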

View file

@ -392,11 +392,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define MAX_NO_RECLAIM_CMDS 6
/*
* The first entry in driver_data array in ieee80211_tx_info
* that can be used by the transport.
*/
#define IWL_TRANS_FIRST_DRIVER_DATA 2
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
/*
@ -500,6 +495,8 @@ struct iwl_hcmd_arr {
* @command_groups_size: number of command groups, to avoid illegal access
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@ -519,6 +516,8 @@ struct iwl_trans_config {
int command_groups_size;
u32 sdio_adma_addr;
u8 cb_data_offs;
};
struct iwl_trans_dump_data {
@ -583,6 +582,7 @@ struct iwl_trans_txq_scd_cfg {
* configured. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @freeze_txq_timer: prevents the timer of the queue from firing until the
* queue is set to awake. Must be atomic.
@ -646,6 +646,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@ -758,6 +761,7 @@ enum iwl_plat_pm_mode {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
* @drv - pointer to iwl_drv
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
@ -801,6 +805,7 @@ struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
struct iwl_drv *drv;
enum iwl_trans_state state;
unsigned long status;
@ -1061,6 +1066,13 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
int queue, bool shared_mode)
{
if (trans->ops->txq_set_shared_mode)
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,

View file

@ -142,7 +142,7 @@ static const __le64 iwl_ci_mask[][3] = {
cpu_to_le64(0x0)
},
{
-cpu_to_le64(0xFFC0000000ULL),
+cpu_to_le64(0xFE00000000ULL),
cpu_to_le64(0x0ULL),
cpu_to_le64(0x0ULL)
},
@ -615,8 +615,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
* single share antenna product
-* BT is active
-* we are associated
+* BT is inactive
+* we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||

View file

@ -1020,6 +1020,8 @@ static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
int ret;
ret = kstrtouint(buf, 0, &max_amsdu_len);
if (ret)
return ret;
if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
return -EINVAL;

View file

@ -70,85 +70,6 @@
#define BITS(nb) (BIT(nb) - 1)
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE_OLD:
* @BT_COEX_2W_OLD:
* @BT_COEX_3W_OLD:
* @BT_COEX_NW_OLD:
* @BT_COEX_AUTO_OLD:
* @BT_COEX_BT_OLD: Antenna is for BT (manufacturing tests)
* @BT_COEX_WIFI_OLD: Antenna is for WiFi (manufacturing tests)
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
* @BT_COEX_TTC:
* @BT_COEX_RRC:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
enum iwl_bt_coex_flags {
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS,
BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS,
BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS,
BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS,
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
BT_COEX_TTC = BIT(20),
BT_COEX_RRC = BIT(21),
};
/*
* indicates what has changed in the BT_COEX command.
* BT_VALID_ENABLE must be set for each command. Commands without this bit will
* be discarded by the firmware
*/
enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ENABLE = BIT(0),
BT_VALID_BT_PRIO_BOOST = BIT(1),
BT_VALID_MAX_KILL = BIT(2),
BT_VALID_3W_TMRS = BIT(3),
BT_VALID_KILL_ACK = BIT(4),
BT_VALID_KILL_CTS = BIT(5),
BT_VALID_REDUCED_TX_POWER = BIT(6),
BT_VALID_LUT = BIT(7),
BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
BT_VALID_MULTI_PRIO_LUT = BIT(10),
BT_VALID_TRM_KICK_FILTER = BIT(11),
BT_VALID_CORUN_LUT_20 = BIT(12),
BT_VALID_CORUN_LUT_40 = BIT(13),
BT_VALID_ANT_ISOLATION = BIT(14),
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
BT_VALID_SYNC_TO_SCO = BIT(18),
BT_VALID_TTC = BIT(20),
BT_VALID_RRC = BIT(21),
};
/**
* enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
* @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
* @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
*
* This mechanism allows to have BT and WiFi run concurrently. Since WiFi
* reduces its Tx power, it can work along with BT, hence reducing the amount
* of WiFi frames being killed by BT.
*/
enum iwl_bt_reduced_tx_power {
BT_REDUCED_TX_POWER_CTL = BIT(0),
BT_REDUCED_TX_POWER_DATA = BIT(1),
};
enum iwl_bt_coex_lut_type {
BT_COEX_TIGHT_LUT = 0,
BT_COEX_LOOSE_LUT,
@ -158,64 +79,9 @@ enum iwl_bt_coex_lut_type {
BT_COEX_INVALID_LUT = 0xff,
}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
#define BT_COEX_BOOST_SIZE (4)
#define BT_REDUCED_TX_POWER_BIT BIT(7)
/**
* struct iwl_bt_coex_cmd_old - bt coex configuration command
* @flags:&enum iwl_bt_coex_flags
* @max_kill:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
* @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
* should be set by default
* @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
* should be set by default
* @bt4_antenna_isolation: antenna isolation
* @bt4_antenna_isolation_thr: antenna threshold value
* @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
* @bt4_tx_rx_max_freq0: TxRx max frequency
* @bt_prio_boost: BT priority boost registers
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
* @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
* @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
* @decision_lut: PTA decision LUT, per Prio-Ch
* @bt4_multiprio_lut: multi priority LUT configuration
* @bt4_corun_lut20: co-running 20 MHz LUT configuration
* @bt4_corun_lut40: co-running 40 MHz LUT configuration
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
*/
struct iwl_bt_coex_cmd_old {
__le32 flags;
u8 max_kill;
u8 bt_reduced_tx_power;
u8 override_primary_lut;
u8 override_secondary_lut;
u8 bt4_antenna_isolation;
u8 bt4_antenna_isolation_thr;
u8 bt4_tx_tx_delta_freq_thr;
u8 bt4_tx_rx_max_freq0;
__le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
__le32 wifi_tx_prio_boost;
__le32 wifi_rx_prio_boost;
__le32 kill_ack_msk;
__le32 kill_cts_msk;
__le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
__le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
__le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
__le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_5 */
enum iwl_bt_coex_mode {
BT_COEX_DISABLE = 0x0,
BT_COEX_NW = 0x1,
@ -385,92 +251,4 @@ struct iwl_bt_coex_profile_notif {
u8 reserved[3];
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
enum iwl_bt_coex_prio_table_event {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
BT_COEX_PRIO_TBL_EVT_DTIM = 6,
BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
BT_COEX_PRIO_TBL_EVT_IDLE = 9,
BT_COEX_PRIO_TBL_EVT_MAX = 16,
}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
enum iwl_bt_coex_prio_table_prio {
BT_COEX_PRIO_TBL_DISABLED = 0,
BT_COEX_PRIO_TBL_PRIO_LOW = 1,
BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
BT_COEX_PRIO_TBL_MAX = 8,
}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
#define BT_COEX_PRIO_TBL_PRIO_POS (1)
#define BT_COEX_PRIO_TBL_RESERVED_POS (4)
/**
* struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
* @prio_tbl:
*/
struct iwl_bt_coex_prio_tbl_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
/**
* struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command
* @bt_primary_ci:
* @bt_secondary_ci:
* @co_run_bw_primary:
* @co_run_bw_secondary:
* @primary_ch_phy_id:
* @secondary_ch_phy_id:
*
* Used for BT_COEX_CI command
*/
struct iwl_bt_coex_ci_cmd_old {
__le64 bt_primary_ci;
__le64 bt_secondary_ci;
u8 co_run_bw_primary;
u8 co_run_bw_secondary;
u8 primary_ch_phy_id;
u8 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_1 */
/**
* struct iwl_bt_coex_profile_notif_old - notification about BT coex
* @mbox_msg: message from BT to WiFi
* @msg_idx: the index of the message
* @bt_status: 0 - off, 1 - on
* @bt_open_conn: number of BT connections open
* @bt_traffic_load: load of BT traffic
* @bt_agg_traffic_load: aggregated load of BT traffic
* @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
* @primary_ch_lut: LUT used for primary channel
* @secondary_ch_lut: LUT used for secondary channel
* @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
*/
struct iwl_bt_coex_profile_notif_old {
__le32 mbox_msg[4];
__le32 msg_idx;
u8 bt_status;
u8 bt_open_conn;
u8 bt_traffic_load;
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 ttc_enabled;
u8 rrc_enabled;
u8 reserved;
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
__le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
#endif /* __fw_api_bt_coex_h__ */

View file

@ -72,6 +72,9 @@
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_TDLS_STA_COUNT 4
enum iwl_ac {
AC_BK,
AC_BE,

View file

@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
-* Copyright(c) 2015 Intel Deutschland GmbH
+* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
-* Copyright(c) 2015 Intel Deutschland GmbH
+* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -310,7 +310,8 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_MAC = 0,
IWL_TX_POWER_MODE_SET_DEVICE = 1,
IWL_TX_POWER_MODE_SET_CHAINS = 2,
-}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */;
+IWL_TX_POWER_MODE_SET_ACK = 3,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */;
/**
* struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
@ -338,7 +339,7 @@ struct iwl_dev_tx_power_cmd_v2 {
* @v2: version 2 of the command, embedded here for easier software handling
* @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v3 {
/* v3 is just an extension of v2 - keep this here */
struct iwl_dev_tx_power_cmd_v2 v2;
__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
@ -346,6 +347,19 @@ struct iwl_dev_tx_power_cmd {
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command
* @v3: version 3 of the command, embedded here for easier software handling
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
*/
struct iwl_dev_tx_power_cmd {
/* v4 is just an extension of v3 - keep this here */
struct iwl_dev_tx_power_cmd_v3 v3;
u8 enable_ack_reduction;
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)

View file

@ -296,7 +296,7 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
-IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4),
+IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4),
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
@ -311,7 +311,7 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
-IWL_RX_MPDU_STATUS_KEY_COLOR = BIT(15),
+IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
};
enum iwl_rx_mpdu_hash_filter {

View file

@ -141,6 +141,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_CCM: CCMP encryption algorithm
* @STA_KEY_FLG_TKIP: TKIP encryption algorithm
* @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
* @STA_KEY_FLG_GCMP: GCMP encryption algorithm
* @STA_KEY_FLG_CMAC: CMAC encryption algorithm
* @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
* @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
@ -149,6 +150,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_NOT_VALID: key is invalid
* @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
* @STA_KEY_FLG_KEY_32BYTES: for a non-WEP key, set for 32-byte keys
* @STA_KEY_MULTICAST: set for multicast key
* @STA_KEY_MFP: key is used for Management Frame Protection
*/
@ -158,6 +160,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_CCM = (2 << 0),
STA_KEY_FLG_TKIP = (3 << 0),
STA_KEY_FLG_EXT = (4 << 0),
STA_KEY_FLG_GCMP = (5 << 0),
STA_KEY_FLG_CMAC = (6 << 0),
STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
STA_KEY_FLG_EN_MSK = (7 << 0),
@ -167,6 +170,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
STA_KEY_FLG_WEP_13BYTES = BIT(12),
STA_KEY_FLG_KEY_32BYTES = BIT(12),
STA_KEY_MULTICAST = BIT(14),
STA_KEY_MFP = BIT(15),
};
@ -388,7 +392,6 @@ struct iwl_mvm_add_sta_cmd {
* @key_offset: key offset in key storage
* @key_flags: type %iwl_sta_key_flag
* @key: key material data
* @key2: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
@ -397,8 +400,7 @@ struct iwl_mvm_add_sta_key_cmd {
u8 sta_id;
u8 key_offset;
__le16 key_flags;
-u8 key[16];
-u8 key2[16];
+u8 key[32];
u8 rx_secur_seq_cnt[16];
u8 tkip_rx_tsc_byte2;
u8 reserved;

View file

@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -252,6 +253,20 @@ struct mvm_statistics_general_v8 {
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
/**
* struct mvm_statistics_load - RX statistics for multi-queue devices
* @air_time: accumulated air time, per mac
* @byte_count: accumulated byte count, per mac
* @pkt_count: accumulated packet count, per mac
* @avg_energy: average RSSI, per station
*/
struct mvm_statistics_load {
__le32 air_time[NUM_MAC_INDEX];
__le32 byte_count[NUM_MAC_INDEX];
__le32 pkt_count[NUM_MAC_INDEX];
u8 avg_energy[IWL_MVM_STATION_COUNT];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@ -266,7 +281,6 @@ struct mvm_statistics_rx {
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* STATISTICS_CMD (0x9c), below.
*/
struct iwl_notif_statistics_v10 {
__le32 flag;
struct mvm_statistics_rx rx;
@ -274,6 +288,14 @@ struct iwl_notif_statistics_v10 {
struct mvm_statistics_general_v8 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
struct iwl_notif_statistics_v11 {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general_v8 general;
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2

View file

@ -137,17 +137,32 @@ enum iwl_tx_pm_timeouts {
PM_FRAME_ASSOC = 3,
};
/*
* TX command security control
*/
#define TX_CMD_SEC_WEP 0x01
#define TX_CMD_SEC_CCM 0x02
#define TX_CMD_SEC_TKIP 0x03
#define TX_CMD_SEC_EXT 0x04
#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
#define TX_CMD_SEC_KEY128 0x08
/**
* enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command
* @TX_CMD_SEC_WEP: WEP encryption algorithm.
* @TX_CMD_SEC_CCM: CCM encryption algorithm.
* @TX_CMD_SEC_TKIP: TKIP encryption algorithm.
* @TX_CMD_SEC_EXT: extended cipher algorithm.
* @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
* @TX_CMD_SEC_KEY128: set for 104-bit WEP key.
* @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
* from the table instead of from the TX command.
* If the key is taken from the key table its index should be given by the
* first byte of the TX command key field.
*/
enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_WEP = 0x01,
TX_CMD_SEC_CCM = 0x02,
TX_CMD_SEC_TKIP = 0x03,
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
TC_CMD_SEC_KEY_FROM_TABLE = 0x08,
};
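A small sketch of how these flags compose for the new GCMP case (editor's illustration; tx_cmd and hw_key_idx are assumed to exist in the caller, as in the driver's TX path):
/* Sketch: a GCMP key fetched from the FW key table. Per the comment
 * above, key[0] then carries the table index.
 */
tx_cmd->sec_ctl = TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = hw_key_idx;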
/* TODO: how are these values OK with only a 16-bit variable??? */
/*

View file

@ -90,6 +90,7 @@ enum {
* DQA queue numbers
*
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
@ -108,6 +109,7 @@ enum {
*/
enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_AUX_QUEUE = 1,
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
@ -127,9 +129,6 @@ enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_CMD = 7,
};
#define IWL_MVM_STATION_COUNT 16
#define IWL_MVM_TDLS_STA_COUNT 4
/* commands */
enum {
@ -330,6 +329,7 @@ enum iwl_system_subcmd_ids {
};
enum iwl_data_path_subcmd_ids {
DQA_ENABLE_CMD = 0x0,
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
MU_GROUP_MGMT_NOTIF = 0xFE,
@ -359,6 +359,14 @@ struct iwl_cmd_response {
__le32 status;
};
/*
* struct iwl_dqa_enable_cmd
* @cmd_queue: the TXQ number of the command queue
*/
struct iwl_dqa_enable_cmd {
__le32 cmd_queue;
} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
/*
* struct iwl_tx_ant_cfg_cmd
* @valid: valid antenna configuration

View file

@ -288,7 +288,8 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
fifo_hdr->fifo_num = cpu_to_le32(i);
/* Mark the number of TXF we're pulling now */
-iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
+ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,

View file

@ -65,6 +65,7 @@
*****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@ -122,6 +123,9 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
if (mvm->trans->num_rx_queues == 1)
return 0;
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
@ -131,6 +135,23 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
};
u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
else
IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
return ret;
}
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@ -139,17 +160,21 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
return;
for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
-if (!mvm->fw_paging_db[i].fw_paging_block) {
+struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
+
+if (!paging->fw_paging_block) {
IWL_DEBUG_FW(mvm,
"Paging: block %d already freed, continue to next page\n",
i);
continue;
}
+dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
+paging->fw_paging_size, DMA_BIDIRECTIONAL);
-__free_pages(mvm->fw_paging_db[i].fw_paging_block,
-get_order(mvm->fw_paging_db[i].fw_paging_size));
-mvm->fw_paging_db[i].fw_paging_block = NULL;
+__free_pages(paging->fw_paging_block,
+get_order(paging->fw_paging_size));
+paging->fw_paging_block = NULL;
}
kfree(mvm->trans->paging_download_buf);
mvm->trans->paging_download_buf = NULL;
@ -882,6 +907,177 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
#define ACPI_WRDS_METHOD "WRDS"
#define ACPI_WRDS_WIFI (0x07)
#define ACPI_WRDS_TABLE_SIZE 10
struct iwl_mvm_sar_table {
bool enabled;
u8 values[ACPI_WRDS_TABLE_SIZE];
};
#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
struct iwl_mvm_sar_table *sar_table)
{
union acpi_object *data_pkg;
u32 i;
/* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer set to 0).
*/
if (wrds->type != ACPI_TYPE_PACKAGE ||
wrds->package.count < 2 ||
wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
wrds->package.elements[0].integer.value != 0) {
IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
return -EINVAL;
}
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < wrds->package.count; i++) {
union acpi_object *domain;
data_pkg = &wrds->package.elements[i];
/* Skip anything that is not a package with the right
* amount of elements (i.e. domain_type,
* enabled/disabled plus the sar table size.
*/
if (data_pkg->type != ACPI_TYPE_PACKAGE ||
data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
continue;
domain = &data_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
domain->integer.value == ACPI_WRDS_WIFI)
break;
data_pkg = NULL;
}
if (!data_pkg)
return -ENOENT;
if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
return -EINVAL;
sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
union acpi_object *entry;
entry = &data_pkg->package.elements[i + 2];
if ((entry->type != ACPI_TYPE_INTEGER) ||
(entry->integer.value > U8_MAX))
return -EINVAL;
sar_table->values[i] = entry->integer.value;
}
return 0;
}
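For reference, the package shape this parser accepts, reconstructed by the editor from the checks above (not a dump of a real BIOS table):
/* Expected WRDS layout:
 * Package {
 *     0,                      // revision: integer, must be 0
 *     Package {
 *         0x07,               // domain: ACPI_WRDS_WIFI
 *         1,                  // enabled flag (integer)
 *         v1, v2, ..., v10    // ACPI_WRDS_TABLE_SIZE integers, each <= U8_MAX
 *     },
 *     ...                     // other domains are skipped
 * }
 */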
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
struct iwl_mvm_sar_table *sar_table)
{
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int ret;
root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_RADIO(mvm,
"Could not retrieve root port ACPI handle\n");
return -ENOENT;
}
/* Get the method's handle */
status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
return -ENOENT;
}
/* Call WRDS with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
return -ENOENT;
}
ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
kfree(wrds.pointer);
return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
struct iwl_mvm_sar_table *sar_table)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
.v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int len = sizeof(cmd);
/* we can't do anything with the table if the FW doesn't support it */
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TX_POWER_CHAIN)) {
IWL_DEBUG_RADIO(mvm,
"FW doesn't support per-chain TX power settings.\n");
return 0;
}
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
ret = iwl_mvm_sar_get_table(mvm, &sar_table);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return 0;
}
if (!sar_table.enabled)
return 0;
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
ACPI_WRDS_TABLE_SIZE);
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
idx = (i * IWL_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(sar_table.values[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, sar_table.values[idx]);
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
return ret;
}
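Editor's worked example of the units and indexing above: with IWL_NUM_CHAIN_LIMITS = 2 and IWL_NUM_SUB_BANDS = 5 (so 2 x 5 matches the ten-entry BIOS table, as the BUILD_BUG_ON asserts), the mapping is idx = i * 5 + j; and since values are in 1/8 dBm steps, a BIOS entry of 48 limits that chain/sub-band to 48 * 0.125 = 6 dBm.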
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@ -976,6 +1172,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
/* Enable DQA-mode if required */
if (iwl_mvm_is_dqa_supported(mvm)) {
ret = iwl_mvm_send_dqa_cmd(mvm);
if (ret)
goto error;
} else {
IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
}
/* Add auxiliary station for scanning */
ret = iwl_mvm_add_aux_sta(mvm);
if (ret)
@ -1048,6 +1253,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
ret = iwl_mvm_sar_init(mvm);
if (ret)
goto error;
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:

View file

@ -465,11 +465,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
-BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers;
if (iwl_mvm_has_new_rx_api(mvm)) {
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_GCMP;
hw->wiphy->n_cipher_suites++;
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_GCMP_256;
hw->wiphy->n_cipher_suites++;
}
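
The hunk above grows a fixed base list of cipher suites into a larger per-device array, appending GCMP and GCMP-256 only when the new RX API is available. A standalone sketch of that pattern, with a hypothetical buffer size (the suite selectors are the standard 802.11 values):

/*
 * Sketch of the conditional suite-list pattern, not driver code.
 */
#include <stdio.h>
#include <string.h>

#define MAX_SUITES 8

static const unsigned int base_suites[] = {
    0x000FAC02,     /* TKIP */
    0x000FAC04,     /* CCMP */
};

int main(void)
{
    unsigned int suites[MAX_SUITES];
    size_t n = sizeof(base_suites) / sizeof(base_suites[0]);
    int has_new_rx_api = 1;     /* stands in for iwl_mvm_has_new_rx_api() */

    memcpy(suites, base_suites, sizeof(base_suites));
    if (has_new_rx_api) {
        suites[n++] = 0x000FAC08;   /* WLAN_CIPHER_SUITE_GCMP */
        suites[n++] = 0x000FAC09;   /* WLAN_CIPHER_SUITE_GCMP_256 */
    }
    printf("advertising %zu cipher suites\n", n);
    return 0;
}
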
/*
* Enable 11w if advertised by firmware and software crypto
* is not enabled (as the firmware will interpret some mgmt
@@ -485,10 +494,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* currently FW API supports only one optional cipher scheme */
if (mvm->fw->cs[0].cipher) {
const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
mvm->hw->n_cipher_schemes = 1;
mvm->hw->cipher_schemes = &mvm->fw->cs[0];
mvm->ciphers[hw->wiphy->n_cipher_suites] =
mvm->fw->cs[0].cipher;
cs->cipher = le32_to_cpu(fwcs->cipher);
cs->iftype = BIT(NL80211_IFTYPE_STATION);
cs->hdr_len = fwcs->hdr_len;
cs->pn_len = fwcs->pn_len;
cs->pn_off = fwcs->pn_off;
cs->key_idx_off = fwcs->key_idx_off;
cs->key_idx_mask = fwcs->key_idx_mask;
cs->key_idx_shift = fwcs->key_idx_shift;
cs->mic_len = fwcs->mic_len;
mvm->hw->cipher_schemes = mvm->cs;
mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
hw->wiphy->n_cipher_suites++;
}
@@ -1011,11 +1033,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
ieee80211_wake_queues(mvm->hw);
@@ -1200,6 +1218,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->add_stream_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
iwl_mvm_free_fw_dump_desc(mvm);
mutex_lock(&mvm->mutex);
@@ -1231,18 +1250,20 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
.v2.mac_context_id =
.v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
.v3.v2.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
.v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
};
int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
len = sizeof(cmd.v2);
len = sizeof(cmd.v3.v2);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
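
The command struct nests v2 as the first member of v3, and v3 as the first member of the full command, so older firmware is served by simply truncating the length. A sketch of the idea under hypothetical struct layouts:

/*
 * Sketch with hypothetical layouts: truncating the length yields a
 * valid older-format command because each version prefixes the next.
 */
#include <stdio.h>

struct tx_power_v2 {
    unsigned int set_mode;
    unsigned int mac_context_id;
    unsigned short pwr_restriction;
};

struct tx_power_v3 {
    struct tx_power_v2 v2;
    unsigned short per_chain_restriction[2][5];
};

struct tx_power_cmd {
    struct tx_power_v3 v3;
    unsigned int ack_bitmap;
};

int main(void)
{
    struct tx_power_cmd cmd = { { { 0 } } };
    size_t len = sizeof(cmd);           /* newest firmware: full struct */
    int has_tx_power_ack = 0, has_chain_api = 0;

    if (!has_tx_power_ack)
        len = sizeof(cmd.v3);           /* drop the trailing ack field */
    if (!has_chain_api)
        len = sizeof(cmd.v3.v2);        /* drop the per-chain limits too */
    printf("sending %zu of %zu bytes\n", len, sizeof(cmd));
    return 0;
}
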
@@ -2720,6 +2741,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -2781,7 +2804,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
sta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
struct ieee80211_key_seq seq;
int tid, q;
@@ -2835,7 +2859,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
if (sta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
ptk_pn = rcu_dereference_protected(
mvmsta->ptk_pn[keyidx],
@@ -3908,6 +3933,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->avg_energy) {
sinfo->signal_avg = mvmsta->avg_energy;
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;

View file

@@ -687,13 +687,28 @@ struct iwl_mvm_baid_data {
* This is the state of a queue that has been fully configured (including
* SCD pointers, etc), has a specific RA/TID assigned to it, and can be
* used to send traffic.
* @IWL_MVM_QUEUE_SHARED: queue is shared, or in the process of becoming shared
* This is a state in which a single queue serves more than one TID, none of
* which is aggregated. Note that the queue is only associated with one
* RA.
* @IWL_MVM_QUEUE_INACTIVE: queue is allocated but has no traffic on it
* This is the state of a queue that has had traffic on it, but during the
* last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
* it. In this state, when a new queue needs to be allocated but no free
* queue exists, an inactive queue might be freed and given to
* the new RA/TID.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
#define IWL_MVM_NUM_CIPHERS 8
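
A compact sketch of the queue lifecycle described in the kernel-doc above, with invented transition names: READY decays to INACTIVE after the inactivity timeout, and SHARED is entered when a second TID is mapped onto a READY queue. This is a simplification, not the driver's state handling:

#include <stdio.h>
#include <string.h>

enum q_status { Q_FREE, Q_RESERVED, Q_READY, Q_SHARED, Q_INACTIVE };

static enum q_status q_event(enum q_status s, const char *ev)
{
    if (!strcmp(ev, "reserve") && s == Q_FREE)
        return Q_RESERVED;
    if (!strcmp(ev, "configure") && (s == Q_RESERVED || s == Q_INACTIVE))
        return Q_READY;
    if (!strcmp(ev, "share") && s == Q_READY)
        return Q_SHARED;        /* a second TID mapped onto the queue */
    if (!strcmp(ev, "timeout") && (s == Q_READY || s == Q_SHARED))
        return Q_INACTIVE;      /* no traffic for the timeout period */
    if (!strcmp(ev, "free"))
        return Q_FREE;
    return s;
}

int main(void)
{
    const char *evs[] = { "reserve", "configure", "timeout", "configure" };
    enum q_status s = Q_FREE;
    unsigned int i;

    for (i = 0; i < sizeof(evs) / sizeof(evs[0]); i++)
        s = q_event(s, evs[i]);
    printf("final state: %d (Q_READY=%d)\n", s, Q_READY);
    return 0;
}
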
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -750,11 +765,16 @@ struct iwl_mvm {
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
@@ -789,7 +809,7 @@ struct iwl_mvm {
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
enum iwl_mvm_scan_type scan_type;
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
struct timer_list scan_timer;
struct delayed_work scan_timeout_dwork;
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
@@ -912,11 +932,6 @@ struct iwl_mvm {
wait_queue_head_t d0i3_exit_waitq;
/* BT-Coex */
u8 bt_ack_kill_msk[NUM_PHY_CTX];
u8 bt_cts_kill_msk[NUM_PHY_CTX];
struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
@@ -996,7 +1011,8 @@ struct iwl_mvm {
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
u32 ciphers[6];
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct iwl_mvm_tof_data tof_data;
struct ieee80211_vif *nan_vif;
@@ -1402,7 +1418,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout(unsigned long data);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1618,7 +1634,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
*/
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
@@ -1725,6 +1741,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,

View file

@@ -66,7 +66,6 @@
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
@@ -667,8 +666,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
.source_id = (u8)src_id,
};
struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
struct iwl_mcc_update_resp *resp_cp;
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = MCC_UPDATE_CMD,
@@ -701,34 +699,36 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
/* Extract MCC response */
if (resp_v2) {
mcc_resp = (void *)pkt->data;
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
} else {
mcc_resp_v1 = (void *)pkt->data;
struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
if (resp_cp) {
resp_cp->status = mcc_resp_v1->status;
resp_cp->mcc = mcc_resp_v1->mcc;
resp_cp->cap = mcc_resp_v1->cap;
resp_cp->source_id = mcc_resp_v1->source_id;
resp_cp->n_channels = mcc_resp_v1->n_channels;
memcpy(resp_cp->channels, mcc_resp_v1->channels,
n_channels * sizeof(__le32));
}
}
resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels *
sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
if (!resp_cp) {
ret = -ENOMEM;
goto exit;
}
if (resp_v2) {
memcpy(resp_cp, mcc_resp, resp_len);
} else {
resp_cp->status = mcc_resp_v1->status;
resp_cp->mcc = mcc_resp_v1->mcc;
resp_cp->cap = mcc_resp_v1->cap;
resp_cp->source_id = mcc_resp_v1->source_id;
resp_cp->n_channels = mcc_resp_v1->n_channels;
memcpy(resp_cp->channels, mcc_resp_v1->channels,
n_channels * sizeof(__le32));
}
status = le32_to_cpu(resp_cp->status);
mcc = le16_to_cpu(resp_cp->mcc);
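
Both response versions are normalized into a single heap copy laid out as v2, with the trailing channel array copied behind the scalar fields. A user-space sketch of that v1-to-v2 normalization, using hypothetical struct names:

/*
 * Sketch (hypothetical structs): allocate the newer layout plus the
 * channel array, copy the scalar fields one by one, then memcpy the
 * trailing channels.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resp_v1 {
    unsigned int status, mcc, n_channels;
    unsigned int channels[];
};

struct resp_v2 {
    unsigned int status, mcc, cap, source_id, n_channels;
    unsigned int channels[];
};

static struct resp_v2 *normalize(const struct resp_v1 *v1)
{
    size_t len = sizeof(struct resp_v2) +
                 v1->n_channels * sizeof(v1->channels[0]);
    struct resp_v2 *v2 = calloc(1, len);

    if (!v2)
        return NULL;
    v2->status = v1->status;
    v2->mcc = v1->mcc;
    v2->n_channels = v1->n_channels;    /* cap/source_id stay 0 */
    memcpy(v2->channels, v1->channels,
           v1->n_channels * sizeof(v1->channels[0]));
    return v2;
}

int main(void)
{
    struct resp_v1 *v1 = calloc(1, sizeof(*v1) + 2 * sizeof(unsigned int));
    struct resp_v2 *v2;

    v1->mcc = ('U' << 8) | 'S';
    v1->n_channels = 2;
    v2 = normalize(v1);
    printf("v2 has %u channels\n", v2 ? v2->n_channels : 0);
    free(v2);
    free(v1);
    return 0;
}
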
@@ -802,9 +802,8 @@ static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
u32 mcc_val;
struct pci_dev *pdev = to_pci_dev(mvm->dev);
root_handle = ACPI_HANDLE(&pdev->dev);
root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_LAR(mvm,
"Could not retrieve root port ACPI handle\n");

View file

@@ -577,18 +577,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
mvm->aux_queue = 15;
if (!iwl_mvm_is_dqa_supported(mvm)) {
mvm->first_agg_queue = 16;
mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
} else {
mvm->aux_queue = 15;
mvm->first_agg_queue = 16;
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
}
mvm->sf_state = SF_UNINIT;
mvm->cur_ucode = IWL_UCODE_INIT;
mvm->drop_bcn_ap_mode = true;
@@ -608,6 +611,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
spin_lock_init(&mvm->d0i3_tx_lock);
@@ -664,6 +668,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
@@ -766,9 +773,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_tof_init(mvm);
setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
(unsigned long)mvm);
return op_mode;
out_unregister:
@@ -822,8 +826,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
del_timer_sync(&mvm->scan_timer);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);

View file

@@ -211,6 +211,9 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (is_ht80(rate) && (vht_cap->cap &
IEEE80211_VHT_CAP_SHORT_GI_80))
return true;
if (is_ht160(rate) && (vht_cap->cap &
IEEE80211_VHT_CAP_SHORT_GI_160))
return true;
return false;
}
@@ -399,7 +402,7 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
int tid);
int tid, bool ndp);
static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
@@ -445,6 +448,13 @@ static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 191, 0, 244, 288, 298, 308, 313, 318, 323, 328, 330},
{0, 0, 0, 0, 200, 0, 251, 293, 302, 312, 317, 322, 327, 332, 334},
{0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581},
{0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165},
};
static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
@@ -466,6 +476,13 @@ static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 240, 0, 278, 308, 313, 319, 322, 324, 328, 330, 334},
{0, 0, 0, 0, 247, 0, 282, 310, 315, 320, 323, 325, 329, 332, 338},
{0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629, 10592},
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
@@ -901,7 +918,6 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
}
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
!is_vht(rate));
@@ -1161,7 +1177,7 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
}
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info)
int tid, struct ieee80211_tx_info *info, bool ndp)
{
int legacy_success;
int retries;
@@ -1384,7 +1400,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
done:
/* See if there's a better rate or modulation mode to try. */
if (sta->supp_rates[info->band])
rs_rate_scale_perform(mvm, sta, lq_sta, tid);
rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
}
/*
@@ -1407,7 +1423,8 @@ static void rs_mac80211_tx_status(void *mvm_r,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info);
iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
ieee80211_is_qos_nullfunc(hdr->frame_control));
}
/*
@@ -1494,6 +1511,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
case RATE_MCS_CHAN_WIDTH_80:
ht_tbl_pointer = expected_tpt_siso_80MHz;
break;
case RATE_MCS_CHAN_WIDTH_160:
ht_tbl_pointer = expected_tpt_siso_160MHz;
break;
default:
WARN_ON_ONCE(1);
}
@@ -1508,6 +1528,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
case RATE_MCS_CHAN_WIDTH_80:
ht_tbl_pointer = expected_tpt_mimo2_80MHz;
break;
case RATE_MCS_CHAN_WIDTH_160:
ht_tbl_pointer = expected_tpt_mimo2_160MHz;
break;
default:
WARN_ON_ONCE(1);
}
@@ -1582,12 +1605,17 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
return RATE_MCS_CHAN_WIDTH_160;
case IEEE80211_STA_RX_BW_80:
return RATE_MCS_CHAN_WIDTH_80;
else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
case IEEE80211_STA_RX_BW_40:
return RATE_MCS_CHAN_WIDTH_40;
return RATE_MCS_CHAN_WIDTH_20;
case IEEE80211_STA_RX_BW_20:
default:
return RATE_MCS_CHAN_WIDTH_20;
}
}
/*
@@ -2213,7 +2241,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
int tid)
int tid, bool ndp)
{
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
@@ -2512,7 +2540,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
tid_data = &sta_priv->tid_data[tid];
if (tid_data->state == IWL_AGG_OFF) {
if (tid_data->state == IWL_AGG_OFF && !ndp) {
IWL_DEBUG_RATE(mvm,
"try to aggregate tid %d\n",
tid);
@@ -2565,6 +2593,9 @@ static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};
/* MCS index 9 is not valid for 20MHz VHT channel width,
* but is ok for 40, 80 and 160MHz channels.
*/
static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
{ -60, IWL_RATE_MCS_8_INDEX },
{ -64, IWL_RATE_MCS_7_INDEX },
@@ -2577,7 +2608,7 @@ static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};
static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
static const struct rs_init_rate_info rs_optimal_rates_vht[] = {
{ -60, IWL_RATE_MCS_9_INDEX },
{ -64, IWL_RATE_MCS_8_INDEX },
{ -68, IWL_RATE_MCS_7_INDEX },
@@ -2640,9 +2671,9 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
lq_sta->optimal_nentries =
ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
} else {
lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
lq_sta->optimal_rates = rs_optimal_rates_vht;
lq_sta->optimal_nentries =
ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
ARRAY_SIZE(rs_optimal_rates_vht);
}
} else if (is_ht(rate)) {
lq_sta->optimal_rates = rs_optimal_rates_ht;
@@ -2734,23 +2765,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
*/
if (sta->vht_cap.vht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
initial_rates = rs_optimal_rates_vht_40_80mhz;
nentries = ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
rate->bw = RATE_MCS_CHAN_WIDTH_80;
else
rate->bw = RATE_MCS_CHAN_WIDTH_40;
} else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
case IEEE80211_STA_RX_BW_80:
case IEEE80211_STA_RX_BW_40:
initial_rates = rs_optimal_rates_vht;
nentries = ARRAY_SIZE(rs_optimal_rates_vht);
break;
case IEEE80211_STA_RX_BW_20:
initial_rates = rs_optimal_rates_vht_20mhz;
nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
rate->bw = RATE_MCS_CHAN_WIDTH_20;
} else {
break;
default:
IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
goto out;
}
active_rate = lq_sta->active_siso_rate;
rate->type = LQ_VHT_SISO;
rate->bw = rs_bw_from_sta_bw(sta);
} else if (sta->ht_cap.ht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
initial_rates = rs_optimal_rates_ht;
@@ -3057,6 +3090,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
case RATE_MCS_CHAN_WIDTH_80:
mvm->drv_rx_stats.bw_80_frames++;
break;
case RATE_MCS_CHAN_WIDTH_160:
mvm->drv_rx_stats.bw_160_frames++;
break;
default:
WARN_ONCE(1, "bad BW. rate 0x%x", rate);
}
@@ -3705,7 +3741,8 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff + desc, " %s",
(is_ht20(rate)) ? "20MHz" :
(is_ht40(rate)) ? "40MHz" :
(is_ht80(rate)) ? "80Mhz" : "BAD BW");
(is_ht80(rate)) ? "80MHz" :
(is_ht160(rate)) ? "160MHz" : "BAD BW");
desc += sprintf(buff + desc, " %s %s %s %s\n",
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
@@ -3787,9 +3824,10 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
lq_sta->active_tbl == i ? "*" : "x",
rate->type,
rate->sgi,
is_ht20(rate) ? "20Mhz" :
is_ht40(rate) ? "40Mhz" :
is_ht80(rate) ? "80Mhz" : "ERR",
is_ht20(rate) ? "20MHz" :
is_ht40(rate) ? "40MHz" :
is_ht80(rate) ? "80MHz" :
is_ht160(rate) ? "160MHz" : "ERR",
rate->index);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,

View file

@@ -205,6 +205,7 @@ struct rs_rate {
#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
#define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -362,7 +363,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info);
int tid, struct ieee80211_tx_info *info, bool ndp);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks

View file

@@ -101,7 +101,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
u32 ampdu_status, u8 crypt_len,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
unsigned int hdrlen, fraglen;
@@ -268,7 +268,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
u32 rx_pkt_status;
u8 crypt_len = 0;
@@ -480,7 +479,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
ampdu_status, crypt_len, rxb);
crypt_len, rxb);
if (take_ref)
iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
@@ -499,6 +498,7 @@ struct iwl_mvm_stat_data {
__le32 mac_id;
u8 beacon_filter_average_energy;
struct mvm_statistics_general_v8 *general;
struct mvm_statistics_load *load;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -615,13 +615,15 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
sizeof(struct iwl_notif_statistics_v10);
u32 temperature;
if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
temperature = le32_to_cpu(stats->general.radio_temperature);
@@ -639,6 +641,25 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.on_time_scan);
data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i;
data.load = &stats->load_stats;
rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
struct iwl_mvm_sta *sta;
if (!data.load->avg_energy[i])
continue;
sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = data.load->avg_energy[i];
}
rcu_read_unlock();
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt);

View file

@@ -476,6 +476,9 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
rcu_read_lock();
sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
/* SN is set to the last expired frame + 1 */
IWL_DEBUG_HT(buf->mvm,
"Releasing expired frames for sta %u, sn %d\n",
buf->sta_id, sn);
iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
rcu_read_unlock();
} else if (buf->num_stored) {
@@ -956,6 +959,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
int baid = release->baid;
IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
release->baid, le16_to_cpu(release->nssn));
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
return;

View file

@@ -399,7 +399,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_scan_completed(mvm->hw,
scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
del_timer(&mvm->scan_timer);
cancel_delayed_work(&mvm->scan_timeout_dwork);
} else {
IWL_ERR(mvm,
"got scan complete notification but no scan is running\n");
@@ -1222,15 +1222,16 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
#define SCAN_TIMEOUT (16 * HZ)
#define SCAN_TIMEOUT 20000
void iwl_mvm_scan_timeout(unsigned long data)
void iwl_mvm_scan_timeout_wk(struct work_struct *work)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)data;
struct delayed_work *delayed_work = to_delayed_work(work);
struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
scan_timeout_dwork);
IWL_ERR(mvm, "regular scan timed out\n");
del_timer(&mvm->scan_timer);
iwl_force_nmi(mvm->trans);
}
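
The raw timer becomes a delayed work item here, and the handler recovers its owning struct via to_delayed_work() plus container_of(). A user-space sketch of that recovery with stand-in types, not kernel code:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int pending; };

struct mvm {
    const char *name;
    struct delayed_work scan_timeout_dwork;
};

/* the work item is embedded, so the callback can walk back to mvm */
static void scan_timeout_wk(struct delayed_work *dwork)
{
    struct mvm *mvm = container_of(dwork, struct mvm, scan_timeout_dwork);

    printf("%s: regular scan timed out\n", mvm->name);
}

int main(void)
{
    struct mvm m = { .name = "iwlmvm" };

    scan_timeout_wk(&m.scan_timeout_dwork);     /* simulate expiry */
    return 0;
}
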
@@ -1313,7 +1314,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
msecs_to_jiffies(SCAN_TIMEOUT));
return 0;
}
@@ -1432,7 +1434,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
ieee80211_scan_completed(mvm->hw, aborted);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
del_timer(&mvm->scan_timer);
cancel_delayed_work(&mvm->scan_timeout_dwork);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1628,7 +1630,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
* to release the scan reference here.
*/
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
del_timer(&mvm->scan_timer);
cancel_delayed_work(&mvm->scan_timeout_dwork);
if (notify)
ieee80211_scan_completed(mvm->hw, true);
} else if (notify) {

View file

@@ -215,7 +215,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
enum iwl_sf_state new_state)
{
struct iwl_sf_cfg_cmd sf_cmd = {
.state = cpu_to_le32(SF_FULL_ON),
.state = cpu_to_le32(new_state),
};
struct ieee80211_sta *sta;
int ret = 0;

View file

@@ -310,6 +310,304 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
unsigned long disable_agg_tids,
bool remove_queue)
{
struct iwl_mvm_add_sta_cmd cmd = {};
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
u32 status;
u8 sta_id;
int ret;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return -EINVAL;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_disable_agg |= disable_agg_tids;
cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
cmd.sta_id = mvmsta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
cmd.modify_mask = STA_MODIFY_QUEUES;
if (disable_agg_tids)
cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
if (remove_queue)
cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
rcu_read_unlock();
/* Notify FW of queue removal from the STA queues */
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long tid_bitmap;
unsigned long agg_tids = 0;
s8 sta_id;
int tid;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
return -EINVAL;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
agg_tids |= BIT(tid);
}
spin_unlock_bh(&mvmsta->lock);
return agg_tids;
}
/*
* Remove a queue from a station's resources.
* Note that this only marks the queue as free. It DOESN'T delete a BA
* agreement, and it doesn't disable the queue
*/
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long tid_bitmap;
unsigned long disable_agg_tids = 0;
u8 sta_id;
int tid;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return 0;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
/* Unmap MAC queues and TIDs from this queue */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
spin_unlock_bh(&mvmsta->lock);
rcu_read_unlock();
spin_lock_bh(&mvm->queue_info_lock);
/* Unmap MAC queues and TIDs from this queue */
mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
mvm->queue_info[queue].hw_queue_refcount = 0;
mvm->queue_info[queue].tid_bitmap = 0;
spin_unlock_bh(&mvm->queue_info_lock);
return disable_agg_tids;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
unsigned long tfd_queue_mask, u8 ac)
{
int queue = 0;
u8 ac_to_queue[IEEE80211_NUM_ACS];
int i;
lockdep_assert_held(&mvm->queue_info_lock);
memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
/* See what ACs the existing queues for this STA have */
for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
/* Only DATA queues can be shared */
if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
/*
* The queue to share is chosen only from DATA queues as follows (in
* descending priority):
* 1. An AC_BE queue
* 2. Same AC queue
* 3. Highest AC queue that is lower than new AC
* 4. Any existing AC (there always is at least 1 DATA queue)
*/
/* Priority 1: An AC_BE queue */
if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[IEEE80211_AC_BE];
/* Priority 2: Same AC queue */
else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[ac];
/* Priority 3a: If new AC is VO and VI exists - use VI */
else if (ac == IEEE80211_AC_VO &&
ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[IEEE80211_AC_VI];
/* Priority 3b: No BE so only AC less than the new one is BK */
else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[IEEE80211_AC_BK];
/* Priority 4a: No BE nor BK - use VI if exists */
else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[IEEE80211_AC_VI];
/* Priority 4b: No BE, BK nor VI - use VO if exists */
else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
queue = ac_to_queue[IEEE80211_AC_VO];
/* Make sure queue found (or not) is legal */
if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
(queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
(queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
IWL_ERR(mvm, "No DATA queues available to share\n");
queue = -ENOSPC;
}
return queue;
}
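
A standalone sketch of the selection cascade implemented above; AC indices follow mac80211 (VO=0, VI=1, BE=2, BK=3) and -1 stands in for IEEE80211_INVAL_HW_QUEUE:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

static int pick_shared_queue(const int ac_to_queue[NUM_ACS], int ac)
{
    if (ac_to_queue[AC_BE] >= 0)        /* 1: prefer an AC_BE queue */
        return ac_to_queue[AC_BE];
    if (ac_to_queue[ac] >= 0)           /* 2: same AC */
        return ac_to_queue[ac];
    if (ac == AC_VO && ac_to_queue[AC_VI] >= 0)
        return ac_to_queue[AC_VI];      /* 3a: VO can ride on VI */
    if (ac_to_queue[AC_BK] >= 0)        /* 3b: only lower AC left is BK */
        return ac_to_queue[AC_BK];
    if (ac_to_queue[AC_VI] >= 0)        /* 4a: fall back to VI */
        return ac_to_queue[AC_VI];
    if (ac_to_queue[AC_VO] >= 0)        /* 4b: last resort: VO */
        return ac_to_queue[AC_VO];
    return -1;                          /* no DATA queue to share */
}

int main(void)
{
    int ac_to_queue[NUM_ACS] = { -1, 12, -1, 13 };  /* VI and BK exist */

    /* BE has no BE or same-AC queue, so BK (queue 13) is chosen */
    printf("chosen queue: %d\n", pick_shared_queue(ac_to_queue, AC_BE));
    return 0;
}
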
/*
* If a given queue has a higher AC than the TID stream that is being added to
* it, the queue needs to be redirected to the lower AC. This function does that
* in such a case; otherwise - if no redirection is required - it does nothing,
* unless the %force param is true.
*/
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout,
bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 0,
};
bool shared_queue;
unsigned long mq;
int ret;
/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
* here.
* Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
* value 3 and VO with value 0, so to check if ac X is lower than ac Y
* we need to check if the numerical value of X is LARGER than of Y.
*/
spin_lock_bh(&mvm->queue_info_lock);
if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm,
"No redirection needed on TXQ #%d\n",
queue);
return 0;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
mq = mvm->queue_info[queue].hw_queue_to_mac80211;
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop MAC queues and wait for this queue to empty */
iwl_mvm_stop_mac_queues(mvm, mq);
ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
queue);
ret = -EIO;
goto out;
}
/* Before redirecting the queue we need to de-activate it */
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn, wdg_timeout);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
/* Redirect to lower AC */
iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn);
/* Update AC marking of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].mac80211_ac = ac;
spin_unlock_bh(&mvm->queue_info_lock);
/*
* Mark queue as shared in transport if shared
* Note this has to be done after queue enablement because enablement
* can also set this value, and there is no indication there of shared
* queues
*/
if (shared_queue)
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
out:
/* Continue using the MAC queues */
iwl_mvm_start_mac_queues(mvm, mq);
return ret;
}
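
Since enum ieee80211_ac_numbers is flipped, the priority test the comment above describes reads backwards; here it is in isolation, as a sketch rather than driver code:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };

/* with VO=0 .. BK=3, a numerically LARGER value is a LOWER-priority AC */
static int needs_redirect(int new_tid_ac, int queue_ac)
{
    return new_tid_ac > queue_ac;   /* lower priority -> redirect */
}

int main(void)
{
    printf("BK onto VI queue: %d\n", needs_redirect(AC_BK, AC_VI)); /* 1 */
    printf("VO onto BE queue: %d\n", needs_redirect(AC_VO, AC_BE)); /* 0 */
    return 0;
}
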
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -325,11 +623,20 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
bool using_inactive_queue = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false;
int ssn;
unsigned long tfd_queue_mask;
int ret;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);
spin_lock_bh(&mvm->queue_info_lock);
/*
@@ -338,7 +645,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
@@ -347,29 +655,62 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
/* If no such queue is found, we'll use a DATA queue instead */
}
if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
(mvm->queue_info[mvmsta->reserved_queue].status ==
IWL_MVM_QUEUE_RESERVED ||
mvm->queue_info[mvmsta->reserved_queue].status ==
IWL_MVM_QUEUE_INACTIVE)) {
queue = mvmsta->reserved_queue;
mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
}
if (queue < 0)
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
/*
* Check if this queue is already allocated but inactive.
* In such a case, we'll need to first free this queue before enabling
* it again, so we'll mark it as reserved to make sure no new traffic
* arrives on it
*/
if (queue > 0 &&
mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
using_inactive_queue = true;
IWL_DEBUG_TX_QUEUES(mvm,
"Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
queue, mvmsta->sta_id, tid);
}
/* No free queue - we'll have to share */
if (queue <= 0) {
queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
if (queue > 0) {
shared_queue = true;
mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
}
}
/*
* Mark TXQ as ready, even though it hasn't been fully configured yet,
* to make sure no one else takes it.
* This will allow avoiding re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
if (queue >= 0)
if ((queue > 0) && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
/* TODO: support shared queues for same RA */
if (queue < 0)
/* This shouldn't happen - out of queues */
if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
tid, cfg.sta_id);
return -ENOSPC;
}
/*
* Actual en/disablement of aggregations is through the ADD_STA HCMD,
@@ -380,24 +721,103 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
queue, mvmsta->sta_id, tid);
/*
* If this queue was previously inactive (idle) - we need to free it
* first
*/
if (using_inactive_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 0,
};
u8 ac;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
spin_lock_bh(&mvm->queue_info_lock);
ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
spin_unlock_bh(&mvm->queue_info_lock);
/* Disable the queue */
iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
true);
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
&cmd);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
/* Re-mark the inactive queue as inactive */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
spin_unlock_bh(&mvm->queue_info_lock);
return ret;
}
}
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating %squeue #%d to sta %d on tid %d\n",
shared_queue ? "shared " : "", queue,
mvmsta->sta_id, tid);
if (shared_queue) {
/* Disable any open aggs on this queue */
disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
if (disable_agg_tids) {
IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
queue);
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
}
}
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
wdg_timeout);
/*
* Mark queue as shared in transport if shared
* Note this has to be done after queue enablement because enablement
* can also set this value, and there is no indication there of shared
* queues
*/
if (shared_queue)
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
if (mvmsta->reserved_queue == queue)
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
spin_unlock_bh(&mvmsta->lock);
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
if (ret)
goto out_err;
if (!shared_queue) {
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
if (ret)
goto out_err;
/* If we need to re-enable aggregations... */
if (queue_state == IWL_AGG_ON) {
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
goto out_err;
}
} else {
/* Redirect queue, if needed */
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
wdg_timeout, false);
if (ret)
goto out_err;
}
return 0;
@@ -476,6 +896,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
unsigned long deferred_tid_traffic;
int sta_id, tid;
/* Check inactivity of queues */
iwl_mvm_inactivity_check(mvm);
mutex_lock(&mvm->mutex);
/* Go over all stations with deferred traffic */
@@ -505,6 +928,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
/*
* Check for inactive queues, so we don't reach a situation where we
* can't add a STA due to a shortage in queues that doesn't really exist
*/
iwl_mvm_inactivity_check(mvm);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
@@ -514,7 +943,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
else
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
@@ -568,8 +998,11 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
mvm_sta->tfd_queue_msk = 0;
/* allocate new queues for a TDLS station */
if (sta->tdls) {
/*
* Allocate new queues for a TDLS station, unless we're in DQA mode,
* and then they'll be allocated dynamically
*/
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
@@ -633,7 +1066,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
return 0;
err:
iwl_mvm_tdls_sta_deinit(mvm, sta);
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
return ret;
}
@@ -839,16 +1273,19 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
}
/*
@@ -886,7 +1323,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
} else {
spin_unlock_bh(&mvm_sta->lock);
if (sta->tdls)
if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
@@ -984,8 +1421,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -993,6 +1431,19 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
}
ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
MAC_INDEX_AUX, 0);
@@ -1317,8 +1768,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
break;
case ADD_STA_IMMEDIATE_BA_FAILURE:
IWL_WARN(mvm, "RX BA Session refused by fw\n");
@@ -1373,13 +1824,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* supposed to happen) and we will free the session data while
* RX is being processed in parallel
*/
IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
mvm_sta->sta_id, tid, baid);
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else if (mvm->rx_ba_sessions > 0) {
} else {
u8 baid = mvm_sta->tid_to_baid[tid];
/* check that restart flow didn't zero the counter */
mvm->rx_ba_sessions--;
if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
mvm->rx_ba_sessions--;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
@@ -1395,6 +1849,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
del_timer_sync(&baid_data->session_timer);
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
}
return 0;
@@ -1403,8 +1858,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -1459,6 +1914,7 @@ const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO,
IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};
static const u8 tid_to_ucode_ac[] = {
@@ -1513,7 +1969,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = mvmsta->tid_data[tid].txq_id;
if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
@@ -1902,6 +2359,13 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
@@ -2022,6 +2486,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
0, NULL, 0, key_offset);
break;

View file

@@ -321,6 +321,9 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
* @is_tid_active: has this TID sent traffic in the last
* %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
* field should be ignored.
*/
struct iwl_mvm_tid_data {
struct sk_buff_head deferred_tx_frames;
@@ -333,6 +336,7 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
bool is_tid_active;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -434,6 +438,7 @@ struct iwl_mvm_sta {
bool tlc_amsdu;
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;
};
static inline struct iwl_mvm_sta *
@@ -509,6 +514,9 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start);
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);

View file

@@ -138,28 +138,19 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
protocol = ipv6h->nexthdr;
while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
struct ipv6_opt_hdr *hp;
/* only supported extension headers */
if (protocol != NEXTHDR_ROUTING &&
protocol != NEXTHDR_HOP &&
protocol != NEXTHDR_DEST &&
protocol != NEXTHDR_FRAGMENT) {
protocol != NEXTHDR_DEST) {
skb_checksum_help(skb);
return;
}
if (protocol == NEXTHDR_FRAGMENT) {
struct frag_hdr *hp =
OPT_HDR(struct frag_hdr, skb, off);
protocol = hp->nexthdr;
off += sizeof(struct frag_hdr);
} else {
struct ipv6_opt_hdr *hp =
OPT_HDR(struct ipv6_opt_hdr, skb, off);
protocol = hp->nexthdr;
off += ipv6_optlen(hp);
}
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
protocol = hp->nexthdr;
off += ipv6_optlen(hp);
}
/* if we get here - protocol now should be TCP/UDP */
#endif
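
After this rework the walk advances only through hop-by-hop, routing and destination headers, using their 8-byte-unit length field, and punts anything else to software checksumming. A user-space sketch of the same walk, with a simplified extension-header set (RFC 8200 layout):

#include <stdio.h>

/* first two bytes of every IPv6 option header: next header + length */
struct opt_hdr { unsigned char nexthdr, hdrlen; };

#define NEXTHDR_HOP     0
#define NEXTHDR_TCP     6
#define NEXTHDR_ROUTING 43
#define NEXTHDR_DEST    60

static int is_supported_ext_hdr(unsigned char p)
{
    return p == NEXTHDR_HOP || p == NEXTHDR_ROUTING || p == NEXTHDR_DEST;
}

int main(void)
{
    /* an 8-byte hop-by-hop header followed by TCP */
    unsigned char pkt[] = { NEXTHDR_TCP, 0, 1, 1, 1, 1, 1, 1 };
    unsigned char protocol = NEXTHDR_HOP;
    unsigned int off = 0;

    while (is_supported_ext_hdr(protocol)) {
        struct opt_hdr *hp = (struct opt_hdr *)(pkt + off);

        protocol = hp->nexthdr;
        off += (hp->hdrlen + 1) * 8;    /* ipv6_optlen() equivalent */
    }
    printf("upper protocol %u at offset %u\n", protocol, off);
    return 0;
}
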
@@ -388,6 +379,23 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
u8 *crypto_hdr)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
u64 pn;
pn = atomic64_inc_return(&keyconf->tx_pn);
crypto_hdr[0] = pn;
crypto_hdr[2] = 0;
crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
crypto_hdr[1] = pn >> 8;
crypto_hdr[4] = pn >> 16;
crypto_hdr[5] = pn >> 24;
crypto_hdr[6] = pn >> 32;
crypto_hdr[7] = pn >> 40;
}
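
The helper scatters the 48-bit packet number across the CCMP/GCMP header in the 802.11 layout: PN0/PN1 first, a reserved byte, ExtIV plus the key index, then PN2-PN5. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

static void fill_pn_hdr(uint8_t hdr[8], uint64_t pn, int keyidx)
{
    hdr[0] = pn;                    /* PN0 */
    hdr[1] = pn >> 8;               /* PN1 */
    hdr[2] = 0;                     /* reserved */
    hdr[3] = 0x20 | (keyidx << 6);  /* ExtIV | key id in bits 6-7 */
    hdr[4] = pn >> 16;              /* PN2 */
    hdr[5] = pn >> 24;              /* PN3 */
    hdr[6] = pn >> 32;              /* PN4 */
    hdr[7] = pn >> 40;              /* PN5 */
}

int main(void)
{
    uint8_t hdr[8];
    int i;

    fill_pn_hdr(hdr, 0x010203040506ULL, 1);
    for (i = 0; i < 8; i++)
        printf("%02x ", hdr[i]);
    printf("\n");
    return 0;
}
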
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -405,15 +413,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
pn = atomic64_inc_return(&keyconf->tx_pn);
crypto_hdr[0] = pn;
crypto_hdr[2] = 0;
crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
crypto_hdr[1] = pn >> 8;
crypto_hdr[4] = pn >> 16;
crypto_hdr[5] = pn >> 24;
crypto_hdr[6] = pn >> 32;
crypto_hdr[7] = pn >> 40;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -433,6 +433,18 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
/* TODO: Taking the key from the table might introduce a race
* when PTK rekeying is done, leaving old packets with a PN
* based on the old key but the message encrypted with a new
* one.
* Need to handle this.
*/
tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
default:
tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
}
@@ -534,6 +546,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* (this is not possible for unicast packets as a TDLS discovery
* response is sent without a station entry); otherwise use the
* AUX station.
* In DQA mode, if vif is of type STATION and frames are not multicast,
* they should be sent from the BSS queue. For example, TDLS setup
* frames should be sent on this queue, as they go through the AP.
*/
sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
@@ -551,6 +566,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_STATION_COUNT)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION) {
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
}
@@ -884,15 +902,17 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* nullfunc frames should go to the MGMT queue regardless of QOS
*/
tid = IWL_MAX_TID_COUNT;
txq_id = mvmsta->tid_data[tid].txq_id;
}
if (iwl_mvm_is_dqa_supported(mvm))
txq_id = mvmsta->tid_data[tid].txq_id;
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
if (sta->tdls) {
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -905,9 +925,12 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
if (iwl_mvm_is_dqa_supported(mvm)) {
if (unlikely(mvmsta->tid_data[tid].txq_id ==
IEEE80211_INVAL_HW_QUEUE)) {
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active) &&
iwl_mvm_is_dqa_supported(mvm)) {
/* If TXQ needs to be allocated... */
if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
@@ -917,11 +940,22 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
txq_id = mvmsta->tid_data[tid].txq_id;
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
txq_id);
}
/* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
@@ -1313,7 +1347,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
if (iwl_mvm_is_dqa_supported(mvm)) {
enum iwl_mvm_agg_state state;
state = mvmsta->tid_data[tid].state;
txq_agg = (state == IWL_AGG_ON ||
state == IWL_EMPTYING_HW_QUEUE_DELBA);
} else {
txq_agg = txq_id >= mvm->first_agg_queue;
}
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
@@ -1644,7 +1686,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
}
out:

View file

@@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
int i;
lockdep_assert_held(&mvm->queue_info_lock);
/* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
/*
* If no free queue was found, settle for an inactive one to reconfigure.
* Make sure the inactive queue either already belongs to this STA,
* or, if it belongs to another one, that it isn't reserved.
*/
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
(sta_id == mvm->queue_info[i].ra_sta_id ||
!mvm->queue_info[i].reserved))
return i;
return -ENOSPC;
}
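iwl_mvm_find_free_queue() must be called with queue_info_lock held and returns -ENOSPC only when neither a free nor a reclaimable inactive queue exists. A minimal caller sketch, assuming the DQA data-queue range constants from mvm.h:

/*
 * Hypothetical caller, for illustration only: pick a TX queue for
 * sta_id from the DQA data range; -ENOSPC means nothing is free or
 * reclaimable.
 */
spin_lock_bh(&mvm->queue_info_lock);
queue = iwl_mvm_find_free_queue(mvm, sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
				IWL_MVM_DQA_MAX_DATA_QUEUE);
spin_unlock_bh(&mvm->queue_info_lock);
if (queue < 0)
	return queue;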
@ -643,13 +655,21 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
}
/* Update mappings and refcounts */
if (mvm->queue_info[queue].hw_queue_refcount > 0)
enable_queue = false;
mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount++;
if (mvm->queue_info[queue].hw_queue_refcount > 1)
enable_queue = false;
else
mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
if (enable_queue) {
if (cfg->tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
tid_to_mac80211_ac[cfg->tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
}
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@ -671,6 +691,10 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.tid = cfg->tid,
};
/* Set sta_id in the command, if it exists */
if (iwl_mvm_is_dqa_supported(mvm))
cmd.sta_id = cfg->sta_id;
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
@ -752,6 +776,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].tid_bitmap = 0;
mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
/* Regardless of whether this was a STA's reserved TXQ - mark it as unreserved */
mvm->queue_info[queue].reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_disable(mvm->trans, queue, false);
@ -1039,6 +1066,155 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_connection_loss(vif);
}
/*
* Remove inactive TIDs of a given queue.
* If all queue TIDs are inactive - mark the queue as inactive
* If only some of the queue's TIDs are inactive - unmap them from the queue
*/
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int queue,
unsigned long tid_bitmap)
{
int tid;
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
tid_bitmap &= ~BIT(tid);
}
/* If all TIDs in the queue are inactive - mark queue as inactive. */
if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
queue);
return;
}
/*
* If we are here, this is a shared queue and not all TIDs timed out.
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
mvm->queue_info[queue].hw_queue_refcount--;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm,
"Removing inactive TID %d from shared Q:%d\n",
tid, queue);
}
IWL_DEBUG_TX_QUEUES(mvm,
"TXQ #%d left with tid bitmap 0x%x\n", queue,
mvm->queue_info[queue].tid_bitmap);
/*
* Different TIDs may map to the same mac queue, so make sure
* every remaining TID's corresponding mac queue stays enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
mvm->queue_info[queue].hw_queue_to_mac80211 |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
/* TODO: if queue was shared - need to re-enable AGGs */
}
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
unsigned long timeout_queues_map = 0;
unsigned long now = jiffies;
int i;
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
timeout_queues_map |= BIT(i);
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
/*
* If a queue times out - mark it as INACTIVE (don't remove it right away
* if we don't have to). This is an optimization in case traffic comes
* along later, and we don't HAVE to reuse a currently-inactive queue
*/
for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
u8 sta_id;
int tid;
unsigned long inactive_tid_bitmap = 0;
unsigned long queue_tid_bitmap;
spin_lock_bh(&mvm->queue_info_lock);
queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
/* If TXQ isn't in active use anyway - nothing to do here... */
if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
spin_unlock_bh(&mvm->queue_info_lock);
continue;
}
/* Check to see if there are inactive TIDs on this queue */
for_each_set_bit(tid, &queue_tid_bitmap,
IWL_MAX_TID_COUNT + 1) {
if (time_after(mvm->queue_info[i].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
continue;
inactive_tid_bitmap |= BIT(tid);
}
spin_unlock_bh(&mvm->queue_info_lock);
/* If all TIDs are active - finish check on this queue */
if (!inactive_tid_bitmap)
continue;
/*
* If we are here - the queue hasn't been served recently and is
* still in use
*/
sta_id = mvm->queue_info[i].ra_sta_id;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/*
* If the STA doesn't exist anymore, it isn't an error. It could
* be that it was removed since we collected the queue map, in which
* case it should have inactivated its queues anyway.
*/
if (IS_ERR_OR_NULL(sta))
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
spin_lock(&mvm->queue_info_lock);
iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap);
spin_unlock(&mvm->queue_info_lock);
spin_unlock_bh(&mvmsta->lock);
}
rcu_read_unlock();
}
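The per-TID staleness test above relies on the wraparound-safe jiffies idiom; spelled out as a sketch (a plain comparison on raw jiffies would break when the counter wraps):

/*
 * Illustration only: a TID is stale once its last frame is older than
 * IWL_MVM_DQA_QUEUE_TIMEOUT. time_after() stays correct across jiffies
 * wraparound, unlike a plain '<' on the raw counters.
 */
bool stale = !time_after(mvm->queue_info[i].last_frame_time[tid] +
			 IWL_MVM_DQA_QUEUE_TIMEOUT, jiffies);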
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)

drivers/net/wireless/intel/iwlwifi/pcie/drv.c

@ -433,6 +433,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 8000 Series */
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
@ -454,6 +455,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
@ -481,6 +484,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
@ -491,6 +496,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
@ -507,6 +516,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@ -598,7 +610,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
@ -636,12 +647,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
pci_set_drvdata(pdev, iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv);
if (IS_ERR(iwl_trans->drv)) {
ret = PTR_ERR(iwl_trans->drv);
goto out_free_trans;
}
@ -680,7 +689,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_free_drv:
iwl_drv_stop(trans_pcie->drv);
iwl_drv_stop(iwl_trans->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
@ -689,7 +698,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* if RTPM was in use, restore it to the state before probe */
if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
@ -700,7 +708,7 @@ static void iwl_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(trans->dev);
}
iwl_drv_stop(trans_pcie->drv);
iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
}

drivers/net/wireless/intel/iwlwifi/pcie/internal.h

@ -68,12 +68,14 @@ struct iwl_host_cmd;
* struct iwl_rx_mem_buffer
* @page_dma: bus address of rxb page
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
struct page *page;
u16 vid;
bool invalid;
struct list_head list;
};
@ -230,15 +232,16 @@ struct iwl_queue {
#define TFD_CMD_SLOTS 32
/*
* The FH will write back to the first TB only, so we need
* to copy some data into the buffer regardless of whether
* it should be mapped or not. This indicates how big the
* first TB must be to include the scratch buffer. Since
* the scratch is 4 bytes at offset 12, it's 16 now. If we
* make it bigger then allocations will be bigger and copy
* slower, so that's probably not useful.
* The FH will write back to the first TB only, so we need to copy some data
* into the buffer regardless of whether it should be mapped or not.
* This indicates how big the first TB must be to include the scratch buffer
* and the assigned PN.
* Since the PN is 16 bytes at offset 24, the first TB is 40 bytes now.
* If we make it bigger, allocations will be bigger and copying slower, so
* that's probably not useful.
*/
#define IWL_HCMD_SCRATCHBUF_SIZE 16
#define IWL_FIRST_TB_SIZE 40
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
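As a sanity check on the 40-byte figure, the arithmetic from the comment above can be spelled out; this is an illustrative sketch using the quoted offsets, not values read from the real structures:

/*
 * Illustration only: the write-back scratch is 4 bytes at offset 12 and
 * the PN is 16 bytes at offset 24, so the first TB must span
 * max(12 + 4, 24 + 16) = 40 bytes. Compile-time check of that
 * arithmetic (placed inside any function):
 */
BUILD_BUG_ON(24 + 16 != IWL_FIRST_TB_SIZE);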
struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
@ -248,20 +251,18 @@ struct iwl_pcie_txq_entry {
struct iwl_cmd_meta meta;
};
struct iwl_pcie_txq_scratch_buf {
struct iwl_cmd_header hdr;
u8 buf[8];
__le32 scratch;
struct iwl_pcie_first_tb_buf {
u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
/**
* struct iwl_txq - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
* @scratchbufs: start of command headers, including scratch buffers, for
* @first_tb_bufs: start of command headers, including scratch buffers, for
* the writeback -- this is DMA memory and an array holding one buffer
* for each command on the queue
* @scratchbufs_dma: DMA address for the scratchbufs start
* @first_tb_dma: DMA address for the first_tb_bufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
* @stuck_timer: timer that fires if queue gets stuck
@ -279,8 +280,8 @@ struct iwl_pcie_txq_scratch_buf {
struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
struct iwl_pcie_txq_scratch_buf *scratchbufs;
dma_addr_t scratchbufs_dma;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
unsigned long frozen_expiry_remainder;
@ -296,10 +297,10 @@ struct iwl_txq {
};
static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
return txq->scratchbufs_dma +
sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
return txq->first_tb_dma +
sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
struct iwl_tso_hdr_page {
@ -313,7 +314,6 @@ struct iwl_tso_hdr_page {
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
@ -351,7 +351,6 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
struct net_device napi_dev;
@ -385,6 +384,8 @@ struct iwl_trans_pcie {
wait_queue_head_t wait_command_queue;
wait_queue_head_t d0i3_waitq;
u8 page_offs, dev_cmd_offs;
u8 cmd_queue;
u8 cmd_fifo;
unsigned int cmd_q_wdg_timeout;
@ -471,6 +472,10 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
@ -496,7 +501,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -519,7 +524,16 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock(&trans_pcie->irq_lock);
_iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -542,6 +556,14 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
}
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock(&trans_pcie->irq_lock);
_iwl_enable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
}
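The underscore variants are the lock-free cores: paths that already hold irq_lock (the interrupt handler tail, ICT reset) call them directly, while everyone else goes through the locking wrappers above. A usage sketch:

/*
 * Illustration: a caller that already owns irq_lock must use the
 * underscore variant - calling the wrapper here would self-deadlock
 * on the non-recursive spinlock.
 */
spin_lock(&trans_pcie->irq_lock);
if (test_bit(STATUS_INT_ENABLED, &trans->status))
	_iwl_enable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);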
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

drivers/net/wireless/intel/iwlwifi/pcie/rx.c

@ -161,21 +161,21 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
u64 val)
{
iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
/*
* iwl_pcie_rx_stop - stops the Rx DMA
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
} else {
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
1000);
}
}
/*
@ -233,10 +233,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
/*
* iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
* iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct iwl_rx_mem_buffer *rxb;
@ -259,7 +259,7 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
rxb->invalid = false;
/* The first 12 bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
@ -281,10 +281,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
}
/*
* iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
* iwl_pcie_rxsq_restock - restock implementation for single queue rx
*/
static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct iwl_rx_mem_buffer *rxb;
@ -310,6 +310,7 @@ static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
rxb->invalid = false;
/* Point to Rx buffer via next RBD in circular buffer */
bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
@ -343,9 +344,9 @@ static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
if (trans->cfg->mq_rx_supported)
iwl_pcie_rxq_mq_restock(trans, rxq);
iwl_pcie_rxmq_restock(trans, rxq);
else
iwl_pcie_rxq_sq_restock(trans, rxq);
iwl_pcie_rxsq_restock(trans, rxq);
}
/*
@ -809,17 +810,17 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_FRBDCB_BA_LSB(i),
trans_pcie->rxq[i].bd_dma);
iwl_write_prph64_no_grab(trans,
RFH_Q_FRBDCB_BA_LSB(i),
trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_URBDCB_BA_LSB(i),
trans_pcie->rxq[i].used_bd_dma);
iwl_write_prph64_no_grab(trans,
RFH_Q_URBDCB_BA_LSB(i),
trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_URBD_STTS_WPTR_LSB(i),
trans_pcie->rxq[i].rb_stts_dma);
iwl_write_prph64_no_grab(trans,
RFH_Q_URBD_STTS_WPTR_LSB(i),
trans_pcie->rxq[i].rb_stts_dma);
/* Reset device index tables */
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
@ -828,9 +829,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
enabled |= BIT(i) | BIT(i + 16);
}
/* restock default queue */
iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
/*
* Enable Rx DMA
* Rx buffer size 4k or 8k or 12k
@ -956,16 +954,18 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
else
list_add(&rxb->list, &def_rxq->rx_used);
trans_pcie->global_table[i] = rxb;
rxb->vid = (u16)i;
rxb->vid = (u16)(i + 1);
rxb->invalid = true;
}
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
if (trans->cfg->mq_rx_supported) {
if (trans->cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
} else {
iwl_pcie_rxq_sq_restock(trans, def_rxq);
else
iwl_pcie_rx_hw_init(trans, def_rxq);
}
iwl_pcie_rxq_restock(trans, def_rxq);
spin_lock(&def_rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
@ -1244,10 +1244,19 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
*/
u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
"Invalid rxb index from HW %u\n", (u32)vid))
if (WARN(!vid ||
vid > ARRAY_SIZE(trans_pcie->global_table),
"Invalid rxb index from HW %u\n", (u32)vid)) {
iwl_force_nmi(trans);
goto out;
rxb = trans_pcie->global_table[vid];
}
rxb = trans_pcie->global_table[vid - 1];
if (WARN(rxb->invalid,
"Invalid rxb from HW %u\n", (u32)vid)) {
iwl_force_nmi(trans);
goto out;
}
rxb->invalid = true;
} else {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
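The VID scheme is deliberately 1-based so that a zeroed or stale descriptor can never alias buffer 0; a sketch of the round-trip (init-time publish and IRQ-time lookup shown together, names as in the code above):

/*
 * Illustration only: the driver publishes i + 1 when handing buffer i
 * to the HW; the HW echoes the low 12 bits back, and the driver maps
 * them to the 0-based table by subtracting one. vid == 0 is invalid.
 */
rxb->vid = (u16)(i + 1);                      /* publish at init */
vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;  /* 12-bit HW echo */
rxb = trans_pcie->global_table[vid - 1];      /* back to 0-based */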
@ -1527,7 +1536,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* have anything to service
*/
if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
_iwl_enable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_NONE;
@ -1719,15 +1728,17 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
inta & ~trans_pcie->inta_mask);
}
/* we are loading the firmware, enable FH_TX interrupt only */
if (handled & CSR_INT_BIT_FH_TX)
iwl_enable_fw_load_int(trans);
spin_lock(&trans_pcie->irq_lock);
/* only re-enable all interrupts if disabled by irq */
else if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
if (test_bit(STATUS_INT_ENABLED, &trans->status))
_iwl_enable_interrupts(trans);
/* we are loading the firmware, enable FH_TX interrupt only */
else if (handled & CSR_INT_BIT_FH_TX)
iwl_enable_fw_load_int(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
spin_unlock(&trans_pcie->irq_lock);
out:
lock_map_release(&trans->sync_cmd_lockdep_map);
@ -1791,7 +1802,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
return;
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
_iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@ -1807,7 +1818,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->use_ict = true;
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
_iwl_enable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
}

drivers/net/wireless/intel/iwlwifi/pcie/trans.c

@ -608,18 +608,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
/*
* ucode
*/
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
dma_addr_t phy_addr, u32 byte_cnt)
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
u32 dst_addr, dma_addr_t phy_addr,
u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
int ret;
trans_pcie->ucode_write_complete = false;
if (!iwl_trans_grab_nic_access(trans, &flags))
return -EIO;
iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
@ -642,7 +634,50 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
u32 dst_addr, dma_addr_t phy_addr,
u32 byte_cnt)
{
/* Stop DMA channel */
iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);
/* Configure SRAM address */
iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
dst_addr);
/* Configure DRAM address - 64 bit */
iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);
/* Configure byte count to transfer */
iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
/* Enable the DRAM2SRAM to start */
iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
TFH_SRV_DMA_TO_DRIVER |
TFH_SRV_DMA_START);
}
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
u32 dst_addr, dma_addr_t phy_addr,
u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
int ret;
trans_pcie->ucode_write_complete = false;
if (!iwl_trans_grab_nic_access(trans, &flags))
return -EIO;
if (trans->cfg->use_tfh)
iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
byte_cnt);
else
iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
byte_cnt);
iwl_trans_release_nic_access(trans, &flags);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@ -801,6 +836,8 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
*first_ucode_section = last_read_idx;
iwl_enable_interrupts(trans);
if (cpu == 1)
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
else
@ -980,6 +1017,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
iwl_pcie_apply_destination(trans);
}
iwl_enable_interrupts(trans);
/* release CPU reset */
iwl_write32(trans, CSR_RESET, 0);
@ -1033,9 +1072,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@ -1079,9 +1116,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
* the time, unless the interrupt is ACKed even if the interrupt
* should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
@ -1215,7 +1250,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
ret = iwl_pcie_load_given_ucode(trans, fw);
iwl_enable_interrupts(trans);
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_is_rfkill_set(trans);
@ -1393,8 +1427,12 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
max_rx_vector = trans_pcie->allocated_vector - 1;
if (!trans_pcie->msix_enabled)
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
return;
}
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
@ -1571,15 +1609,11 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
mutex_lock(&trans_pcie->mutex);
/* disable interrupts - don't enable HW RF kill interrupt */
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans, true);
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
@ -1643,6 +1677,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
trans_pcie->page_offs = trans_cfg->cb_data_offs;
trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
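The two offsets carve consecutive pointer-sized slots out of skb->cb, which is what lets the transport drop its dependency on mac80211's info->driver_data; a sketch of the resulting layout (illustrative):

/*
 * Illustration only: the op mode supplies one base offset
 * (cb_data_offs) into skb->cb; the transport keeps its TSO page
 * pointer there and its dev_cmd pointer one void * later
 * (dev_cmd_offs == page_offs + sizeof(void *)).
 */
struct page **page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
struct iwl_device_cmd **cmd_ptr =
	(void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs);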
@ -1913,6 +1950,48 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
#define IWL_FLUSH_WAIT_MS 2000
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 scd_sram_addr;
u8 buf[16];
int cnt;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
if (cnt & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
else
tbl_dw = tbl_dw & 0x0000FFFF;
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
cnt, active ? "" : "in", fifo, tbl_dw,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
(TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
}
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -1920,8 +1999,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
u32 scd_sram_addr;
u8 buf[16];
int ret = 0;
/* waiting for all the tx frames complete might take a while */
@ -1961,42 +2038,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
if (!ret)
return 0;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
if (cnt & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
else
tbl_dw = tbl_dw & 0x0000FFFF;
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
cnt, active ? "" : "in", fifo, tbl_dw,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
(TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
if (ret)
iwl_trans_pcie_log_scd_error(trans, txq);
return ret;
}
@ -2745,6 +2788,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,

drivers/net/wireless/intel/iwlwifi/pcie/tx.c

@ -70,6 +70,7 @@
* Tx queue resumed.
*
***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
unsigned int max;
@ -154,10 +155,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
struct iwl_txq *txq = (void *)data;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
u32 scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
u8 buf[16];
int i;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@ -169,38 +166,8 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
jiffies_to_msecs(txq->wd_timeout));
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
iwl_trans_read_mem32(trans,
trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
else
tbl_dw = tbl_dw & 0x0000FFFF;
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
(TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
iwl_trans_pcie_log_scd_error(trans, txq);
iwl_force_nmi(trans);
}
@ -393,7 +360,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
return;
}
/* first TB is never freed - it's the scratchbuf data */
/* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
@ -491,7 +458,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
size_t scratchbuf_sz;
size_t tb0_buf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
@ -526,17 +493,14 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->tfds)
goto error;
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch));
BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
&txq->scratchbufs_dma,
txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
&txq->first_tb_dma,
GFP_KERNEL);
if (!txq->scratchbufs)
if (!txq->first_tb_bufs)
goto err_free_tfds;
txq->q.id = txq_id;
@ -578,22 +542,27 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
* Circular buffer (TFD queue in DRAM) physical base address */
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->q.dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->q.dma_addr >> 8);
return 0;
}
static void iwl_pcie_free_tso_page(struct sk_buff *skb)
static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct page **page_ptr;
if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) {
struct page *page =
info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA];
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
__free_page(page);
info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL;
if (*page_ptr) {
__free_page(*page_ptr);
*page_ptr = NULL;
}
}
@ -639,7 +608,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb))
continue;
iwl_pcie_free_tso_page(skb);
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
@ -708,8 +677,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
txq->tfds = NULL;
dma_free_coherent(dev,
sizeof(*txq->scratchbufs) * txq->q.n_window,
txq->scratchbufs, txq->scratchbufs_dma);
sizeof(*txq->first_tb_bufs) * txq->q.n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
kfree(txq->entries);
@ -786,9 +755,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->q.dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->q.dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
txq->q.read_ptr = 0;
txq->q.write_ptr = 0;
@ -996,6 +970,12 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
if (trans->cfg->use_tfh)
iwl_write_direct32(trans, TFH_TRANSFER_MODE,
TFH_TRANSFER_MAX_PENDING_REQ |
TFH_CHUNK_SIZE_128 |
TFH_CHUNK_SPLIT_MODE);
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
@ -1084,7 +1064,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON_ONCE(!skb))
continue;
iwl_pcie_free_tso_page(skb);
iwl_pcie_free_tso_page(trans_pcie, skb);
__skb_queue_tail(skbs, skb);
@ -1115,17 +1095,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
struct iwl_device_cmd *dev_cmd =
info->driver_data[dev_cmd_idx];
struct iwl_device_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
/*
* Note that we can very well be overflowing again.
* In that case, iwl_queue_space will be small again
* and we won't wake mac80211's queue.
*/
iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
}
spin_lock_bh(&txq->lock);
@ -1354,6 +1334,15 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
txq->active = true;
}
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
txq->ampdu = !shared_mode;
}
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@ -1413,7 +1402,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
u16 copy_size, cmd_size, scratch_size;
u16 copy_size, cmd_size, tb0_size;
bool had_nocopy = false;
u8 group_id = iwl_cmd_groupid(cmd->id);
int i, ret;
@ -1444,9 +1433,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (!cmd->len[i])
continue;
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
/* need at least IWL_FIRST_TB_SIZE copied */
if (copy_size < IWL_FIRST_TB_SIZE) {
int copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmdlen[i])
copy = cmdlen[i];
@ -1567,8 +1556,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/*
* Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
* in total (for the scratchbuf handling), but copy up to what
* Otherwise we need at least IWL_FIRST_TB_SIZE copied
* in total (for bi-directional DMA), but copy up to what
* we can fit into the payload for debug dump purposes.
*/
copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
@ -1577,8 +1566,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
cmd_pos += copy;
/* However, treat copy_size the proper way, we need it below */
if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
if (copy_size < IWL_FIRST_TB_SIZE) {
copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmd->len[i])
copy = cmd->len[i];
@ -1593,18 +1582,18 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the scratchbuf */
scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
iwl_pcie_txq_build_tfd(trans, txq,
iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
scratch_size, true);
iwl_pcie_get_first_tb_dma(txq, idx),
tb0_size, true);
/* map first command fragment, if any remains */
if (copy_size > scratch_size) {
if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
((u8 *)&out_cmd->hdr) + scratch_size,
copy_size - scratch_size,
((u8 *)&out_cmd->hdr) + tb0_size,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_pcie_tfd_unmap(trans, out_meta,
@ -1614,7 +1603,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
copy_size - scratch_size, false);
copy_size - tb0_size, false);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@ -1959,7 +1948,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
hdr_len, skb->len - hdr_len);
@ -2015,7 +2004,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@ -2024,6 +2012,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
int ret;
struct tso_t tso;
@ -2035,7 +2024,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
@ -2054,7 +2043,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
get_page(hdr_page->page);
start_hdr = hdr_page->pos;
info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
*page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@ -2264,10 +2254,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(q) < 3)) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_device_cmd **dev_cmd_ptr;
info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
dev_cmd;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
spin_unlock(&txq->lock);
@ -2294,7 +2286,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@ -2312,7 +2304,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* setup of the first TB)
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
hdr_len - IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
amsdu = ieee80211_is_data_qos(fc) &&
(*ieee80211_get_qos_ctl(hdr) &
@ -2326,17 +2318,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = len;
}
/* The first TB points to the scratchbuf data - min_copy bytes */
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
IWL_HCMD_SCRATCHBUF_SIZE);
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_HCMD_SCRATCHBUF_SIZE, true);
IWL_FIRST_TB_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
goto out_err;