Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem

Commit 88daf80dcc by John W. Linville, 2014-02-20 15:02:02 -05:00
211 changed files with 9951 additions and 6366 deletions

View file

@ -161,6 +161,8 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
int last_rssi;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,

View file

@ -204,7 +204,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
break;
/* 80MHZ */
case 2:
status->flag |= RX_FLAG_80MHZ;
status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
@ -266,7 +266,7 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->flag & RX_FLAG_80MHZ ? "80" : "",
status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,

View file

@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
if (nw_type & ADHOC_NETWORK) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
return;
}
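Note: the extra argument follows a cfg80211 API change that reports the joined channel to userspace. As best I can tell, the helper's prototype in this kernel generation is roughly the following; treat the exact signature as an assumption rather than something stated by this diff.

/* include/net/cfg80211.h -- assumed prototype, for reference only */
void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
			  struct ieee80211_channel *channel, gfp_t gfp);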
@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
if (vif->nw_type & ADHOC_NETWORK) {
if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
}
memset(bssid, 0, ETH_ALEN);
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret, rssi_thold;
int n_match_sets = request->n_match_sets;
/*
* If there's a matchset w/o an SSID, then assume it's just for
* the RSSI (nothing else is currently supported) and ignore it.
* The device only supports a global RSSI filter that we set below.
*/
if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
n_match_sets = 0;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
request->n_ssids,
request->match_sets,
request->n_match_sets);
n_match_sets);
if (ret < 0)
return ret;
if (!request->n_match_sets) {
if (!n_match_sets) {
ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0);
if (ret < 0)
@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
ar->fw_capabilities)) {
if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
rssi_thold = 0;
else if (request->rssi_thold < -127)
else if (request->min_rssi_thold < -127)
rssi_thold = -127;
else
rssi_thold = request->rssi_thold;
rssi_thold = request->min_rssi_thold;
ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
rssi_thold);

View file

@ -120,18 +120,6 @@ config ATH9K_WOW
This option enables Wake on Wireless LAN support for certain cards.
Currently, AR9462 is supported.
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
default n
---help---
Say Y, if you want to use the ath9k specific rate control
module instead of minstrel_ht. Be warned that there are various
issues with the ath9k RC and minstrel is a more robust algorithm.
Note that even if this option is selected, "ath9k_rate_control"
has to be passed to mac80211 using the module parameter,
ieee80211_default_rc_algo.
config ATH9K_RFKILL
bool "Atheros ath9k rfkill support" if EXPERT
depends on ATH9K

View file

@ -8,7 +8,6 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o

View file

@ -23,10 +23,11 @@
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
#define MAXIQCAL 3
struct coeff {
int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int iqc_coeff[2];
};
@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
if (i2_p_q2_a0_d1 > 0x1000)
i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return true;
}
static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
int nmeasurement,
int max_delta)
{
int mp_max = -64, max_idx = 0;
@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
if (mp_coeff[i] > mp_max) {
mp_max = mp_coeff[i];
if (mp_coeff[i][0] > mp_max) {
mp_max = mp_coeff[i][0];
max_idx = i;
} else if (mp_coeff[i] < mp_min) {
mp_min = mp_coeff[i];
} else if (mp_coeff[i][0] < mp_min) {
mp_min = mp_coeff[i][0];
min_idx = i;
}
}
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
if ((abs(mp_coeff[i]) < abs(mp_max)) ||
(abs(mp_coeff[i]) < abs(mp_min))) {
mp_avg += mp_coeff[i];
if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
(abs(mp_coeff[i][0]) < abs(mp_min))) {
mp_avg += mp_coeff[i][0];
mp_count++;
}
}
@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
if (mp_count)
mp_avg /= mp_count;
else
mp_avg = mp_coeff[nmeasurement - 1];
mp_avg = mp_coeff[nmeasurement - 1][0];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
else
outlier_idx = min_idx;
mp_coeff[outlier_idx] = mp_avg;
mp_coeff[outlier_idx][0] = mp_avg;
}
}
static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
struct coeff *coeff,
bool is_reusable)
static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
struct coeff *coeff,
bool is_reusable)
{
int i, im, nmeasurement;
int magnitude, phase;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
struct ath9k_hw_cal_data *caldata = ah->caldata;
@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
/* detect outlier only if nmeasurement > 1 */
if (nmeasurement > 1) {
/* Detect magnitude outlier */
ar9003_hw_detect_outlier(coeff->mag_coeff[i],
nmeasurement, MAX_MAG_DELTA);
/*
* Skip normal outlier detection for AR9550.
*/
if (!AR_SREV_9550(ah)) {
/* detect outlier only if nmeasurement > 1 */
if (nmeasurement > 1) {
/* Detect magnitude outlier */
ar9003_hw_detect_outlier(coeff->mag_coeff[i],
nmeasurement,
MAX_MAG_DELTA);
/* Detect phase outlier */
ar9003_hw_detect_outlier(coeff->phs_coeff[i],
nmeasurement, MAX_PHS_DELTA);
/* Detect phase outlier */
ar9003_hw_detect_outlier(coeff->phs_coeff[i],
nmeasurement,
MAX_PHS_DELTA);
}
}
for (im = 0; im < nmeasurement; im++) {
magnitude = coeff->mag_coeff[i][im][0];
phase = coeff->phs_coeff[i][im][0];
coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
((coeff->phs_coeff[i][im] & 0x7f) << 7);
coeff->iqc_coeff[0] =
(phase & 0x7f) | ((magnitude & 0x7f) << 7);
if ((im % 2) == 0)
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
struct coeff *coeff,
int i, int nmeasurement)
{
struct ath_common *common = ath9k_hw_common(ah);
int im, ix, iy, temp;
for (im = 0; im < nmeasurement; im++) {
for (ix = 0; ix < MAXIQCAL - 1; ix++) {
for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
if (coeff->mag_coeff[i][im][iy] <
coeff->mag_coeff[i][im][ix]) {
temp = coeff->mag_coeff[i][im][ix];
coeff->mag_coeff[i][im][ix] =
coeff->mag_coeff[i][im][iy];
coeff->mag_coeff[i][im][iy] = temp;
}
if (coeff->phs_coeff[i][im][iy] <
coeff->phs_coeff[i][im][ix]) {
temp = coeff->phs_coeff[i][im][ix];
coeff->phs_coeff[i][im][ix] =
coeff->phs_coeff[i][im][iy];
coeff->phs_coeff[i][im][iy] = temp;
}
}
}
coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
ath_dbg(common, CALIBRATE,
"IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
i, im,
coeff->mag_coeff[i][im][0],
coeff->phs_coeff[i][im][0]);
}
}
static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
struct coeff *coeff,
int iqcal_idx,
int nmeasurement)
{
int i;
if ((iqcal_idx + 1) != MAXIQCAL)
return false;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
__ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
}
return true;
}
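Note: the sort-and-pick step above boils down to taking the median of the MAXIQCAL (= 3) measurements gathered per gain entry; element MAXIQCAL / 2 of the sorted array is the middle value. A minimal standalone sketch with hypothetical inputs:

#define MAXIQCAL 3

/* Median of three measurements via the same pairwise-exchange sort
 * used in __ar955x_tx_iq_cal_sort(); e.g. {7, 3, 5} -> 5. */
static int iqcal_median3(int v[MAXIQCAL])
{
	int ix, iy, tmp;

	for (ix = 0; ix < MAXIQCAL - 1; ix++)
		for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++)
			if (v[iy] < v[ix]) {
				tmp = v[ix];
				v[ix] = v[iy];
				v[iy] = tmp;
			}

	return v[MAXIQCAL / 2];
}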
static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
int iqcal_idx,
bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
struct coeff coeff;
static struct coeff coeff;
s32 iq_res[6];
int i, im, j;
int nmeasurement;
int nmeasurement = 0;
bool outlier_detect = true;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
goto tx_iqcal_fail;
}
coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
coeff.phs_coeff[i][im] =
coeff.phs_coeff[i][im][iqcal_idx] =
coeff.iqc_coeff[0] & 0x7f;
coeff.mag_coeff[i][im][iqcal_idx] =
(coeff.iqc_coeff[0] >> 7) & 0x7f;
if (coeff.mag_coeff[i][im] > 63)
coeff.mag_coeff[i][im] -= 128;
if (coeff.phs_coeff[i][im] > 63)
coeff.phs_coeff[i][im] -= 128;
if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
coeff.mag_coeff[i][im][iqcal_idx] -= 128;
if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
coeff.phs_coeff[i][im][iqcal_idx] -= 128;
}
}
ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
if (AR_SREV_9550(ah))
outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
iqcal_idx, nmeasurement);
if (outlier_detect)
ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
return;
@ -1409,7 +1481,7 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
}
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
@ -1455,14 +1527,38 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
return true;
}
static bool do_ar9003_agc_cal(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
bool status;
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
REG_READ(ah, AR_PHY_AGC_CONTROL) |
AR_PHY_AGC_CONTROL_CAL);
status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms,"
"noisy environment?\n",
AH_WAIT_TIMEOUT / 1000);
return false;
}
return true;
}
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
bool is_reusable = true, status = true;
bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
int i = 0;
/* Use chip chainmask only for calibration */
ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
txiqcal_done = true;
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
txiqcal_done = true;
} else {
txiqcal_done = false;
}
run_agc_cal = true;
} else {
sep_iq_cal = true;
@ -1512,27 +1613,37 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
if (AR_SREV_9330_11(ah))
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
REG_READ(ah, AR_PHY_AGC_CONTROL) |
AR_PHY_AGC_CONTROL_CAL);
/*
* For non-AR9550 chips, we just trigger AGC calibration
* in the HW, poll for completion and then process
* the results.
*
* For AR955x, we run it multiple times and use
* median IQ correction.
*/
if (!AR_SREV_9550(ah)) {
status = do_ar9003_agc_cal(ah);
if (!status)
return false;
/* Poll for offset calibration complete */
status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
} else {
if (!txiqcal_done) {
status = do_ar9003_agc_cal(ah);
if (!status)
return false;
} else {
for (i = 0; i < MAXIQCAL; i++) {
status = do_ar9003_agc_cal(ah);
if (!status)
return false;
ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
}
}
}
}
if (!status) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms; noisy environment?\n",
AH_WAIT_TIMEOUT / 1000);
return false;
}
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

View file

@ -30,7 +30,6 @@
#include "spectral.h"
struct ath_node;
struct ath_rate_table;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
enum {
WLAN_RC_PHY_OFDM,
WLAN_RC_PHY_CCK,
};
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@ -442,7 +446,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_csa_update(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@ -757,7 +762,6 @@ struct ath_softc {
#endif
struct ath9k_hw_cal_data caldata;
int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
@ -774,7 +778,6 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;

View file

@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
bool ath9k_csa_is_finished(struct ath_softc *sc)
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ieee80211_vif *vif;
vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
return false;
ieee80211_csa_finish(vif);
sc->csa_vif = NULL;
return true;
}
static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_softc *sc = data;
ath9k_csa_is_finished(sc, vif);
}
void ath9k_csa_update(struct ath_softc *sc)
{
ieee80211_iterate_active_interfaces(sc->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath9k_csa_update_vif,
sc);
}
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
/* EDMA devices check that in the tx completion function. */
if (!edma && ath9k_csa_is_finished(sc))
return;
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
/* EDMA devices check that in the tx completion function. */
if (!edma && ath9k_csa_is_finished(sc, vif))
return;
if (!vif || !vif->bss_conf.enable_beacon)
return;

View file

@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
/* Assumes you've already done the endian to CPU conversion */
bool ath9k_cmn_rx_accept(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rxs,
struct ath_rx_status *rx_stats,
bool *decrypt_error,
unsigned int rxfilter)
{
struct ath_hw *ah = common->ah;
bool is_mc, is_valid_tkip, strip_mic, mic_error;
__le16 fc;
fc = hdr->frame_control;
is_mc = !!is_multicast_ether_addr(hdr->addr1);
is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
test_bit(rx_stats->rs_keyix, common->tkip_keymap);
strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
ieee80211_has_protected(fc) &&
!(rx_stats->rs_status &
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS));
/*
* Key miss events are only relevant for pairwise keys where the
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
(rx_stats->rs_status & ATH9K_RXERR_MIC);
/*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
if (rx_stats->rs_status != 0) {
u8 status_mask;
if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
}
if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
(!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
*decrypt_error = true;
mic_error = false;
}
/*
* Reject error frames with the exception of
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS;
if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
status_mask |= ATH9K_RXERR_CRC;
if (rx_stats->rs_status & ~status_mask)
return false;
}
/*
* For unicast frames the MIC error bit can have false positives,
* so all MIC error reports need to be validated in software.
* False negatives are not common, so skip software verification
* if the hardware considers the MIC valid.
*/
if (strip_mic)
rxs->flag |= RX_FLAG_MMIC_STRIPPED;
else if (is_mc && mic_error)
rxs->flag |= RX_FLAG_MMIC_ERROR;
return true;
}
EXPORT_SYMBOL(ath9k_cmn_rx_accept);
void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs,
bool decrypt_error)
{
struct ath_hw *ah = common->ah;
struct ieee80211_hdr *hdr;
int hdrlen, padpos, padsize;
u8 keyix;
__le16 fc;
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
padpos = ieee80211_hdrlen(fc);
/* The MAC header is padded to have 32-bit boundary if the
* packet payload is non-zero. The general calculation for
* padsize would take into account odd header lengths:
* padsize = (4 - padpos % 4) % 4; However, since only
* even-length headers are used, padding can only be 0 or 2
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
padsize = padpos & 3;
if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
keyix = rx_stats->rs_keyix;
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
if (ah->sw_mgmt_crypto &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
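Note: a quick check of the padding arithmetic described in the comment above. For the even header lengths actually seen on the air, "padpos & 3" matches the general formula "(4 - padpos % 4) % 4"; the two would only diverge for odd lengths, which do not occur. Hypothetical values, for illustration only:

/* Even 802.11 header lengths:
 *   padpos = 24 (3-address data): (4 - 24 % 4) % 4 = 0, 24 & 3 = 0
 *   padpos = 26 (QoS data):       (4 - 26 % 4) % 4 = 2, 26 & 3 = 2
 *   padpos = 30 (4-address data): (4 - 30 % 4) % 4 = 2, 30 & 3 = 2
 */
static void padding_example(void)
{
	int padpos = 26;             /* hypothetical QoS data frame */
	int padsize = padpos & 3;    /* 2 bytes of hardware padding */

	(void)padsize;
}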
int ath9k_cmn_process_rate(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
unsigned int i = 0;
struct ath_hw *ah = common->ah;
band = ah->curchan->chan->band;
sband = hw->wiphy->bands[band];
if (IS_CHAN_QUARTER_RATE(ah->curchan))
rxs->flag |= RX_FLAG_5MHZ;
else if (IS_CHAN_HALF_RATE(ah->curchan))
rxs->flag |= RX_FLAG_10MHZ;
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
rxs->flag |= rx_stats->flag;
rxs->rate_idx = rx_stats->rs_rate & 0x7f;
return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
rxs->rate_idx = i;
return 0;
}
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
rxs->rate_idx = i;
return 0;
}
}
return -EINVAL;
}
EXPORT_SYMBOL(ath9k_cmn_process_rate);
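Note: the rate decoding above splits on bit 7 of the descriptor rate field. A small sketch with hypothetical descriptor values:

#include <linux/types.h>

static void rate_decode_example(void)
{
	u8 rs_rate = 0x85;           /* bit 7 set: an HT (MCS) rate           */
	u8 mcs = rs_rate & 0x7f;     /* reported to mac80211 as MCS 5         */
	u8 legacy = 0x1b;            /* bit 7 clear: legacy rate; the index   */
	                             /* is found by matching                  */
	                             /* sband->bitrates[i].hw_value (or       */
	                             /* hw_value_short for short preamble)    */
	(void)mcs;
	(void)legacy;
}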
void ath9k_cmn_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs)
{
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
int i, j;
/*
* RSSI is not available for subframes in an A-MPDU.
*/
if (rx_stats->rs_moreaggr) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
}
/*
* Check if the RSSI for the last subframe in an A-MPDU
* or an unaggregated frame is valid.
*/
if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
}
for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
s8 rssi;
if (!(ah->rxchainmask & BIT(i)))
continue;
rssi = rx_stats->rs_rssi_ctl[i];
if (rssi != ATH9K_RSSI_BAD) {
rxs->chains |= BIT(j);
rxs->chain_signal[j] = ah->noise + rssi;
}
j++;
}
/*
* Update Beacon RSSI, this is used by ANI.
*/
if (rx_stats->is_mybeacon &&
((ah->opmode == NL80211_IFTYPE_STATION) ||
(ah->opmode == NL80211_IFTYPE_ADHOC))) {
ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
last_rssi = common->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (rssi < 0)
rssi = 0;
ah->stats.avgbrssi = rssi;
}
rxs->signal = ah->noise + rx_stats->rs_rssi;
}
EXPORT_SYMBOL(ath9k_cmn_process_rssi);
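Note: the beacon-RSSI path above keeps a low-pass-filtered value scaled up by ATH_RSSI_EP_MULTIPLIER and uses ATH_EP_RND (defined in the common.h hunk below) to divide it back out with rounding. A small worked example, assuming a multiplier of 128 (1 << 7) -- the multiplier value is my assumption:

#define ATH_EP_RND(x, mul)  (((x) + ((mul)/2)) / (mul))

static void beacon_rssi_example(void)
{
	int filtered = 4550;                    /* hypothetical LPF state, ~35.5 * 128 */
	int rssi = ATH_EP_RND(filtered, 128);   /* (4550 + 64) / 128 = 36              */

	(void)rssi;
}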
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

View file

@ -42,6 +42,25 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
bool ath9k_cmn_rx_accept(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rxs,
struct ath_rx_status *rx_stats,
bool *decrypt_error,
unsigned int rxfilter);
void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs,
bool decrypt_error);
int ath9k_cmn_process_rate(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs);
void ath9k_cmn_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ath_hw *ah,

View file

@ -18,7 +18,6 @@
#define DEBUG_H
#include "hw.h"
#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;

View file

@ -21,6 +21,8 @@
#include "hw.h"
struct ath_softc;
/**
* struct ath_dfs_stats - DFS Statistics per wiphy
* @pulses_total: pulses reported by HW

View file

@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
{ USB_DEVICE(0x0411, 0x0197),
.driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },

View file

@ -277,7 +277,6 @@ struct ath9k_htc_rxbuf {
};
struct ath9k_htc_rx {
int last_rssi; /* FIXME: per-STA */
struct list_head rxbuf;
spinlock_t rxbuflock;
};

View file

@ -611,6 +611,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}

View file

@ -1474,6 +1474,7 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
common->curaid = bss_conf->aid;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
}
}

View file

@ -927,43 +927,39 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
static void ath9k_process_rate(struct ieee80211_hw *hw,
struct ieee80211_rx_status *rxs,
u8 rx_rate, u8 rs_flags)
static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
struct ath_htc_rx_status *rxstatus)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
unsigned int i = 0;
rx_stats->flag = 0;
if (rxstatus->rs_flags & ATH9K_RX_2040)
rx_stats->flag |= RX_FLAG_40MHZ;
if (rxstatus->rs_flags & ATH9K_RX_GI)
rx_stats->flag |= RX_FLAG_SHORT_GI;
}
if (rx_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
if (rs_flags & ATH9K_RX_2040)
rxs->flag |= RX_FLAG_40MHZ;
if (rs_flags & ATH9K_RX_GI)
rxs->flag |= RX_FLAG_SHORT_GI;
rxs->rate_idx = rx_rate & 0x7f;
return;
}
static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
struct ath_htc_rx_status *rxstatus)
{
rx_stats->rs_datalen = rxstatus->rs_datalen;
rx_stats->rs_status = rxstatus->rs_status;
rx_stats->rs_phyerr = rxstatus->rs_phyerr;
rx_stats->rs_rssi = rxstatus->rs_rssi;
rx_stats->rs_keyix = rxstatus->rs_keyix;
rx_stats->rs_rate = rxstatus->rs_rate;
rx_stats->rs_antenna = rxstatus->rs_antenna;
rx_stats->rs_more = rxstatus->rs_more;
band = hw->conf.chandef.chan->band;
sband = hw->wiphy->bands[band];
for (i = 0; i < sband->n_bitrates; i++) {
if (sband->bitrates[i].hw_value == rx_rate) {
rxs->rate_idx = i;
return;
}
if (sband->bitrates[i].hw_value_short == rx_rate) {
rxs->rate_idx = i;
rxs->flag |= RX_FLAG_SHORTPRE;
return;
}
}
memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
sizeof(rx_stats->rs_rssi_ctl));
memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
sizeof(rx_stats->rs_rssi_ext));
rx_stats->rs_isaggr = rxstatus->rs_isaggr;
rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
rx_stats->rs_num_delims = rxstatus->rs_num_delims;
convert_htc_flag(rx_stats, rxstatus);
}
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@ -975,10 +971,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb = rxbuf->skb;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
int hdrlen, padsize;
int last_rssi = ATH_RSSI_DUMMY_MARKER;
__le16 fc;
struct ath_rx_status rx_stats;
bool decrypt_error;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@ -999,103 +995,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_htc_err_stat_rx(priv, rxstatus);
/* Get the RX status information */
memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
hdr = (struct ieee80211_hdr *)skb->data;
fc = hdr->frame_control;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;
if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
memmove(skb->data + padsize, skb->data, hdrlen);
skb_pull(skb, padsize);
}
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
if (rxbuf->rxstatus.rs_status != 0) {
if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
goto rx_next;
/* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
* After this, we can drop this part of skb. */
rx_status_htc_to_ath(&rx_stats, rxstatus);
rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
/* FIXME */
} else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
if (ieee80211_is_ctl(fc))
/*
* Sometimes, we get invalid
* MIC failures on valid control frames.
* Remove these mic errors.
*/
rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
else
rx_status->flag |= RX_FLAG_MMIC_ERROR;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
hdr = (struct ieee80211_hdr *)skb->data;
if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
&decrypt_error, priv->rxfilter))
goto rx_next;
/*
* Reject error frames with the exception of
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
if (rxbuf->rxstatus.rs_status &
~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
ATH9K_RXERR_CRC))
goto rx_next;
} else {
if (rxbuf->rxstatus.rs_status &
~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
goto rx_next;
}
}
}
ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
rx_status, decrypt_error);
if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
u8 keyix;
keyix = rxbuf->rxstatus.rs_keyix;
if (keyix != ATH9K_RXKEYIX_INVALID) {
rx_status->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc) &&
skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
rx_status->flag |= RX_FLAG_DECRYPTED;
}
}
if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
goto rx_next;
ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
rxbuf->rxstatus.rs_flags);
rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
!rxbuf->rxstatus.rs_moreaggr)
ATH_RSSI_LPF(priv->rx.last_rssi,
rxbuf->rxstatus.rs_rssi);
last_rssi = priv->rx.last_rssi;
if (ath_is_mybeacon(common, hdr)) {
s8 rssi = rxbuf->rxstatus.rs_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (rssi < 0)
rssi = 0;
priv->ah->stats.avgbrssi = rssi;
}
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
rx_status->band = hw->conf.chandef.chan->band;
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
rx_status->antenna = rxbuf->rxstatus.rs_antenna;
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
rx_status->antenna = rx_stats.rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
rx_next:
return false;
}

View file

@ -358,6 +358,36 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
kfree_skb(skb);
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
struct sk_buff *skb)
{
uint32_t *pattern = (uint32_t *)skb->data;
switch (*pattern) {
case 0x33221199:
{
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
break;
}
case 0x33221299:
{
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
break;
}
default:
dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
break;
}
}
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
ath9k_htc_fw_panic_report(htc_handle, skb);
kfree_skb(skb);
return;
}
if (epid >= ENDPOINT_MAX) {
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);

View file

@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
struct htc_panic_bad_vaddr {
__be32 pattern;
__be32 exccause;
__be32 pc;
__be32 badvaddr;
} __packed;
struct htc_panic_bad_epid {
__be32 pattern;
__be32 epid;
} __packed;
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);

View file

@ -23,7 +23,6 @@
#include "hw.h"
#include "hw-ops.h"
#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"

View file

@ -538,7 +538,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@ -1106,19 +1106,11 @@ static int __init ath9k_init(void)
{
int error;
/* Register rate control algorithm */
error = ath_rate_control_register();
if (error != 0) {
pr_err("Unable to register rate control algorithm: %d\n",
error);
goto err_out;
}
error = ath_pci_init();
if (error < 0) {
pr_err("No PCI devices found, driver not installed\n");
error = -ENODEV;
goto err_rate_unregister;
goto err_out;
}
error = ath_ahb_init();
@ -1131,9 +1123,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
err_rate_unregister:
ath_rate_control_unregister();
err_out:
return error;
}
@ -1144,7 +1133,6 @@ static void __exit ath9k_exit(void)
is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
ath_rate_control_unregister();
pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);

View file

@ -155,12 +155,8 @@ struct ath_htc_rx_status {
u8 rs_status;
u8 rs_phyerr;
int8_t rs_rssi;
int8_t rs_rssi_ctl0;
int8_t rs_rssi_ctl1;
int8_t rs_rssi_ctl2;
int8_t rs_rssi_ext0;
int8_t rs_rssi_ext1;
int8_t rs_rssi_ext2;
int8_t rs_rssi_ctl[3];
int8_t rs_rssi_ext[3];
u8 rs_keyix;
u8 rs_rate;
u8 rs_antenna;
@ -170,6 +166,7 @@ struct ath_htc_rx_status {
u8 rs_num_delims;
u8 rs_flags;
u8 rs_dummy;
/* FIXME: evm* never used? */
__be32 evm0;
__be32 evm1;
__be32 evm2;

View file

@ -1178,9 +1178,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
if (sc->csa_vif == vif)
sc->csa_vif = NULL;
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@ -1609,7 +1606,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@ -1866,7 +1863,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
int i, npend;
int i, npend = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@ -2086,13 +2083,8 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct ath_softc *sc = hw->priv;
/* mac80211 does not support CSA in multi-if cases (yet) */
if (WARN_ON(sc->csa_vif))
return;
sc->csa_vif = vif;
/* depend on vif->csa_active only */
return;
}
struct ieee80211_ops ath9k_ops = {

File diff suppressed because it is too large.

View file

@ -1,248 +0,0 @@
/*
* Copyright (c) 2004 Sam Leffler, Errno Consulting
* Copyright (c) 2004 Video54 Technologies, Inc.
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef RC_H
#define RC_H
#include "hw.h"
struct ath_softc;
#define ATH_RATE_MAX 30
#define RATE_TABLE_SIZE 72
#define RC_INVALID 0x0000
#define RC_LEGACY 0x0001
#define RC_SS 0x0002
#define RC_DS 0x0004
#define RC_TS 0x0008
#define RC_HT_20 0x0010
#define RC_HT_40 0x0020
#define RC_STREAM_MASK 0xe
#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
(((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
#define RC_HT_S_20 (RC_HT_20 | RC_SS)
#define RC_HT_D_20 (RC_HT_20 | RC_DS)
#define RC_HT_T_20 (RC_HT_20 | RC_TS)
#define RC_HT_S_40 (RC_HT_40 | RC_SS)
#define RC_HT_D_40 (RC_HT_40 | RC_DS)
#define RC_HT_T_40 (RC_HT_40 | RC_TS)
#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
enum {
WLAN_RC_PHY_OFDM,
WLAN_RC_PHY_CCK,
WLAN_RC_PHY_HT_20_SS,
WLAN_RC_PHY_HT_20_DS,
WLAN_RC_PHY_HT_20_TS,
WLAN_RC_PHY_HT_40_SS,
WLAN_RC_PHY_HT_40_DS,
WLAN_RC_PHY_HT_40_TS,
WLAN_RC_PHY_HT_20_SS_HGI,
WLAN_RC_PHY_HT_20_DS_HGI,
WLAN_RC_PHY_HT_20_TS_HGI,
WLAN_RC_PHY_HT_40_SS_HGI,
WLAN_RC_PHY_HT_40_DS_HGI,
WLAN_RC_PHY_HT_40_TS_HGI,
WLAN_RC_PHY_MAX
};
#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
|| (_phy == WLAN_RC_PHY_HT_40_TS) \
|| (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_TS) \
|| (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_TS) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
(RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
/* Return TRUE if flag supports HT20 && client supports HT20 or
* return TRUE if flag supports HT40 && client supports HT40.
* This is used because some rates overlap between HT20/HT40.
*/
#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
(((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
#define WLAN_RC_DS_FLAG (0x01)
#define WLAN_RC_TS_FLAG (0x02)
#define WLAN_RC_40_FLAG (0x04)
#define WLAN_RC_SGI_FLAG (0x08)
#define WLAN_RC_HT_FLAG (0x10)
/**
* struct ath_rate_table - Rate Control table
* @rate_cnt: total number of rates for the given wireless mode
* @mcs_start: MCS rate index offset
* @rate_flags: Rate Control flags
* @phy: CCK/OFDM/HT20/HT40
* @ratekbps: rate in Kbits per second
* @user_ratekbps: user rate in Kbits per second
* @ratecode: rate that goes into HW descriptors
* @dot11rate: value that goes into supported
* rates info element of MLME
* @ctrl_rate: Index of next lower basic rate, used for duration computation
* @cw40index: Index of rates having 40MHz channel width
* @sgi_index: Index of rates having Short Guard Interval
* @ht_index: high throughput rates having 40MHz channel width and
* Short Guard Interval
* @probe_interval: interval for rate control to probe for other rates
* @initial_ratemax: initial ratemax value
*/
struct ath_rate_table {
int rate_cnt;
int mcs_start;
struct {
u16 rate_flags;
u8 phy;
u32 ratekbps;
u32 user_ratekbps;
u8 ratecode;
u8 dot11rate;
} info[RATE_TABLE_SIZE];
u32 probe_interval;
u8 initial_ratemax;
};
struct ath_rateset {
u8 rs_nrates;
u8 rs_rates[ATH_RATE_MAX];
};
struct ath_rc_stats {
u32 success;
u32 retries;
u32 xretries;
u8 per;
};
/**
* struct ath_rate_priv - Rate Control priv data
* @state: RC state
* @probe_rate: rate we are probing at
* @probe_time: msec timestamp for last probe
* @hw_maxretry_pktcnt: num of packets since we got HW max retry error
* @max_valid_rate: maximum number of valid rate
* @per_down_time: msec timestamp for last PER down step
* @valid_phy_ratecnt: valid rate count
* @rate_max_phy: phy index for the max rate
* @per: PER for every valid rate in %
* @probe_interval: interval for ratectrl to probe for other rates
* @ht_cap: HT capabilities
* @neg_rates: Negotiated rates
* @neg_ht_rates: Negotiated HT rates
*/
struct ath_rate_priv {
u8 rate_table_size;
u8 probe_rate;
u8 hw_maxretry_pktcnt;
u8 max_valid_rate;
u8 valid_rate_index[RATE_TABLE_SIZE];
u8 ht_cap;
u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
u8 rate_max_phy;
u8 per[RATE_TABLE_SIZE];
u32 probe_time;
u32 per_down_time;
u32 probe_interval;
struct ath_rateset neg_rates;
struct ath_rateset neg_ht_rates;
const struct ath_rate_table *rate_table;
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
struct dentry *debugfs_rcstats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
#endif
};
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
int xretries, int retries, u8 per);
#else
static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
{
}
static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
int xretries, int retries, u8 per)
{
}
#endif
#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
int ath_rate_control_register(void);
void ath_rate_control_unregister(void);
#else
static inline int ath_rate_control_register(void)
{
return 0;
}
static inline void ath_rate_control_unregister(void)
{
}
#endif
#endif /* RC_H */

View file

@ -755,204 +755,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rxs,
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
struct ath_softc *sc = (struct ath_softc *) common->priv;
bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
fc = hdr->frame_control;
is_mc = !!is_multicast_ether_addr(hdr->addr1);
is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
test_bit(rx_stats->rs_keyix, common->tkip_keymap);
strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
ieee80211_has_protected(fc) &&
!(rx_stats->rs_status &
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS));
/*
* Key miss events are only relevant for pairwise keys where the
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
(rx_stats->rs_status & ATH9K_RXERR_MIC);
/*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
if (rx_stats->rs_status != 0) {
u8 status_mask;
if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
}
if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
(!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
*decrypt_error = true;
mic_error = false;
}
/*
* Reject error frames with the exception of
* decryption and MIC failures. For monitor mode,
* we also ignore the CRC error.
*/
status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS;
if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
status_mask |= ATH9K_RXERR_CRC;
if (rx_stats->rs_status & ~status_mask)
return false;
}
/*
* For unicast frames the MIC error bit can have false positives,
* so all MIC error reports need to be validated in software.
* False negatives are not common, so skip software verification
* if the hardware considers the MIC valid.
*/
if (strip_mic)
rxs->flag |= RX_FLAG_MMIC_STRIPPED;
else if (is_mc && mic_error)
rxs->flag |= RX_FLAG_MMIC_ERROR;
return true;
}
static int ath9k_process_rate(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
unsigned int i = 0;
struct ath_softc __maybe_unused *sc = common->priv;
struct ath_hw *ah = sc->sc_ah;
band = ah->curchan->chan->band;
sband = hw->wiphy->bands[band];
if (IS_CHAN_QUARTER_RATE(ah->curchan))
rxs->flag |= RX_FLAG_5MHZ;
else if (IS_CHAN_HALF_RATE(ah->curchan))
rxs->flag |= RX_FLAG_10MHZ;
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
rxs->flag |= rx_stats->flag;
rxs->rate_idx = rx_stats->rs_rate & 0x7f;
return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
rxs->rate_idx = i;
return 0;
}
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
rxs->rate_idx = i;
return 0;
}
}
/*
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
ath_dbg(common, ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
RX_STAT_INC(rx_rate_err);
return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
int i, j;
/*
* RSSI is not available for subframes in an A-MPDU.
*/
if (rx_stats->rs_moreaggr) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
}
/*
* Check if the RSSI for the last subframe in an A-MPDU
* or an unaggregated frame is valid.
*/
if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
}
for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
s8 rssi;
if (!(ah->rxchainmask & BIT(i)))
continue;
rssi = rx_stats->rs_rssi_ctl[i];
if (rssi != ATH9K_RSSI_BAD) {
rxs->chains |= BIT(j);
rxs->chain_signal[j] = ah->noise + rssi;
}
j++;
}
/*
* Update Beacon RSSI, this is used by ANI.
*/
if (rx_stats->is_mybeacon &&
((ah->opmode == NL80211_IFTYPE_STATION) ||
(ah->opmode == NL80211_IFTYPE_ADHOC))) {
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (rssi < 0)
rssi = 0;
ah->stats.avgbrssi = rssi;
}
rxs->signal = ah->noise + rx_stats->rs_rssi;
}
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
@ -1051,7 +853,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter)) {
ret = -EINVAL;
goto exit;
}
@ -1069,12 +871,19 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
goto exit;
}
if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
/*
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
RX_STAT_INC(rx_rate_err);
ret = -EINVAL;
goto exit;
}
ath9k_process_rssi(common, hw, rx_stats, rx_status);
ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
@ -1092,57 +901,6 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
return ret;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs,
bool decrypt_error)
{
struct ath_hw *ah = common->ah;
struct ieee80211_hdr *hdr;
int hdrlen, padpos, padsize;
u8 keyix;
__le16 fc;
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
padpos = ieee80211_hdrlen(fc);
/* The MAC header is padded to have 32-bit boundary if the
* packet payload is non-zero. The general calculation for
* padsize would take into account odd header lengths:
* padsize = (4 - padpos % 4) % 4; However, since only
* even-length headers are used, padding can only be 0 or 2
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
padsize = padpos & 3;
if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
keyix = rx_stats->rs_keyix;
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
if (ah->sw_mgmt_crypto &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
/*
* Run the LNA combining algorithm only in these cases:
*
@ -1292,8 +1050,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_pull(skb, ah->caps.rx_status_len);
if (!rs.rs_more)
ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
rxs, decrypt_error);
ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);

View file

@ -2566,7 +2566,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
ath9k_csa_is_finished(sc);
ath9k_csa_update(sc);
continue;
}

View file

@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
writel(data, wcn->mmio + addr);
}
#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
do { \
if (wcn->chip_version == WCN36XX_CHIP_3680) \
wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
else \
wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
} while (0)
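Note: the "reg ## _3680" / "reg ## _3660" token pasting selects the per-chip register constant, so a call like the one later in this diff expands roughly as sketched below (not the preprocessor's literal output):

/*
 * wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data)
 * becomes, depending on the detected chip:
 *
 *     if (wcn->chip_version == WCN36XX_CHIP_3680)
 *         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT_3680, reg_data);
 *     else
 *         wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT_3660, reg_data);
 */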
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->mmio + addr);
@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/* Setting interrupt path */
reg_data = WCN36XX_DXE_CCU_INT;
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */

View file

@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
#define WCN36XX_DXE_MEM_BASE 0x03000000
#define WCN36XX_DXE_MEM_REG 0x202000
#define WCN36XX_DXE_CCU_INT 0xA0011
#define WCN36XX_DXE_REG_CCU_INT 0x200b10
#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
/* TODO This must be calculated properly and not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44

View file

@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
MAX_FEATURE_SUPPORTED = 128,
};
#define WCN36XX_HAL_CAPS_SIZE 4
struct wcn36xx_hal_feat_caps_msg {
struct wcn36xx_hal_msg_header header;
u32 feat_caps[4];
u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
} __packed;
/* status codes to help debug rekey failures */

View file

@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "wcn36xx.h"
@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
static const char * const wcn36xx_caps_names[] = {
"MCC", /* 0 */
"P2P", /* 1 */
"DOT11AC", /* 2 */
"SLM_SESSIONIZATION", /* 3 */
"DOT11AC_OPMODE", /* 4 */
"SAP32STA", /* 5 */
"TDLS", /* 6 */
"P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
"WLANACTIVE_OFFLOAD", /* 8 */
"BEACON_OFFLOAD", /* 9 */
"SCAN_OFFLOAD", /* 10 */
"ROAM_OFFLOAD", /* 11 */
"BCN_MISS_OFFLOAD", /* 12 */
"STA_POWERSAVE", /* 13 */
"STA_ADVANCED_PWRSAVE", /* 14 */
"AP_UAPSD", /* 15 */
"AP_DFS", /* 16 */
"BLOCKACK", /* 17 */
"PHY_ERR", /* 18 */
"BCN_FILTER", /* 19 */
"RTT", /* 20 */
"RATECTRL", /* 21 */
"WOW" /* 22 */
};
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
{
if (x >= ARRAY_SIZE(wcn36xx_caps_names))
return "UNKNOWN";
return wcn36xx_caps_names[x];
}
static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
{
int i;
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
if (get_feat_caps(wcn->fw_feat_caps, i))
wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
}
}
static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
{
if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
wcn36xx_info("Chip is 3680\n");
wcn->chip_version = WCN36XX_CHIP_3680;
} else {
wcn36xx_info("Chip is 3660\n");
wcn->chip_version = WCN36XX_CHIP_3660;
}
}
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_smd_buf;
}
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
ret = wcn36xx_smd_feature_caps_exchange(wcn);
if (ret)
wcn36xx_warn("Exchange feature caps failed\n");
else
wcn36xx_feat_caps_info(wcn);
}
wcn36xx_detect_chip_version(wcn);
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
ret = wcn36xx_smd_feature_caps_exchange(wcn);
if (ret)
wcn36xx_warn("Exchange feature caps failed\n");
}
INIT_LIST_HEAD(&wcn->vif_list);
return 0;
@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = 0xff;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
release_firmware(wcn->nv);
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);

View file

@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
unsigned long start;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
wcn36xx_err("Timeout while waiting SMD response\n");
wcn36xx_err("Timeout! No SMD response in %dms\n",
HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
jiffies_to_msecs(jiffies - start));
out:
return ret;
}
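The added timing simply brackets the blocking wait: sample jiffies before the SMD transmit and convert the delta with jiffies_to_msecs() once the completion fires. A standalone sketch of the same measure-around-a-wait pattern, with clock_gettime() as a user-space stand-in for jiffies:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000 + (b->tv_nsec - a->tv_nsec) / 1000000;
}

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	usleep(50 * 1000);	/* stands in for waiting on the SMD completion */
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("command completed in %ldms\n", elapsed_ms(&start, &end));
	return 0;
}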
@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
const struct firmware *nv;
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
if (ret) {
wcn36xx_err("Failed to load nv file %s: %d\n",
WLAN_NV_FILE, ret);
goto out_free_nv;
if (!wcn->nv) {
ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
if (ret) {
wcn36xx_err("Failed to load nv file %s: %d\n",
WLAN_NV_FILE, ret);
goto out;
}
}
nv_d = (struct nv_data *)nv->data;
nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
do {
fw_bytes_left = nv->size - fm_offset - 4;
fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
out_unlock:
mutex_unlock(&wcn->hal_mutex);
out_free_nv:
release_firmware(nv);
return ret;
out:
	return ret;
}
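With this change the NV image is requested once, cached in wcn->nv, and only released in wcn36xx_remove() (see the release_firmware() call added there), so repeated downloads reuse the same blob. A standalone sketch of that cache-on-first-use idea, with a static buffer standing in for struct firmware:

#include <stdio.h>
#include <string.h>

struct blob { const char *data; size_t size; };

static struct blob nv_image;	/* stands in for the cached wcn->nv */

static const struct blob *load_nv(void)
{
	if (!nv_image.data) {	/* first call "requests the firmware" */
		nv_image.data = "nv-contents";
		nv_image.size = strlen(nv_image.data);
		printf("loaded NV (%zu bytes)\n", nv_image.size);
	}
	return &nv_image;	/* later calls reuse the cached blob */
}

int main(void)
{
	load_nv();
	load_nv();	/* no second load */
	return 0;
}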
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
"hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
params->p2p);
params->uc_ucast_sig, params->p2p);
return 0;
}
@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
}
priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_exit_bmps failed\n");
wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
@ -1682,8 +1686,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
return ret;
}
static inline void set_feat_caps(u32 *bitmap,
enum place_holder_in_cap_bitmap cap)
void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
bitmap[arr_idx] |= (1 << bit_idx);
}
static inline int get_feat_caps(u32 *bitmap,
enum place_holder_in_cap_bitmap cap)
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
return ret;
}
static inline void clear_feat_caps(u32 *bitmap,
enum place_holder_in_cap_bitmap cap)
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body;
int ret = 0;
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
int ret = 0, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
ret);
if (wcn->hal_rsp_len != sizeof(*rsp)) {
wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;

View file

@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
#define HAL_MSG_TIMEOUT 200
#define HAL_MSG_TIMEOUT 500
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */
@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,

View file

@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
status.flag, status.vendor_radiotap_len);
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct ieee80211_vif,
drv_priv);
bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
bd->dpu_sign = __vif_priv->ucast_dpu_signature;
if (ieee80211_is_nullfunc(hdr->frame_control) ||
(sta_priv && !sta_priv->is_data_encrypted))
bd->dpu_ne = 1;

View file

@ -125,10 +125,10 @@ struct wcn36xx_vif {
enum wcn36xx_power_state pw_state;
u8 bss_index;
u8 ucast_dpu_signature;
/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
};
/**
@ -159,6 +159,7 @@ struct wcn36xx_sta {
u16 tid;
u8 sta_index;
u8 dpu_desc_index;
u8 ucast_dpu_sign;
u8 bss_sta_index;
u8 bss_dpu_desc_index;
bool is_data_encrypted;
@ -171,10 +172,14 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;
const struct firmware *nv;
u8 fw_revision;
u8 fw_version;
u8 fw_minor;
u8 fw_major;
u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
u32 chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@ -222,6 +227,9 @@ struct wcn36xx {
};
#define WCN36XX_CHIP_3660 0
#define WCN36XX_CHIP_3680 1
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,

View file

@ -58,41 +58,6 @@ enum b43_verbosity {
#endif
};
/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
static inline u8 b43_freq_to_channel_5ghz(int freq)
{
return ((freq - 5000) / 5);
}
static inline u8 b43_freq_to_channel_2ghz(int freq)
{
u8 channel;
if (freq == 2484)
channel = 14;
else
channel = (freq - 2407) / 5;
return channel;
}
/* Lightweight function to convert a channel number to a frequency (in Mhz). */
static inline int b43_channel_to_freq_5ghz(u8 channel)
{
return (5000 + (5 * channel));
}
static inline int b43_channel_to_freq_2ghz(u8 channel)
{
int freq;
if (channel == 14)
freq = 2484;
else
freq = 2407 + (5 * channel);
return freq;
}
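These helpers are dropped because mac80211's ieee80211_channel_to_frequency() already provides the mapping; the arithmetic itself is the standard 802.11 channel plan. A standalone sketch reconstructed from the removed helpers (not mac80211's implementation):

#include <stdio.h>

/* 2.4 GHz: channel 14 is special (2484 MHz), otherwise 2407 + 5 * channel;
 * 5 GHz: 5000 + 5 * channel -- the same arithmetic the removed helpers used. */
static int channel_to_freq(int channel, int is_5ghz)
{
	if (is_5ghz)
		return 5000 + 5 * channel;
	return channel == 14 ? 2484 : 2407 + 5 * channel;
}

int main(void)
{
	printf("ch 6 (2.4 GHz) -> %d MHz\n", channel_to_freq(6, 0));	/* 2437 */
	printf("ch 36 (5 GHz)  -> %d MHz\n", channel_to_freq(36, 1));	/* 5180 */
	return 0;
}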
static inline int b43_is_cck_rate(int rate)
{
return (rate == B43_CCK_RATE_1MB ||

View file

@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
status.freq = b43_channel_to_freq_5ghz(chanid);
status.freq =
ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ) {
if (chanstat & B43_RX_CHAN_5GHZ)
status.band = IEEE80211_BAND_5GHZ;
status.freq = b43_channel_to_freq_5ghz(chanid);
} else {
else
status.band = IEEE80211_BAND_2GHZ;
status.freq = b43_channel_to_freq_2ghz(chanid);
}
status.freq =
ieee80211_channel_to_frequency(chanid, status.band);
break;
default:
B43_WARN_ON(1);

View file

@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
chip.o \
fwil.o \
fweh.o \
fwsignal.o \
@ -36,8 +37,7 @@ brcmfmac-objs += \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
bcmsdh.o \
sdio_chip.o
bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \

View file

@ -43,7 +43,6 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@ -827,7 +826,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
}
if (!write)
memcpy(data, pkt->data, dsize);
skb_trim(pkt, dsize);
skb_trim(pkt, 0);
/* Adjust for next transfer (if any) */
size -= dsize;
@ -1115,11 +1114,12 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.remove = brcmf_ops_sdio_remove,
.name = BRCMFMAC_SDIO_PDATA_NAME,
.id_table = brcmf_sdmmc_ids,
#ifdef CONFIG_PM_SLEEP
.drv = {
.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &brcmf_sdio_pm_ops,
},
#endif /* CONFIG_PM_SLEEP */
},
};
static int brcmf_sdio_pd_probe(struct platform_device *pdev)

File diff suppressed because it is too large

View file

@ -0,0 +1,91 @@
/*
* Copyright (c) 2014 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMF_CHIP_H
#define BRCMF_CHIP_H
#include <linux/types.h>
#define CORE_CC_REG(base, field) \
(base + offsetof(struct chipcregs, field))
/**
* struct brcmf_chip - chip level information.
*
* @chip: chip identifier.
* @chiprev: chip revision.
* @cc_caps: chipcommon core capabilities.
* @pmucaps: PMU capabilities.
* @pmurev: PMU revision.
* @rambase: RAM base address (only applicable for ARM CR4 chips).
* @ramsize: amount of RAM on chip.
* @name: string representation of the chip identifier.
*/
struct brcmf_chip {
u32 chip;
u32 chiprev;
u32 cc_caps;
u32 pmucaps;
u32 pmurev;
u32 rambase;
u32 ramsize;
char name[8];
};
/**
* struct brcmf_core - core related information.
*
* @id: core identifier.
* @rev: core revision.
* @base: base address of core register space.
*/
struct brcmf_core {
u16 id;
u16 rev;
u32 base;
};
/**
* struct brcmf_buscore_ops - buscore specific callbacks.
*
* @read32: read 32-bit value over bus.
* @write32: write 32-bit value over bus.
* @prepare: prepare bus for core configuration.
* @setup: bus-specific core setup.
* @exit_dl: exit download state.
* The callback should use the provided @rstvec when non-zero.
*/
struct brcmf_buscore_ops {
u32 (*read32)(void *ctx, u32 addr);
void (*write32)(void *ctx, u32 addr, u32 value);
int (*prepare)(void *ctx);
int (*setup)(void *ctx, struct brcmf_chip *chip);
void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
};
struct brcmf_chip *brcmf_chip_attach(void *ctx,
const struct brcmf_buscore_ops *ops);
void brcmf_chip_detach(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
bool brcmf_chip_iscoreup(struct brcmf_core *core);
void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
u32 postreset);
void brcmf_chip_enter_download(struct brcmf_chip *ci);
bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
#endif /* BRCMF_CHIP_H */
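The header above defines the bus-agnostic chip API that replaces sdio_chip.h: a bus driver supplies read32/write32 (plus optional prepare/setup/exit_dl) callbacks and receives a struct brcmf_chip it can query per core. A standalone sketch of the underlying ops-table-plus-opaque-context pattern, with invented names and a fake register file rather than the brcmf calls themselves:

#include <stdio.h>
#include <stdint.h>

/* Illustrative names only: the point is the function-pointer table plus an
 * opaque ctx, which is how brcmf_buscore_ops decouples chip code from the bus. */
struct bus_ops {
	uint32_t (*read32)(void *ctx, uint32_t addr);
	void (*write32)(void *ctx, uint32_t addr, uint32_t val);
};

struct fake_bus { uint32_t regs[16]; };

static uint32_t fake_read32(void *ctx, uint32_t addr)
{
	return ((struct fake_bus *)ctx)->regs[addr & 0xf];
}

static void fake_write32(void *ctx, uint32_t addr, uint32_t val)
{
	((struct fake_bus *)ctx)->regs[addr & 0xf] = val;
}

int main(void)
{
	struct fake_bus bus = { { 0 } };
	const struct bus_ops ops = { .read32 = fake_read32, .write32 = fake_write32 };

	ops.write32(&bus, 0x3, 0x4339);
	printf("reg 0x3 = 0x%x\n", ops.read32(&bus, 0x3));
	return 0;
}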

View file

@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
@ -40,7 +41,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
#include "sdio_chip.h"
#include "chip.h"
#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@ -156,6 +157,33 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
#define CORE_BUS_REG(base, field) \
(base + offsetof(struct sdpcmd_regs, field))
/* SDIO function 1 register CHIPCLKCSR */
/* Force ALP request to backplane */
#define SBSDIO_FORCE_ALP 0x01
/* Force HT request to backplane */
#define SBSDIO_FORCE_HT 0x02
/* Force ILP request to backplane */
#define SBSDIO_FORCE_ILP 0x04
/* Make ALP ready (power up xtal) */
#define SBSDIO_ALP_AVAIL_REQ 0x08
/* Make HT ready (power up PLL) */
#define SBSDIO_HT_AVAIL_REQ 0x10
/* Squelch clock requests from HW */
#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
/* Status: ALP is ready */
#define SBSDIO_ALP_AVAIL 0x40
/* Status: HT is ready */
#define SBSDIO_HT_AVAIL 0x80
#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
#define SBSDIO_CLKAV(regval, alponly) \
(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
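SBSDIO_CLKAV() answers whether the requested clock is usable yet: ALP availability is always required, HT availability only when alponly is false. A standalone check of the macro logic with the bit definitions above:

#include <stdio.h>

#define SBSDIO_ALP_AVAIL	0x40
#define SBSDIO_HT_AVAIL		0x80
#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
#define SBSDIO_CLKAV(regval, alponly) \
	(SBSDIO_ALPAV(regval) && ((alponly) ? 1 : SBSDIO_HTAV(regval)))

int main(void)
{
	int csr = SBSDIO_ALP_AVAIL;	/* ALP is up, HT (PLL) still powering */

	printf("ALP-only request ready: %d\n", !!SBSDIO_CLKAV(csr, 1));	/* 1 */
	printf("HT request ready:       %d\n", !!SBSDIO_CLKAV(csr, 0));	/* 0 */
	return 0;
}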
/* intstatus */
#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@ -494,6 +522,52 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
u8 strength; /* Pad Drive Strength in mA */
u8 sel; /* Chip-specific select value */
};
/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
{32, 0x6},
{26, 0x7},
{22, 0x4},
{16, 0x5},
{12, 0x2},
{8, 0x3},
{4, 0x0},
{0, 0x1}
};
/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
{6, 0x7},
{5, 0x6},
{4, 0x5},
{3, 0x4},
{2, 0x2},
{1, 0x1},
{0, 0x0}
};
/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
{3, 0x3},
{2, 0x2},
{1, 0x1},
{0, 0x0} };
/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
{16, 0x7},
{12, 0x5},
{8, 0x3},
{4, 0x1}
};
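These tables map a requested pad drive strength in mA to a chip-specific select value; brcmf_sdio_drivestrengthinit() below picks a table via SDIOD_DRVSTR_KEY(chip, pmurev) and walks it for the first entry that does not exceed the request. A standalone sketch of that lookup against the PMU rev 11 table (simplified: a too-small request falls through to the last entry):

#include <stdio.h>
#include <stdint.h>

struct sdiod_drive_str { uint8_t strength; uint8_t sel; };

/* PMU rev 11 (1.8V) table from above */
static const struct sdiod_drive_str tab1_1v8[] = {
	{32, 0x6}, {26, 0x7}, {22, 0x4}, {16, 0x5},
	{12, 0x2}, {8, 0x3}, {4, 0x0}, {0, 0x1}
};

/* First entry not exceeding the request wins */
static uint8_t pick_sel(const struct sdiod_drive_str *tab, unsigned int want_ma)
{
	int i;

	for (i = 0; tab[i].strength != 0; i++)
		if (want_ma >= tab[i].strength)
			break;
	return tab[i].sel;
}

int main(void)
{
	printf("6 mA request  -> sel 0x%x\n", pick_sel(tab1_1v8, 6));	/* 4 mA entry */
	printf("20 mA request -> sel 0x%x\n", pick_sel(tab1_1v8, 20));	/* 16 mA entry */
	return 0;
}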
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
@ -619,27 +693,24 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32-bit backplane bus.
*/
static int
r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
struct brcmf_core *core;
int ret;
*regvar = brcmf_sdiod_regrl(bus->sdiodev,
bus->ci->c_inf[idx].base + offset, &ret);
core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
static int
w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
struct brcmf_core *core;
int ret;
brcmf_sdiod_regwl(bus->sdiodev,
bus->ci->c_inf[idx].base + reg_offset,
regval, &ret);
core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
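Both accessors now resolve the SDIO device core at call time through brcmf_chip_get_core() instead of caching an index into the old c_inf[] array, so the register address is always the core base plus the structure offset. A standalone sketch of that lookup-plus-offset arithmetic with a fake core table (core IDs follow the 0x829 SDIO-device convention noted elsewhere in this series; the base and offset values are stand-ins):

#include <stdio.h>
#include <stdint.h>

struct core { uint16_t id; uint32_t base; };

/* Minimal stand-in for brcmf_chip_get_core(): linear scan by core id */
static const struct core *get_core(const struct core *cores, int n, uint16_t id)
{
	int i;

	for (i = 0; i < n; i++)
		if (cores[i].id == id)
			return &cores[i];
	return NULL;
}

int main(void)
{
	const struct core cores[] = {
		{ 0x800, 0x18000000 },	/* chipcommon */
		{ 0x829, 0x18005000 },	/* SDIO device core, base illustrative */
	};
	const struct core *sdio = get_core(cores, 2, 0x829);
	uint32_t intstatus_off = 0x20;	/* stand-in for offsetof(struct sdpcmd_regs, intstatus) */

	if (sdio)
		printf("intstatus register at 0x%x\n", sdio->base + intstatus_off);
	return 0;
}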
@ -900,8 +971,8 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(SDIO, "request %s currently %s\n",
brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE"));
@ -953,6 +1024,86 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
}
#ifdef DEBUG
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared *sh)
{
u32 addr;
int rv;
u32 shaddr = 0;
struct sdpcm_shared_le sh_le;
__le32 addr_le;
shaddr = bus->ci->rambase + bus->ramsize - 4;
/*
* Read last word in socram to determine
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_bus_sleep(bus, false, false);
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
addr = le32_to_cpu(addr_le);
brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
/*
* Check if addr is valid.
* NVRAM length at the end of memory should have been overwritten.
*/
if (!brcmf_sdio_valid_shared_address(addr)) {
brcmf_err("invalid sdpcm_shared address 0x%08X\n",
addr);
return -EINVAL;
}
/* Read hndrte_shared structure */
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
if (rv < 0)
return rv;
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
sh->assert_line = le32_to_cpu(sh_le.assert_line);
sh->console_addr = le32_to_cpu(sh_le.console_addr);
sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
SDPCM_SHARED_VERSION,
sh->flags & SDPCM_SHARED_VERSION_MASK);
return -EPROTO;
}
return 0;
}
static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
{
struct sdpcm_shared sh;
if (brcmf_sdio_readshared(bus, &sh) == 0)
bus->console_addr = sh.console_addr;
}
#else
static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
{
}
#endif /* DEBUG */
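brcmf_sdio_valid_shared_address() relies on what the last word of socram held before firmware boot: per the comment above, the NVRAM length token (low half plus its one's complement in the high half), which running firmware overwrites with the real sdpcm_shared pointer. A standalone check of that invariant with illustrative values:

#include <stdio.h>
#include <stdint.h>

/* Same test as brcmf_sdio_valid_shared_address(): reject zero, and reject any
 * word whose high 16 bits are still the one's complement of its low 16 bits,
 * i.e. the NVRAM length token was never overwritten by running firmware. */
static int valid_shared_address(uint32_t addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

int main(void)
{
	uint32_t nvram_token = (~0x0400u << 16) | 0x0400;	/* length 0x400 + complement */
	uint32_t shared_ptr = 0x0003f5dc;			/* plausible RAM address */

	printf("0x%08x valid? %d\n", nvram_token, valid_shared_address(nvram_token));	/* 0 */
	printf("0x%08x valid? %d\n", shared_ptr, valid_shared_address(shared_ptr));	/* 1 */
	return 0;
}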
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
@ -996,6 +1147,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
else
brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
bus->sdpcm_ver);
/*
* Retrieve console state address now that firmware should have
* updated it.
*/
brcmf_sdio_get_console_addr(bus);
}
/*
@ -2293,14 +2450,13 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
u8 idx;
struct brcmf_core *buscore;
u32 addr;
unsigned long val;
int n, ret;
idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, intstatus);
buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
@ -2810,72 +2966,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
#ifdef DEBUG
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared *sh)
{
u32 addr;
int rv;
u32 shaddr = 0;
struct sdpcm_shared_le sh_le;
__le32 addr_le;
shaddr = bus->ci->rambase + bus->ramsize - 4;
/*
* Read last word in socram to determine
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_bus_sleep(bus, false, false);
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
addr = le32_to_cpu(addr_le);
brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
/*
* Check if addr is valid.
* NVRAM length at the end of memory should have been overwritten.
*/
if (!brcmf_sdio_valid_shared_address(addr)) {
brcmf_err("invalid sdpcm_shared address 0x%08X\n",
addr);
return -EINVAL;
}
/* Read hndrte_shared structure */
rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
if (rv < 0)
return rv;
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
sh->assert_line = le32_to_cpu(sh_le.assert_line);
sh->console_addr = le32_to_cpu(sh_le.console_addr);
sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
SDPCM_SHARED_VERSION,
sh->flags & SDPCM_SHARED_VERSION_MASK);
return -EPROTO;
}
return 0;
}
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
struct sdpcm_shared *sh, char __user *data,
size_t count)
@ -3105,6 +3195,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
debugfs_create_file("forensics", S_IRUGO, dentry, bus,
&brcmf_sdio_forensic_ops);
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
debugfs_create_u32("console_interval", 0644, dentry,
&bus->console_interval);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@ -3223,32 +3315,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
const struct firmware *fw)
{
int err;
int offset;
int address;
int len;
brcmf_dbg(TRACE, "Enter\n");
err = 0;
offset = 0;
address = bus->ci->rambase;
while (offset < fw->size) {
len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
fw->size - offset;
err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
(u8 *)&fw->data[offset], len);
if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
err, len, address);
return err;
}
offset += len;
address += len;
}
if (!err)
if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
(u8 *)fw->data, fw->size))
err = -EIO;
err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
(u8 *)fw->data, fw->size);
if (err)
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
err, (int)fw->size, bus->ci->rambase);
else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
(u8 *)fw->data, fw->size))
err = -EIO;
return err;
}
@ -3291,7 +3368,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
brcmf_chip_enter_download(bus->ci);
fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL) {
@ -3323,7 +3400,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
}
/* Take arm out of reset */
if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@ -3338,40 +3415,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
return bcmerror;
}
static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
{
u32 addr, reg, pmu_cc3_mask = ~0;
int err;
brcmf_dbg(TRACE, "Enter\n");
/* old chips with PMU version less than 17 don't support save restore */
if (bus->ci->pmurev < 17)
return false;
switch (bus->ci->chip) {
case BCM43241_CHIP_ID:
case BCM4335_CHIP_ID:
case BCM4339_CHIP_ID:
/* read PMU chipcontrol register 3 */
addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
return (reg & pmu_cc3_mask) != 0;
default:
addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
return false;
addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
}
}
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
@ -3423,7 +3466,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
if (bus->ci->c_inf[1].rev < 12)
if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
return 0;
val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@ -3454,15 +3497,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
uint pad_size;
u32 value;
u8 idx;
int err;
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
if (bus->ci->c_inf[idx].rev < 12) {
if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@ -3573,7 +3614,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
ret = -ENODEV;
}
if (brcmf_sdio_sr_capable(bus)) {
if (brcmf_chip_sr_capable(bus->ci)) {
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
@ -3722,6 +3763,170 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
}
}
static void
brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u32 drivestrength)
{
const struct sdiod_drive_str *str_tab = NULL;
u32 str_mask;
u32 str_shift;
u32 base;
u32 i;
u32 drivestrength_sel = 0;
u32 cc_data_temp;
u32 addr;
if (!(ci->cc_caps & CC_CAP_PMU))
return;
switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
str_tab = sdiod_drvstr_tab1_1v8;
str_mask = 0x00003800;
str_shift = 11;
break;
case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
str_tab = sdiod_drvstr_tab6_1v8;
str_mask = 0x00001800;
str_shift = 11;
break;
case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
/* note: 43143 does not support tristate */
i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
str_tab = sdiod_drvstr_tab2_3v3;
str_mask = 0x00000007;
str_shift = 0;
} else
brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
ci->name, drivestrength);
break;
case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
str_tab = sdiod_drive_strength_tab5_1v8;
str_mask = 0x00003800;
str_shift = 11;
break;
default:
brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
ci->name, ci->chiprev, ci->pmurev);
break;
}
if (str_tab != NULL) {
for (i = 0; str_tab[i].strength != 0; i++) {
if (drivestrength >= str_tab[i].strength) {
drivestrength_sel = str_tab[i].sel;
break;
}
}
base = brcmf_chip_get_chipcommon(ci)->base;
addr = CORE_CC_REG(base, chipcontrol_addr);
brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
}
}
static int brcmf_sdio_buscoreprep(void *ctx)
{
struct brcmf_sdio_dev *sdiodev = ctx;
int err = 0;
u8 clkval, clkset;
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
}
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
clkval = brcmf_sdiod_regrb(sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
clkset, clkval);
return -EACCES;
}
SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
clkval);
return -EBUSY;
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
u32 rstvec)
{
struct brcmf_sdio_dev *sdiodev = ctx;
struct brcmf_core *core;
u32 reg_addr;
/* clear all interrupts */
core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
if (rstvec)
/* Write reset vector to address 0 */
brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
sizeof(rstvec));
}
static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
{
struct brcmf_sdio_dev *sdiodev = ctx;
u32 val, rev;
val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
val &= ~CID_ID_MASK;
val |= BCM4339_CHIP_ID;
}
}
return val;
}
static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
{
struct brcmf_sdio_dev *sdiodev = ctx;
brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
}
static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
.prepare = brcmf_sdio_buscoreprep,
.exit_dl = brcmf_sdio_buscore_exitdl,
.read32 = brcmf_sdio_buscore_read32,
.write32 = brcmf_sdio_buscore_write32,
};
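This ops table is what brcmf_sdio_probe_attach() below passes to brcmf_chip_attach(), so every register access from the common chip code goes through these SDIO callbacks; read32 additionally rewrites the chip ID when a shared 4335/4339 part reports rev >= 2. A standalone sketch of just that ID fix-up (the CID_* field layout here is illustrative):

#include <stdio.h>
#include <stdint.h>

#define CID_ID_MASK	0x0000ffff	/* illustrative chipid field layout */
#define CID_REV_MASK	0x000f0000
#define CID_REV_SHIFT	16

/* Rev >= 2 of the shared ID really is a 4339, so rewrite the ID field before
 * the chip layer looks at it (cf. brcmf_sdio_buscore_read32() above). */
static uint32_t fixup_chipid(uint32_t val)
{
	uint32_t rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;

	if (rev >= 2) {
		val &= ~CID_ID_MASK;
		val |= 0x4339;
	}
	return val;
}

int main(void)
{
	uint32_t raw = (2u << CID_REV_SHIFT) | 0x4335;	/* a 4335 reporting rev 2 */

	printf("chipid 0x%08x -> 0x%08x\n", raw, fixup_chipid(raw));
	return 0;
}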
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
@ -3737,7 +3942,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_sdio_chip_attach()
* Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
@ -3758,8 +3963,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
brcmf_err("brcmf_sdio_chip_attach failed!\n");
bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
if (IS_ERR(bus->ci)) {
brcmf_err("brcmf_chip_attach failed!\n");
bus->ci = NULL;
goto fail;
}
@ -3772,7 +3979,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = bus->sdiodev->pdata->drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
@ -3795,24 +4002,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
pmucontrol);
reg_val = brcmf_sdiod_regrl(bus->sdiodev,
reg_addr,
&err);
reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
brcmf_sdiod_regwl(bus->sdiodev,
reg_addr,
reg_val,
&err);
brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@ -4027,14 +4228,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
cancel_work_sync(&bus->datawork);
if (bus->brcmf_wq)
destroy_workqueue(bus->brcmf_wq);
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
}
cancel_work_sync(&bus->datawork);
if (bus->brcmf_wq)
destroy_workqueue(bus->brcmf_wq);
if (bus->ci) {
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
sdio_claim_host(bus->sdiodev->func[1]);
@ -4045,12 +4246,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
* all necessary cores.
*/
msleep(20);
brcmf_sdio_chip_enter_download(bus->sdiodev,
bus->ci);
brcmf_chip_enter_download(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
brcmf_sdio_chip_detach(&bus->ci);
brcmf_chip_detach(bus->ci);
}
brcmu_pkt_buf_free_skb(bus->txglom_sgpad);

View file

@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
brcmf_create_iovar(char *name, const char *data, u32 datalen,
char *buf, u32 buflen)
{
u32 len;
@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
s32
brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;

View file

@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
u32 len);

View file

@ -1,972 +0,0 @@
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* ***** SDIO interface chip backplane handle functions ***** */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
#include <chipcommon.h>
#include <brcm_hw_ids.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <soc.h>
#include "dhd_dbg.h"
#include "sdio_host.h"
#include "sdio_chip.h"
/* chip core base & ramsize */
/* bcm4329 */
/* SDIO device core, ID 0x829 */
#define BCM4329_CORE_BUS_BASE 0x18011000
/* internal memory core, ID 0x80e */
#define BCM4329_CORE_SOCRAM_BASE 0x18003000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE 0x18002000
#define BCM4329_RAMSIZE 0x48000
/* bcm43143 */
/* SDIO device core */
#define BCM43143_CORE_BUS_BASE 0x18002000
/* internal memory core */
#define BCM43143_CORE_SOCRAM_BASE 0x18004000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM43143_CORE_ARM_BASE 0x18003000
#define BCM43143_RAMSIZE 0x70000
/* All D11 cores, ID 0x812 */
#define BCM43xx_CORE_D11_BASE 0x18001000
#define SBCOREREV(sbidh) \
((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
((sbidh) & SSB_IDHIGH_RCLO))
/* SOC Interconnect types (aka chip types) */
#define SOCI_SB 0
#define SOCI_AI 1
/* EROM CompIdentB */
#define CIB_REV_MASK 0xff000000
#define CIB_REV_SHIFT 24
/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
/* D11 core specific control flag bits */
#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
#define D11_BCMA_IOCTL_PHYRESET 0x0008
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
u8 strength; /* Pad Drive Strength in mA */
u8 sel; /* Chip-specific select value */
};
/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
{32, 0x6},
{26, 0x7},
{22, 0x4},
{16, 0x5},
{12, 0x2},
{8, 0x3},
{4, 0x0},
{0, 0x1}
};
/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
{6, 0x7},
{5, 0x6},
{4, 0x5},
{3, 0x4},
{2, 0x2},
{1, 0x1},
{0, 0x0}
};
/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
{3, 0x3},
{2, 0x2},
{1, 0x1},
{0, 0x0} };
/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
{16, 0x7},
{12, 0x5},
{8, 0x3},
{4, 0x1}
};
u8
brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
if (coreid == ci->c_inf[idx].id)
return idx;
return BRCMF_MAX_CORENUM;
}
static u32
brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbidhigh),
NULL);
return SBCOREREV(regdata);
}
static u32
brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}
static bool
brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return false;
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
NULL);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
}
static bool
brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return false;
regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
regdata = brcmf_sdiod_regrl(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
NULL);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
}
static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits)
{
u32 regdata, base;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
base = ci->c_inf[idx].base;
regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
return;
regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbtmstatelow), NULL);
brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
regdata | SSB_TMSLOW_REJECT, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbtmstatelow), NULL);
udelay(1);
SPINWAIT((brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbtmstatehigh),
NULL) &
SSB_TMSHIGH_BUSY), 100000);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbtmstatehigh),
NULL);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_err("core state still busy\n");
regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbimstate),
NULL);
regdata |= SSB_IMSTATE_REJECT;
brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
regdata, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbimstate),
NULL);
udelay(1);
SPINWAIT((brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbimstate),
NULL) &
SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
regdata, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbtmstatelow), NULL);
udelay(10);
/* clear the initiator reject bit */
regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(base, sbimstate),
NULL);
regdata &= ~SSB_IMSTATE_REJECT;
brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
regdata, NULL);
}
}
/* leave reset and reject asserted */
brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
(SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
udelay(1);
}
static void
brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits)
{
u8 idx;
u32 regdata;
u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
wrapbase = ci->c_inf[idx].wrapbase;
/* if core is already in reset, just return */
regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
/* configure reset */
brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
/* put in reset */
brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
BCMA_RESET_CTL_RESET, NULL);
usleep_range(10, 20);
/* wait till reset is 1 */
SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
BCMA_RESET_CTL_RESET, 300);
/* post reset configure */
brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
static void
brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits, u32 post_resetbits)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
/*
* Must do the disable sequence first to work for
* arbitrary current core state.
*/
brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
in_resetbits);
/*
* Now do the initialization sequence.
* set reset while enabling the clock and
* forcing them on throughout the core
*/
brcmf_sdiod_regwl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
NULL);
udelay(1);
/* clear any serror */
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
NULL);
if (regdata & SSB_TMSHIGH_SERR)
brcmf_sdiod_regwl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
0, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbimstate),
NULL);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
brcmf_sdiod_regwl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbimstate),
regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
NULL);
/* clear reset and allow it to propagate throughout the core */
brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
NULL);
udelay(1);
/* leave clock enabled */
brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
SSB_TMSLOW_CLOCK, NULL);
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
NULL);
udelay(1);
}
static void
brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits, u32 post_resetbits)
{
u8 idx;
u32 regdata;
u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
wrapbase = ci->c_inf[idx].wrapbase;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
in_resetbits);
while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
BCMA_RESET_CTL_RESET) {
brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
usleep_range(40, 60);
}
brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
BCMA_IOCTL_CLK, NULL);
regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
#ifdef DEBUG
/* safety check for chipinfo */
static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
u8 core_idx;
/* check RAM core presence for ARM CM3 core */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
if (BRCMF_MAX_CORENUM != core_idx) {
core_idx = brcmf_sdio_chip_getinfidx(ci,
BCMA_CORE_INTERNAL_MEM);
if (BRCMF_MAX_CORENUM == core_idx) {
brcmf_err("RAM core not provided with ARM CM3 core\n");
return -ENODEV;
}
}
/* check RAM base for ARM CR4 core */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
if (BRCMF_MAX_CORENUM != core_idx) {
if (ci->rambase == 0) {
brcmf_err("RAM base not provided with ARM CR4 core\n");
return -ENOMEM;
}
}
return 0;
}
#else /* DEBUG */
static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
return 0;
}
#endif
static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
u32 regdata;
u32 socitype;
/* Get CC core rev
* Chipid is assumed to be at offset 0 from SI_ENUM_BASE
* For different chiptypes or old sdio hosts w/o chipcommon,
* other ways of recognition should be added here.
*/
regdata = brcmf_sdiod_regrl(sdiodev,
CORE_CC_REG(SI_ENUM_BASE, chipid),
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
ci->chiprev >= 2)
ci->chip = BCM4339_CHIP_ID;
socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
if (socitype == SOCI_SB) {
if (ci->chip != BCM4329_CHIP_ID) {
brcmf_err("SB chip is not supported\n");
return -ENODEV;
}
ci->iscoreup = brcmf_sdio_sb_iscoreup;
ci->corerev = brcmf_sdio_sb_corerev;
ci->coredisable = brcmf_sdio_sb_coredisable;
ci->resetcore = brcmf_sdio_sb_resetcore;
ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
ci->c_inf[0].base = SI_ENUM_BASE;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->ramsize = BCM4329_RAMSIZE;
} else if (socitype == SOCI_AI) {
ci->iscoreup = brcmf_sdio_ai_iscoreup;
ci->corerev = brcmf_sdio_ai_corerev;
ci->coredisable = brcmf_sdio_ai_coredisable;
ci->resetcore = brcmf_sdio_ai_resetcore;
ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
ci->c_inf[0].base = SI_ENUM_BASE;
/* Address of cores for new chips should be added here */
switch (ci->chip) {
case BCM43143_CHIP_ID:
ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
ci->c_inf[0].cib = 0x2b000000;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
ci->c_inf[1].cib = 0x18000000;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
ci->c_inf[2].cib = 0x14000000;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
ci->c_inf[3].cib = 0x07000000;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
ci->ramsize = BCM43143_RAMSIZE;
break;
case BCM43241_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x2a084411;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18002000;
ci->c_inf[1].wrapbase = 0x18102000;
ci->c_inf[1].cib = 0x0e004211;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = 0x18004000;
ci->c_inf[2].wrapbase = 0x18104000;
ci->c_inf[2].cib = 0x14080401;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = 0x18003000;
ci->c_inf[3].wrapbase = 0x18103000;
ci->c_inf[3].cib = 0x07004211;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
ci->ramsize = 0x90000;
break;
case BCM4330_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x27004211;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18002000;
ci->c_inf[1].wrapbase = 0x18102000;
ci->c_inf[1].cib = 0x07004211;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = 0x18004000;
ci->c_inf[2].wrapbase = 0x18104000;
ci->c_inf[2].cib = 0x0d080401;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = 0x18003000;
ci->c_inf[3].wrapbase = 0x18103000;
ci->c_inf[3].cib = 0x03004211;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
ci->ramsize = 0x48000;
break;
case BCM4334_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x29004211;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18002000;
ci->c_inf[1].wrapbase = 0x18102000;
ci->c_inf[1].cib = 0x0d004211;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = 0x18004000;
ci->c_inf[2].wrapbase = 0x18104000;
ci->c_inf[2].cib = 0x13080401;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = 0x18003000;
ci->c_inf[3].wrapbase = 0x18103000;
ci->c_inf[3].cib = 0x07004211;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
ci->ramsize = 0x80000;
break;
case BCM4335_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x2b084411;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18005000;
ci->c_inf[1].wrapbase = 0x18105000;
ci->c_inf[1].cib = 0x0f004211;
ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
ci->c_inf[2].base = 0x18002000;
ci->c_inf[2].wrapbase = 0x18102000;
ci->c_inf[2].cib = 0x01084411;
ci->c_inf[3].id = BCMA_CORE_80211;
ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
case BCM43362_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x27004211;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18002000;
ci->c_inf[1].wrapbase = 0x18102000;
ci->c_inf[1].cib = 0x0a004211;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = 0x18004000;
ci->c_inf[2].wrapbase = 0x18104000;
ci->c_inf[2].cib = 0x08080401;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = 0x18003000;
ci->c_inf[3].wrapbase = 0x18103000;
ci->c_inf[3].cib = 0x03004211;
ci->c_inf[4].id = BCMA_CORE_80211;
ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
ci->ramsize = 0x3C000;
break;
case BCM4339_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x2e084411;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18005000;
ci->c_inf[1].wrapbase = 0x18105000;
ci->c_inf[1].cib = 0x15004211;
ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
ci->c_inf[2].base = 0x18002000;
ci->c_inf[2].wrapbase = 0x18102000;
ci->c_inf[2].cib = 0x04084411;
ci->c_inf[3].id = BCMA_CORE_80211;
ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
default:
brcmf_err("AXI chip is not supported\n");
return -ENODEV;
}
} else {
brcmf_err("chip backplane type %u is not supported\n",
socitype);
return -ENODEV;
}
return brcmf_sdio_chip_cichk(ci);
}
static int
brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
{
int err = 0;
u8 clkval, clkset;
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
}
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
clkval = brcmf_sdiod_regrb(sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
clkset, clkval);
return -EACCES;
}
SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
clkval);
return -EBUSY;
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
u32 base = ci->c_inf[0].base;
/* get chipcommon rev */
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilites */
ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
CORE_CC_REG(base, capabilities),
NULL);
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
ci->pmucaps =
brcmf_sdiod_regrl(sdiodev,
CORE_CC_REG(base, pmucapabilities),
NULL);
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
ci->c_inf[0].rev, ci->pmurev,
ci->c_inf[1].rev, ci->c_inf[1].id);
/*
* Make sure any on-chip ARM is off (in case strapping is wrong),
* or downloaded code was already running.
*/
ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
}
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip **ci_ptr)
{
int ret;
struct brcmf_chip *ci;
brcmf_dbg(TRACE, "Enter\n");
ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
if (!ci)
return -ENOMEM;
ret = brcmf_sdio_chip_buscoreprep(sdiodev);
if (ret != 0)
goto err;
ret = brcmf_sdio_chip_recognition(sdiodev, ci);
if (ret != 0)
goto err;
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
0, NULL);
brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
0, NULL);
*ci_ptr = ci;
return 0;
err:
kfree(ci);
return ret;
}
void
brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
{
brcmf_dbg(TRACE, "Enter\n");
kfree(*ci_ptr);
*ci_ptr = NULL;
}
static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
{
const char *fmt;
fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
snprintf(buf, len, fmt, chipid);
return buf;
}
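brcmf_sdio_chip_name() prints the ID the way Broadcom parts are usually named: IDs between 0x4000 and 0xa000 come out in hex, everything else in decimal. A quick illustration of the two paths (buffer handling as in the drive-strength caller below):

char chn[8];

brcmf_sdio_chip_name(0x4339, chn, sizeof(chn));	/* -> "4339"  (hex)     */
brcmf_sdio_chip_name(43143, chn, sizeof(chn));	/* -> "43143" (decimal) */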
void
brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u32 drivestrength)
{
const struct sdiod_drive_str *str_tab = NULL;
u32 str_mask;
u32 str_shift;
char chn[8];
u32 base = ci->c_inf[0].base;
u32 i;
u32 drivestrength_sel = 0;
u32 cc_data_temp;
u32 addr;
if (!(ci->c_inf[0].caps & CC_CAP_PMU))
return;
switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
str_tab = sdiod_drvstr_tab1_1v8;
str_mask = 0x00003800;
str_shift = 11;
break;
case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
str_tab = sdiod_drvstr_tab6_1v8;
str_mask = 0x00001800;
str_shift = 11;
break;
case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
/* note: 43143 does not support tristate */
i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
str_tab = sdiod_drvstr_tab2_3v3;
str_mask = 0x00000007;
str_shift = 0;
} else
brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
brcmf_sdio_chip_name(ci->chip, chn, 8),
drivestrength);
break;
case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
str_tab = sdiod_drive_strength_tab5_1v8;
str_mask = 0x00003800;
str_shift = 11;
break;
default:
brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
brcmf_sdio_chip_name(ci->chip, chn, 8),
ci->chiprev, ci->pmurev);
break;
}
if (str_tab != NULL) {
for (i = 0; str_tab[i].strength != 0; i++) {
if (drivestrength >= str_tab[i].strength) {
drivestrength_sel = str_tab[i].sel;
break;
}
}
addr = CORE_CC_REG(base, chipcontrol_addr);
brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
}
}
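The tail of this function is a classic indirect read-modify-write: select the PMU chipcontrol register through chipcontrol_addr, clear the drive-strength field given by str_mask, and OR in the table's selection code shifted by str_shift. A small worked example of just that bit math (the numbers are hypothetical, since the sdiod_drvstr_tab* contents are not shown in this diff):

#include <stdint.h>

/* e.g. mask = 0x00003800, shift = 11, sel = 5 (hypothetical values) */
static uint32_t apply_drive_strength(uint32_t chipcontrol, uint32_t sel,
				     uint32_t mask, uint32_t shift)
{
	chipcontrol &= ~mask;		/* clear the old drive-strength field */
	chipcontrol |= sel << shift;	/* 5 << 11 == 0x2800, lands in bits 13:11 */
	return chipcontrol;		/* written back through chipcontrol_data */
}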
static void
brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
}
static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
u8 core_idx;
u32 reg_addr;
if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
brcmf_err("SOCRAM core is down after reset?\n");
return false;
}
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
return true;
}
static inline void
brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
u8 idx;
u32 regdata;
u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
if (idx == BRCMF_MAX_CORENUM)
return;
wrapbase = ci->c_inf[idx].wrapbase;
regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
}
static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u32 rstvec)
{
u8 core_idx;
u32 reg_addr;
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
/* Write reset vector to address 0 */
brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
sizeof(rstvec));
/* restore ARM */
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
0, 0);
return true;
}
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci)
{
u8 arm_core_idx;
arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
if (BRCMF_MAX_CORENUM != arm_core_idx) {
brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
return;
}
brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
}
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u32 rstvec)
{
u8 arm_core_idx;
arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
if (BRCMF_MAX_CORENUM != arm_core_idx)
return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
}

View file

@ -1,231 +0,0 @@
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMFMAC_SDIO_CHIP_H_
#define _BRCMFMAC_SDIO_CHIP_H_
/*
* Core reg address translation.
* These macros return a 32-bit byte address on the backplane bus.
*/
#define CORE_CC_REG(base, field) \
(base + offsetof(struct chipcregs, field))
#define CORE_BUS_REG(base, field) \
(base + offsetof(struct sdpcmd_regs, field))
#define CORE_SB(base, field) \
(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
/* SDIO function 1 register CHIPCLKCSR */
/* Force ALP request to backplane */
#define SBSDIO_FORCE_ALP 0x01
/* Force HT request to backplane */
#define SBSDIO_FORCE_HT 0x02
/* Force ILP request to backplane */
#define SBSDIO_FORCE_ILP 0x04
/* Make ALP ready (power up xtal) */
#define SBSDIO_ALP_AVAIL_REQ 0x08
/* Make HT ready (power up PLL) */
#define SBSDIO_HT_AVAIL_REQ 0x10
/* Squelch clock requests from HW */
#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
/* Status: ALP is ready */
#define SBSDIO_ALP_AVAIL 0x40
/* Status: HT is ready */
#define SBSDIO_HT_AVAIL 0x80
#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
#define SBSDIO_CLKAV(regval, alponly) \
(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
#define BRCMF_MAX_CORENUM 6
struct brcmf_core {
u16 id;
u16 rev;
u32 base;
u32 wrapbase;
u32 caps;
u32 cib;
};
struct brcmf_chip {
u32 chip;
u32 chiprev;
/* core info */
/* always put chipcommon core at 0, bus core at 1 */
struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
u32 pmurev;
u32 pmucaps;
u32 ramsize;
u32 rambase;
u32 rst_vec; /* reset vector for ARM CR4 core */
bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits);
void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
u32 in_resetbits, u32 post_resetbits);
};
struct sbconfig {
u32 PAD[2];
u32 sbipsflag; /* initiator port ocp slave flag */
u32 PAD[3];
u32 sbtpsflag; /* target port ocp slave flag */
u32 PAD[11];
u32 sbtmerrloga; /* (sonics >= 2.3) */
u32 PAD;
u32 sbtmerrlog; /* (sonics >= 2.3) */
u32 PAD[3];
u32 sbadmatch3; /* address match3 */
u32 PAD;
u32 sbadmatch2; /* address match2 */
u32 PAD;
u32 sbadmatch1; /* address match1 */
u32 PAD[7];
u32 sbimstate; /* initiator agent state */
u32 sbintvec; /* interrupt mask */
u32 sbtmstatelow; /* target state */
u32 sbtmstatehigh; /* target state */
u32 sbbwa0; /* bandwidth allocation table0 */
u32 PAD;
u32 sbimconfiglow; /* initiator configuration */
u32 sbimconfighigh; /* initiator configuration */
u32 sbadmatch0; /* address match0 */
u32 PAD;
u32 sbtmconfiglow; /* target configuration */
u32 sbtmconfighigh; /* target configuration */
u32 sbbconfig; /* broadcast configuration */
u32 PAD;
u32 sbbstate; /* broadcast state */
u32 PAD[3];
u32 sbactcnfg; /* activate configuration */
u32 PAD[3];
u32 sbflagst; /* current sbflags */
u32 PAD[3];
u32 sbidlow; /* identification */
u32 sbidhigh; /* identification */
};
/* sdio core registers */
struct sdpcmd_regs {
u32 corecontrol; /* 0x00, rev8 */
u32 corestatus; /* rev8 */
u32 PAD[1];
u32 biststatus; /* rev8 */
/* PCMCIA access */
u16 pcmciamesportaladdr; /* 0x010, rev8 */
u16 PAD[1];
u16 pcmciamesportalmask; /* rev8 */
u16 PAD[1];
u16 pcmciawrframebc; /* rev8 */
u16 PAD[1];
u16 pcmciaunderflowtimer; /* rev8 */
u16 PAD[1];
/* interrupt */
u32 intstatus; /* 0x020, rev8 */
u32 hostintmask; /* rev8 */
u32 intmask; /* rev8 */
u32 sbintstatus; /* rev8 */
u32 sbintmask; /* rev8 */
u32 funcintmask; /* rev4 */
u32 PAD[2];
u32 tosbmailbox; /* 0x040, rev8 */
u32 tohostmailbox; /* rev8 */
u32 tosbmailboxdata; /* rev8 */
u32 tohostmailboxdata; /* rev8 */
/* synchronized access to registers in SDIO clock domain */
u32 sdioaccess; /* 0x050, rev8 */
u32 PAD[3];
/* PCMCIA frame control */
u8 pcmciaframectrl; /* 0x060, rev8 */
u8 PAD[3];
u8 pcmciawatermark; /* rev8 */
u8 PAD[155];
/* interrupt batching control */
u32 intrcvlazy; /* 0x100, rev8 */
u32 PAD[3];
/* counters */
u32 cmd52rd; /* 0x110, rev8 */
u32 cmd52wr; /* rev8 */
u32 cmd53rd; /* rev8 */
u32 cmd53wr; /* rev8 */
u32 abort; /* rev8 */
u32 datacrcerror; /* rev8 */
u32 rdoutofsync; /* rev8 */
u32 wroutofsync; /* rev8 */
u32 writebusy; /* rev8 */
u32 readwait; /* rev8 */
u32 readterm; /* rev8 */
u32 writeterm; /* rev8 */
u32 PAD[40];
u32 clockctlstatus; /* rev8 */
u32 PAD[7];
u32 PAD[128]; /* DMA engines */
/* SDIO/PCMCIA CIS region */
char cis[512]; /* 0x400-0x5ff, rev6 */
/* PCMCIA function control registers */
char pcmciafcr[256]; /* 0x600-6ff, rev6 */
u16 PAD[55];
/* PCMCIA backplane access */
u16 backplanecsr; /* 0x76E, rev6 */
u16 backplaneaddr0; /* rev6 */
u16 backplaneaddr1; /* rev6 */
u16 backplaneaddr2; /* rev6 */
u16 backplaneaddr3; /* rev6 */
u16 backplanedata0; /* rev6 */
u16 backplanedata1; /* rev6 */
u16 backplanedata2; /* rev6 */
u16 backplanedata3; /* rev6 */
u16 PAD[31];
/* sprom "size" & "blank" info */
u16 spromstatus; /* 0x7BE, rev2 */
u32 PAD[464];
u16 PAD[0x80];
};
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip **ci_ptr);
void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci,
u32 drivestrength);
u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci);
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
struct brcmf_chip *ci, u32 rstvec);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */

View file

@ -182,6 +182,95 @@ struct brcmf_sdio_dev {
uint max_segment_size;
};
/* sdio core registers */
struct sdpcmd_regs {
u32 corecontrol; /* 0x00, rev8 */
u32 corestatus; /* rev8 */
u32 PAD[1];
u32 biststatus; /* rev8 */
/* PCMCIA access */
u16 pcmciamesportaladdr; /* 0x010, rev8 */
u16 PAD[1];
u16 pcmciamesportalmask; /* rev8 */
u16 PAD[1];
u16 pcmciawrframebc; /* rev8 */
u16 PAD[1];
u16 pcmciaunderflowtimer; /* rev8 */
u16 PAD[1];
/* interrupt */
u32 intstatus; /* 0x020, rev8 */
u32 hostintmask; /* rev8 */
u32 intmask; /* rev8 */
u32 sbintstatus; /* rev8 */
u32 sbintmask; /* rev8 */
u32 funcintmask; /* rev4 */
u32 PAD[2];
u32 tosbmailbox; /* 0x040, rev8 */
u32 tohostmailbox; /* rev8 */
u32 tosbmailboxdata; /* rev8 */
u32 tohostmailboxdata; /* rev8 */
/* synchronized access to registers in SDIO clock domain */
u32 sdioaccess; /* 0x050, rev8 */
u32 PAD[3];
/* PCMCIA frame control */
u8 pcmciaframectrl; /* 0x060, rev8 */
u8 PAD[3];
u8 pcmciawatermark; /* rev8 */
u8 PAD[155];
/* interrupt batching control */
u32 intrcvlazy; /* 0x100, rev8 */
u32 PAD[3];
/* counters */
u32 cmd52rd; /* 0x110, rev8 */
u32 cmd52wr; /* rev8 */
u32 cmd53rd; /* rev8 */
u32 cmd53wr; /* rev8 */
u32 abort; /* rev8 */
u32 datacrcerror; /* rev8 */
u32 rdoutofsync; /* rev8 */
u32 wroutofsync; /* rev8 */
u32 writebusy; /* rev8 */
u32 readwait; /* rev8 */
u32 readterm; /* rev8 */
u32 writeterm; /* rev8 */
u32 PAD[40];
u32 clockctlstatus; /* rev8 */
u32 PAD[7];
u32 PAD[128]; /* DMA engines */
/* SDIO/PCMCIA CIS region */
char cis[512]; /* 0x400-0x5ff, rev6 */
/* PCMCIA function control registers */
char pcmciafcr[256]; /* 0x600-6ff, rev6 */
u16 PAD[55];
/* PCMCIA backplane access */
u16 backplanecsr; /* 0x76E, rev6 */
u16 backplaneaddr0; /* rev6 */
u16 backplaneaddr1; /* rev6 */
u16 backplaneaddr2; /* rev6 */
u16 backplaneaddr3; /* rev6 */
u16 backplanedata0; /* rev6 */
u16 backplanedata1; /* rev6 */
u16 backplanedata2; /* rev6 */
u16 backplanedata3; /* rev6 */
u16 PAD[31];
/* sprom "size" & "blank" info */
u16 spromstatus; /* 0x7BE, rev2 */
u32 PAD[464];
u16 PAD[0x80];
};
/* Register/deregister interrupt handler. */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);

View file

@ -351,13 +351,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
const struct brcmf_tlv *
brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
struct brcmf_tlv *elt;
int totlen;
elt = (struct brcmf_tlv *)buf;
totlen = buflen;
const struct brcmf_tlv *elt = buf;
int totlen = buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
@ -378,8 +376,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
* not update the tlvs buffer pointer/length.
*/
static bool
brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
u8 *oui, u32 oui_len, u8 type)
brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@ -401,12 +399,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
}
static struct brcmf_vs_tlv *
brcmf_find_wpaie(u8 *parse, u32 len)
brcmf_find_wpaie(const u8 *parse, u32 len)
{
struct brcmf_tlv *ie;
const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
}
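The constified brcmf_parse_tlvs()/brcmf_tlv_has_ie() pair walk a standard 802.11 information-element buffer: one byte of ID, one byte of length, then the payload. A free-standing sketch of that walk under the same layout (this is not the driver's code, just the pattern it implements):

#include <stddef.h>
#include <stdint.h>

struct ie_hdr {			/* same layout idea as struct brcmf_tlv */
	uint8_t id;
	uint8_t len;
	uint8_t data[];
};

/* Return the first element whose id matches, or NULL if none fits. */
static const struct ie_hdr *find_ie(const uint8_t *buf, size_t buflen,
				    uint8_t id)
{
	while (buflen >= 2) {
		const struct ie_hdr *ie = (const struct ie_hdr *)buf;
		size_t elen = 2u + ie->len;

		if (elen > buflen)
			break;			/* truncated element: stop */
		if (ie->id == id)
			return ie;
		buf += elen;
		buflen -= elen;
	}
	return NULL;
}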
@ -414,9 +412,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
}
static struct brcmf_vs_tlv *
brcmf_find_wpsie(u8 *parse, u32 len)
brcmf_find_wpsie(const u8 *parse, u32 len)
{
struct brcmf_tlv *ie;
const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@ -1562,9 +1560,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
void *ie;
const struct brcmf_tlv *rsn_ie;
const struct brcmf_vs_tlv *wpa_ie;
const void *ie;
u32 ie_len;
struct brcmf_ext_join_params_le *ext_join_params;
u16 chanspec;
@ -1591,7 +1589,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ie_len = wpa_ie->len + TLV_HDR_LEN;
} else {
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
sme->ie_len,
WLAN_EID_RSN);
if (rsn_ie) {
ie = rsn_ie;
@ -1981,7 +1980,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
if (mac_addr) {
if (mac_addr &&
(params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
(params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
@ -2164,6 +2165,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
u8 *bssid = profile->bssid;
struct brcmf_sta_info_le sta_info_le;
u32 beacon_period;
u32 dtim_period;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
@ -2218,6 +2221,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
&beacon_period);
if (err) {
brcmf_err("Could not get beacon period (%d)\n",
err);
goto done;
} else {
sinfo->bss_param.beacon_interval =
beacon_period;
brcmf_dbg(CONN, "Beacon period %d\n",
beacon_period);
}
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
&dtim_period);
if (err) {
brcmf_err("Could not get DTIM period (%d)\n",
err);
goto done;
} else {
sinfo->bss_param.dtim_period = dtim_period;
brcmf_dbg(CONN, "DTIM period %d\n",
dtim_period);
}
sinfo->filled |= STATION_INFO_BSS_PARAM;
}
} else
err = -EPERM;
@ -2455,7 +2482,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@ -3220,8 +3247,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie)
brcmf_configure_wpaie(struct net_device *ndev,
const struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie)
{
struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
@ -3707,11 +3735,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_tlv *ssid_ie;
const struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
const struct brcmf_tlv *rsn_ie;
const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@ -4658,6 +4686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct net_device *ndev = ifp->ndev;
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan;
s32 err = 0;
if (ifp->vif->mode == WL_MODE_AP) {
@ -4665,9 +4694,10 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
if (brcmf_is_ibssmode(ifp->vif)) {
chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_CONNECTED,
@ -5164,9 +5194,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
ch.chnum, band_chan_arr[index].center_freq,
ch.bw, ch.sb);
if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs

View file

@ -491,7 +491,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
const struct brcmf_tlv *
brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);

View file

@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
static struct rate_control_ops rs_ops = {
.module = NULL,
static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,

View file

@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
static struct rate_control_ops rs_4965_ops = {
.module = NULL,
static const struct rate_control_ops rs_4965_ops = {
.name = IL4965_RS_NAME,
.tx_status = il4965_rs_tx_status,
.get_rate = il4965_rs_get_rate,

View file

@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLWIFI && IWLDVM=n && IWLMVM=n
config IWLWIFI_BCAST_FILTERING
bool "Enable broadcast filtering"
depends on IWLMVM
help
Say Y here to enable the default broadcast filtering configuration.
Enabling broadcast filtering will drop any incoming wireless
broadcast frames, except some very specific predefined
patterns (e.g. incoming ARP requests).
If unsure, don't enable this option, as some programs might
expect incoming broadcasts for their normal operations.
menu "Debugging Options"
depends on IWLWIFI
@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING

View file

@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += $(iwlwifi-m)

View file

@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* (2.4 GHz) band.
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
/* expected "search" throughput */
s32 *tpt_tbl = tbl->expected_tpt;
const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
struct ieee80211_sta *sta, void *priv_sta)
{
}
static struct rate_control_ops rs_ops = {
.module = NULL,
static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,

View file

@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};

View file

@ -71,8 +71,8 @@
#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 7
#define IWL3160_UCODE_API_OK 7
#define IWL7260_UCODE_API_OK 8
#define IWL3160_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
@ -95,6 +95,8 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
const struct iwl_cfg iwl7260_2ac_cfg = {
@ -194,6 +197,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
.host_interrupt_operation_mode = true,
};
static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
{.pwr = 1600, .backoff = 0},
{.pwr = 1300, .backoff = 467},
{.pwr = 900, .backoff = 1900},
{.pwr = 800, .backoff = 2630},
{.pwr = 700, .backoff = 3720},
{.pwr = 600, .backoff = 5550},
{.pwr = 500, .backoff = 9350},
{0},
};
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
@ -201,6 +215,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@ -210,6 +225,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_n_cfg = {
@ -219,6 +235,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));

View file

@ -0,0 +1,132 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 8
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 8
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL8000_FW_PRE "iwlwifi-8000-"
#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
};
static const struct iwl_ht_params iwl8000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
#define IWL_DEVICE_8000 \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
.ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
const struct iwl_cfg iwl8260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
};
const struct iwl_cfg iwl8260_n_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));

View file

@ -84,6 +84,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
IWL_DEVICE_FAMILY_8000,
};
/*
@ -192,6 +193,15 @@ struct iwl_eeprom_params {
bool enhanced_txpower;
};
/* Tx-backoff power threshold
* @pwr: The power limit in mW
* @backoff: The tx-backoff in usec
*/
struct iwl_pwr_tx_backoff {
u32 pwr;
u32 backoff;
};
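Combined with the iwl7265_pwr_tx_backoffs table added earlier in this diff (1600 mW with no backoff down to 500 mW with 9350 usec, terminated by a zero entry), the natural use is a table walk that maps the platform's reported power limit to a backoff. A sketch under that assumption, not necessarily how the driver actually consumes the table:

/* Pick a tx backoff for a platform power limit; assumes the table is
 * sorted by descending .pwr and ends with a {0} sentinel, as above.
 */
static u32 pwr_limit_to_backoff(const struct iwl_pwr_tx_backoff *tbl,
				u32 limit_mw)
{
	u32 backoff = 0;

	if (!tbl)
		return 0;			/* this config has no table */

	for (; tbl->pwr; tbl++) {
		backoff = tbl->backoff;		/* strictest backoff so far */
		if (limit_mw >= tbl->pwr)	/* first threshold we satisfy */
			break;
	}
	return backoff;
}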
/**
* struct iwl_cfg
* @name: Official name of the device
@ -217,6 +227,9 @@ struct iwl_eeprom_params {
* @high_temp: Is this NIC is designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
* @d0i3: device uses d0i3 instead of d3
* @nvm_hw_section_num: the ID of the HW NVM section
* @pwr_tx_backoffs: translation table between power limits and backoffs
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@ -247,6 +260,9 @@ struct iwl_cfg {
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
bool d0i3;
u8 nvm_hw_section_num;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
/*
@ -307,6 +323,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8260_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */

View file

@ -395,38 +395,6 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/* SECURE boot registers */
#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
enum secure_boot_config_reg {
CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
};
#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
enum secure_boot_status_reg {
CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
};
#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
enum secure_load_status_reg {
CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
};
#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
#define CSR_SECURE_TIME_OUT (100)
#define FH_TCSR_0_REG0 (0x1D00)
/*
* HBUS (Host-side Bus)
*

View file

@ -126,6 +126,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
#define IWL_DL_RPM 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@ -189,5 +190,6 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#endif

View file

@ -128,7 +128,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
char firmware_name[25]; /* name of firmware file to load */
char firmware_name[32]; /* name of firmware file to load */
struct completion request_firmware_complete;
@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)

View file

@ -95,6 +95,8 @@
* @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
* single bound interface).
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@ -119,6 +121,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/* The default calibrate table size if not specified by firmware file */
@ -160,8 +164,7 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 6
#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
#define IWL_UCODE_SECTION_MAX 12
struct iwl_ucode_capabilities {
u32 max_probe_length;

View file

@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
int t = 0;
do {
if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
return t;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
return -ETIMEDOUT;
}
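The new helper returns the elapsed wait (in microseconds, since it accumulates IWL_POLL_INTERVAL steps) on success and -ETIMEDOUT on failure, so callers only need to check for a negative value. A hypothetical call site; the register name is made up for illustration (the real users are the LMPM/RELEASE_CPU_RESET definitions added elsewhere in this series):

/* Wait up to 100 us for bit 24 of a (hypothetical) PRPH status register. */
int ret = iwl_poll_prph_bit(trans, MY_PRPH_STATUS_REG, BIT(24), BIT(24), 100);
if (ret < 0)
	IWL_ERR(trans, "timeout waiting for the CPU reset bit\n");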
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;

View file

@ -72,6 +72,8 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);

View file

@ -402,11 +402,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
tx_chains, rx_chains);
data->calib_version = 255; /* TODO:
this value will prevent some checks from
failing, we need to check if this
field is still needed, and if it does,
where is it in the NVM*/
data->calib_version = 255;
return data;
}

View file

@ -131,6 +131,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
* @enter_d0i3: configure the fw to enter d0i3. May sleep.
* @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@ -148,6 +150,8 @@ struct iwl_op_mode_ops {
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
int (*enter_d0i3)(struct iwl_op_mode *op_mode);
int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@ -155,7 +159,7 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
* @ops - pointer to its own ops
* @ops: pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
*/
@ -226,4 +230,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
{
might_sleep();
if (!op_mode->ops->enter_d0i3)
return 0;
return op_mode->ops->enter_d0i3(op_mode);
}
static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
{
might_sleep();
if (!op_mode->ops->exit_d0i3)
return 0;
return op_mode->ops->exit_d0i3(op_mode);
}
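Both wrappers treat the new hooks as optional: an op mode that does not implement them simply gets a successful no-op. A minimal sketch of an op mode opting in (the handler names and ops struct are invented for illustration):

static int demo_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
{
	/* quiesce host traffic, then ask the fw to enter d0i3 */
	return 0;
}

static int demo_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
{
	/* bring the fw back to full power and resume traffic */
	return 0;
}

static const struct iwl_op_mode_ops demo_op_mode_ops = {
	/* .start, .stop, .rx, ... as for any op mode */
	.enter_d0i3 = demo_op_mode_enter_d0i3,
	.exit_d0i3 = demo_op_mode_exit_d0i3,
};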
#endif /* __iwl_op_mode_h__ */

View file

@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
#define IWL_NUM_PAPD_CH_GROUPS 4
#define IWL_NUM_PAPD_CH_GROUPS 7
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
if (WARN_ON_ONCE(!entry->size))
if (!entry->size)
continue;
/* Send the requested PHY DB section */

View file

@ -105,6 +105,13 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
/*
* Device reset for family 8000
* write to bit 24 in order to reset the CPU
*/
#define RELEASE_CPU_RESET (0x300C)
#define RELEASE_CPU_RESET_BIT BIT(24)
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@ -281,4 +288,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
/* SECURE boot registers */
#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
enum secure_boot_config_reg {
LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
};
#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
enum secure_boot_status_reg {
LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
};
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
};
#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
#define LMPM_SECURE_TIME_OUT (100)
#endif /* __iwl_prph_h__ */

View file

@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
* @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
* command queue, but after other high priority commands. Valid only
* with CMD_ASYNC.
* @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
* @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
* @CMD_WAKE_UP_TRANS: The command response should wake up the trans
* (i.e. mark it as non-idle).
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_HIGH_PRIO = BIT(3),
CMD_SEND_IN_IDLE = BIT(4),
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
};
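Since these are bit flags they are OR-ed into a host command's flags field; per the kernel-doc above, CMD_HIGH_PRIO is only valid together with CMD_ASYNC. A hedged example of such a descriptor (the command ID and payload are placeholders, not real driver symbols):

struct iwl_host_cmd cmd = {
	.id = MY_ASYNC_CMD,			/* placeholder command ID */
	.flags = CMD_ASYNC | CMD_HIGH_PRIO,	/* async + front of the queue */
	.data = { &payload, },
	.len = { sizeof(payload), },
};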
#define DEF_CMD_PAYLOAD_SIZE 320
@ -335,6 +346,9 @@ enum iwl_d3_status {
* @STATUS_INT_ENABLED: interrupts are enabled
* @STATUS_RFKILL: the HW RFkill switch is in KILL position
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@ -343,6 +357,8 @@ enum iwl_trans_status {
STATUS_INT_ENABLED,
STATUS_RFKILL,
STATUS_FW_ERROR,
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
};
/**
@ -443,6 +459,11 @@ struct iwl_trans;
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask - set SRAM register according to value and mask.
* @ref: grab a reference to the transport/FW layers, disallowing
* certain low power states
* @unref: release a reference previously taken with @ref. Note that
* initially the reference count is 1, making an initial @unref
* necessary to allow low power states.
*/
struct iwl_trans_ops {
@ -489,6 +510,8 @@ struct iwl_trans_ops {
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
};
/**
@ -523,6 +546,7 @@ enum iwl_trans_state {
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@ -551,6 +575,8 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
u64 dflt_pwr_limit;
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to the size of a pointer */
char trans_specific[0] __aligned(sizeof(void *));
@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test);
}
static inline void iwl_trans_ref(struct iwl_trans *trans)
{
if (trans->ops->ref)
trans->ops->ref(trans);
}
static inline void iwl_trans_unref(struct iwl_trans *trans)
{
if (trans->ops->unref)
trans->ops->unref(trans);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{

View file

@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += power.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o

View file

@ -378,7 +378,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
bt_cmd->flags = cpu_to_le32(flags);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@ -399,6 +398,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_TXRX_MAX_FREQ_0 |
BT_VALID_SYNC_TO_SCO);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@ -489,8 +491,7 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
bool enable)
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
@ -500,25 +501,16 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
.dataflags = { IWL_HCMD_DFL_DUP, },
.flags = CMD_ASYNC,
};
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int ret;
if (sta_id == IWL_MVM_STATION_COUNT)
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/* This can happen if the station has been removed right now */
if (IS_ERR_OR_NULL(sta))
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* nothing to do */
if (mvmsta->bt_reduced_txpower == enable)
if (mvmsta->bt_reduced_txpower_dbg ||
mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@ -552,6 +544,7 @@ struct iwl_bt_iterator_data {
bool reduced_tx_power;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
bool primary_ll;
};
static inline
@ -577,72 +570,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
u32 bt_activity_grading;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
if (vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP)
return;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
/* default smps_mode for BSS / P2P client is AUTOMATIC */
smps_mode = IEEE80211_SMPS_AUTOMATIC;
data->num_bss_ifaces++;
smps_mode = IEEE80211_SMPS_AUTOMATIC;
/*
* Count unassoc BSSes, relax SMPS constraints
* and disable reduced Tx Power
*/
if (!vif->bss_conf.assoc) {
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
if (iwl_mvm_bt_coex_reduced_txp(mvm,
mvmvif->ap_sta_id,
false))
IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
return;
}
break;
case NL80211_IFTYPE_AP:
/* default smps_mode for AP / GO is OFF */
smps_mode = IEEE80211_SMPS_OFF;
if (!mvmvif->ap_ibss_active) {
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
return;
}
/* the Ack / Cts kill mask must be default if AP / GO */
data->reduced_tx_power = false;
break;
default:
return;
}
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
/* ... and it is an associated STATION, relax constraints */
if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
/* ... relax constraints and disable rssi events */
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
if (vif->type == NL80211_IFTYPE_STATION)
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
/* SoftAP / GO will always be primary */
bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
if (bt_activity_grading >= BT_HIGH_TRAFFIC)
smps_mode = IEEE80211_SMPS_STATIC;
else if (bt_activity_grading >= BT_LOW_TRAFFIC)
smps_mode = vif->type == NL80211_IFTYPE_AP ?
IEEE80211_SMPS_OFF :
IEEE80211_SMPS_DYNAMIC;
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
smps_mode);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* low latency is always primary */
if (iwl_mvm_vif_low_latency(mvmvif)) {
data->primary_ll = true;
data->secondary = data->primary;
data->primary = chanctx_conf;
}
if (vif->type == NL80211_IFTYPE_AP) {
if (!mvmvif->ap_ibss_active)
return;
/* the Ack / Cts kill mask must be default if AP / GO */
data->reduced_tx_power = false;
if (chanctx_conf == data->primary)
return;
/* downgrade the current primary no matter what its type is */
data->secondary = data->primary;
data->primary = chanctx_conf;
if (!data->primary_ll) {
/*
* downgrade the current primary no matter what its
* type is.
*/
data->secondary = data->primary;
data->primary = chanctx_conf;
} else {
/* there is low latency vif - we will be secondary */
data->secondary = chanctx_conf;
}
return;
}
data->num_bss_ifaces++;
/* we are now a STA / P2P Client, and take associated ones only */
if (!vif->bss_conf.assoc)
return;
/* STA / P2P Client, try to be primary if first vif */
/*
* STA / P2P Client, try to be primary if first vif. If we are in low
* latency mode, we are already in primary and just don't do much
*/
if (!data->primary || data->primary == chanctx_conf)
data->primary = chanctx_conf;
else if (!data->secondary)
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
smps_mode = IEEE80211_SMPS_STATIC;
else if (le32_to_cpu(data->notif->bt_activity_grading) >=
BT_LOW_TRAFFIC)
smps_mode = IEEE80211_SMPS_DYNAMIC;
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status,
data->notif->bt_activity_grading, smps_mode);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* don't reduce the Tx power if in loose scheme */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant) {

View file

@ -78,5 +78,9 @@
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
#define IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#endif /* __MVM_CONSTANTS_H */

View file

@ -963,7 +963,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
/*
@ -980,8 +979,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
old_aux_sta_id = mvm->aux_sta.sta_id;
/* see if there's only a single BSS vif and it's associated */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@ -1066,16 +1063,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
* The D3 firmware still hardcodes the AP station ID for the
* BSS we're associated with as 0. Store the real STA ID here
* and assign 0. When we leave this function, we'll restore
* the original value for the resume code.
*/
old_ap_sta_id = mvm_ap_sta->sta_id;
mvm_ap_sta->sta_id = 0;
mvmvif->ap_sta_id = 0;
/*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
@ -1096,16 +1083,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
/*
* The D3 firmware still hardcodes the AP station ID for the
* BSS we're associated with as 0. As a result, we have to move
* the auxiliary station to ID 1 so the ID 0 remains free for
* the AP station for later.
* We set the sta_id to 1 here, and reset it to its previous
* value (that we stored above) later.
*/
mvm->aux_sta.sta_id = 1;
ret = iwl_mvm_load_d3_fw(mvm);
if (ret)
goto out;
@ -1191,11 +1168,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
ret = iwl_mvm_power_update_device_mode(mvm);
ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto out;
ret = iwl_mvm_power_update_mode(mvm, vif);
ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out;
@ -1222,10 +1199,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
mvm->aux_sta.sta_id = old_aux_sta_id;
mvm_ap_sta->sta_id = old_ap_sta_id;
mvmvif->ap_sta_id = old_ap_sta_id;
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
out_noreset:

View file

@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
ret = iwl_mvm_power_update_mode(mvm, vif);
ret = iwl_mvm_power_update_mac(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
int bufsz = sizeof(buf);
int pos;
pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
ap_sta_id = mvmvif->ap_sta_id;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_ADHOC:
pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
break;
case NL80211_IFTYPE_STATION:
pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
break;
case NL80211_IFTYPE_AP:
pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
break;
case NL80211_IFTYPE_P2P_CLIENT:
pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
break;
case NL80211_IFTYPE_P2P_GO:
pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
break;
case NL80211_IFTYPE_P2P_DEVICE:
pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
break;
default:
break;
}
pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
"ap_sta_id %d - reduced Tx power %d\n",
"ap_sta_id %d - reduced Tx power %d force %d\n",
ap_sta_id,
mvm_sta->bt_reduced_txpower);
mvm_sta->bt_reduced_txpower,
mvm_sta->bt_reduced_txpower_dbg);
}
}
@ -269,6 +293,36 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
struct iwl_mvm_sta *mvmsta;
bool reduced_tx_power;
int ret;
if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
return -ENOTCONN;
if (strtobool(buf, &reduced_tx_power) != 0)
return -EINVAL;
mutex_lock(&mvm->mutex);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
mvmsta->bt_reduced_txpower_dbg = false;
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
reduced_tx_power);
if (!ret)
mvmsta->bt_reduced_txpower_dbg = true;
mutex_unlock(&mvm->mutex);
return ret ? : count;
}
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@ -403,9 +457,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
else
ret = iwl_mvm_enable_beacon_filter(mvm, vif);
ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@ -460,6 +514,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u8 value;
int ret;
ret = kstrtou8(buf, 0, &value);
if (ret)
return ret;
if (value > 1)
return -EINVAL;
mutex_lock(&mvm->mutex);
iwl_mvm_update_low_latency(mvm, vif, value);
mutex_unlock(&mvm->mutex);
return count;
}
static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[3];
buf[0] = mvmvif->low_latency ? '1' : '0';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
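
/*
 * Illustrative only: a standalone userspace sketch of how the low_latency
 * debugfs entry added above could be exercised.  The path below is an
 * assumption (it depends on the phy and netdev names on the system) and is
 * not taken from this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char *path =
	"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/iwlmvm/low_latency";

int main(void)
{
	char buf[4] = {0};
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* request low-latency mode: the write handler runs kstrtou8() */
	if (write(fd, "1", 1) != 1)
		perror("write");
	/* the read handler returns "0\n" or "1\n" */
	lseek(fd, 0, SEEK_SET);
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("low_latency is now: %s", buf);
	close(fd);
	return 0;
}
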
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@ -473,6 +562,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@ -496,15 +587,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)

View file

@ -90,7 +90,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@ -105,13 +105,12 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
ret = -ENOENT;
else
ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
count;
ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
@ -251,7 +250,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
mutex_lock(&mvm->mutex);
ret = iwl_mvm_power_update_device_mode(mvm);
ret = iwl_mvm_power_update_device(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@ -600,6 +599,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
struct iwl_bcast_filter_cmd cmd;
const struct iwl_fw_bcast_filter *filter;
char *buf;
int bufsz = 1024;
int i, j, pos = 0;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
ADD_TEXT("None\n");
mutex_unlock(&mvm->mutex);
goto out;
}
mutex_unlock(&mvm->mutex);
for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
filter = &cmd.filters[i];
ADD_TEXT("Filter [%d]:\n", i);
ADD_TEXT("\tDiscard=%d\n", filter->discard);
ADD_TEXT("\tFrame Type: %s\n",
filter->frame_type ? "IPv4" : "Generic");
for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
const struct iwl_fw_bcast_filter_attr *attr;
attr = &filter->attrs[j];
if (!attr->mask)
break;
ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
j, attr->offset,
attr->offset_type ? "IP End" :
"Payload Start",
be32_to_cpu(attr->mask),
be32_to_cpu(attr->val),
le16_to_cpu(attr->reserved1));
}
}
out:
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
int pos, next_pos;
struct iwl_fw_bcast_filter filter = {};
struct iwl_bcast_filter_cmd cmd;
u32 filter_id, attr_id, mask, value;
int err = 0;
if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
&filter.frame_type, &pos) != 3)
return -EINVAL;
if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
return -EINVAL;
for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
attr_id++) {
struct iwl_fw_bcast_filter_attr *attr =
&filter.attrs[attr_id];
if (pos >= count)
break;
if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
&attr->offset, &attr->offset_type,
&mask, &value, &next_pos) != 4)
return -EINVAL;
attr->mask = cpu_to_be32(mask);
attr->val = cpu_to_be32(value);
if (mask)
filter.num_attrs++;
pos += next_pos;
}
mutex_lock(&mvm->mutex);
memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
&filter, sizeof(filter));
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
return err ?: count;
}
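
/*
 * Illustrative only: a standalone userspace model of the input format the
 * bcast_filters debugfs write handler above parses with sscanf() and %n:
 *   "<filter_id> <discard> <frame_type> [<offset> <offset_type> <mask> <val>]..."
 * Types are simplified; this is not the driver code itself.
 */
#include <stdio.h>

#define MAX_ATTRS 2

int main(void)
{
	const char *buf = "0 1 1  12 0 0xffff0000 0x00440000  38 1 0xff 0x10";
	int pos = 0, next_pos = 0, n_attrs = 0;
	unsigned int filter_id, discard, frame_type;
	unsigned int offset, offset_type, mask, val;

	if (sscanf(buf, "%u %u %u %n", &filter_id, &discard,
		   &frame_type, &pos) != 3)
		return 1;
	printf("filter %u: discard=%u frame_type=%u\n",
	       filter_id, discard, frame_type);

	/* parse up to MAX_ATTRS attribute tuples, advancing with %n */
	while (n_attrs < MAX_ATTRS &&
	       sscanf(&buf[pos], "%u %u %x %x %n", &offset, &offset_type,
		      &mask, &val, &next_pos) == 4) {
		printf("  attr[%d]: offset=%u type=%u mask=0x%x val=0x%x\n",
		       n_attrs, offset, offset_type, mask, val);
		pos += next_pos;
		n_attrs++;
	}
	return 0;
}
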
static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
struct iwl_bcast_filter_cmd cmd;
char *buf;
int bufsz = 1024;
int i, pos = 0;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
ADD_TEXT("None\n");
mutex_unlock(&mvm->mutex);
goto out;
}
mutex_unlock(&mvm->mutex);
for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
i, mac->default_discard, mac->attached_filters);
}
out:
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_bcast_filter_cmd cmd;
struct iwl_fw_bcast_mac mac = {};
u32 mac_id, attached_filters;
int err = 0;
if (!mvm->bcast_filters)
return -ENOENT;
if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
&attached_filters) != 3)
return -EINVAL;
if (mac_id >= ARRAY_SIZE(cmd.macs) ||
mac.default_discard > 1 ||
attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
return -EINVAL;
mac.attached_filters = cpu_to_le16(attached_filters);
mutex_lock(&mvm->mutex);
memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
&mac, sizeof(mac));
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
return err ?: count;
}
#endif
#ifdef CONFIG_PM_SLEEP
static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@ -658,15 +838,74 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
#define PRINT_MVM_REF(ref) do { \
if (test_bit(ref, mvm->ref_bitmap)) \
pos += scnprintf(buf + pos, bufsz - pos, \
"\t(0x%lx) %s\n", \
BIT(ref), #ref); \
} while (0)
static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
mvm->ref_bitmap[0]);
PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
PRINT_MVM_REF(IWL_MVM_REF_SCAN);
PRINT_MVM_REF(IWL_MVM_REF_ROC);
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
unsigned long value;
int ret;
bool taken;
ret = kstrtoul(buf, 10, &value);
if (ret < 0)
return ret;
mutex_lock(&mvm->mutex);
taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
if (value == 1 && !taken)
iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
else if (value == 0 && taken)
iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
else
ret = -EINVAL;
mutex_unlock(&mvm->mutex);
if (ret < 0)
return ret;
return count;
}
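
/*
 * Illustrative only: a tiny userspace model of the "take/release a named
 * reference exactly once" pattern used by the d0i3_refs write handler above.
 * Kernel bitops are replaced by plain bit arithmetic; REF_USER etc. are
 * stand-ins for the enum iwl_mvm_ref_type values.
 */
#include <stdio.h>

enum ref_type { REF_UCODE_DOWN, REF_SCAN, REF_ROC, REF_USER, REF_COUNT };

static unsigned long ref_bitmap;

static int set_user_ref(int value)
{
	int taken = !!(ref_bitmap & (1UL << REF_USER));

	if (value == 1 && !taken)
		ref_bitmap |= 1UL << REF_USER;	  /* take the reference */
	else if (value == 0 && taken)
		ref_bitmap &= ~(1UL << REF_USER); /* release it */
	else
		return -1;			  /* already in that state */
	return 0;
}

int main(void)
{
	printf("take:    %d (bitmap 0x%lx)\n", set_user_ref(1), ref_bitmap);
	printf("retake:  %d (bitmap 0x%lx)\n", set_user_ref(1), ref_bitmap);
	printf("release: %d (bitmap 0x%lx)\n", set_user_ref(0), ref_bitmap);
	return 0;
}
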
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, mvm, \
#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
if (!debugfs_create_file(alias, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
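
/*
 * Illustrative only: a standalone sketch of the alias/stringification macro
 * pattern used by MVM_DEBUGFS_ADD_FILE{,_ALIAS} above.  register_file() is a
 * made-up stand-in for debugfs_create_file().
 */
#include <stdio.h>

static void register_file(const char *visible_name, const char *ops_name)
{
	printf("file \"%s\" -> %s_ops\n", visible_name, ops_name);
}

/* the alias form takes an explicit file name... */
#define ADD_FILE_ALIAS(alias, name)	register_file(alias, #name)
/* ...and the plain form just stringifies the ops name */
#define ADD_FILE(name)			ADD_FILE_ALIAS(#name, name)

int main(void)
{
	ADD_FILE(low_latency);			  /* creates "low_latency" */
	ADD_FILE_ALIAS("filters", bcast_filters); /* creates "filters"     */
	return 0;
}
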
/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@ -680,6 +919,12 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@ -687,6 +932,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
char buf[100];
mvm->debugfs_dir = dbgfs_dir;
@ -705,6 +951,27 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
bcast_dir = debugfs_create_dir("bcast_filtering",
mvm->debugfs_dir);
if (!bcast_dir)
goto err;
if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
bcast_dir,
&mvm->dbgfs_bcast_filtering.override))
goto err;
MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
bcast_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
bcast_dir, S_IWUSR | S_IRUSR);
}
#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);

View file

@ -70,37 +70,24 @@
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
* @BT_CH_PRIMARY_EN:
* @BT_CH_SECONDARY_EN:
* @BT_NOTIF_COEX_OFF:
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE:
* @BT_COEX_2W:
* @BT_COEX_3W:
* @BT_COEX_NW:
* @BT_USE_DEFAULTS:
* @BT_SYNC_2_BT_DISABLE:
* @BT_COEX_CORUNNING_TBL_EN:
* @BT_COEX_SYNC2SCO:
*
* The COEX_MODE must be set for each command, even if it is not changed.
*/
enum iwl_bt_coex_flags {
BT_CH_PRIMARY_EN = BIT(0),
BT_CH_SECONDARY_EN = BIT(1),
BT_NOTIF_COEX_OFF = BIT(2),
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_USE_DEFAULTS = BIT(6),
BT_SYNC_2_BT_DISABLE = BIT(7),
BT_COEX_CORUNNING_TBL_EN = BIT(8),
BT_COEX_MPLUT_TBL_EN = BIT(9),
/* Bit 10 is reserved */
BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
BT_COEX_SYNC2SCO = BIT(7),
};
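
/*
 * Illustrative only: a standalone sketch of the pos/mask encoding used by
 * BT_COEX_MODE_POS / BT_COEX_MODE_MSK above.  BITS() is defined locally here
 * on the assumption that it yields a mask of the given width, as the mode
 * values (0x0..0x3 shifted by the position) imply.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define BITS(nb)	(BIT(nb) - 1)	/* assumption: nb-bit wide mask */

#define MODE_POS	3
#define MODE_MSK	(BITS(3) << MODE_POS)

static unsigned int set_mode(unsigned int flags, unsigned int mode)
{
	return (flags & ~MODE_MSK) | (mode << MODE_POS);
}

static unsigned int get_mode(unsigned int flags)
{
	return (flags & MODE_MSK) >> MODE_POS;
}

int main(void)
{
	unsigned int flags = BIT(0);	/* some unrelated flag */

	flags = set_mode(flags, 0x2);	/* e.g. the "3W" mode value */
	printf("flags=0x%x mode=%u\n", flags, get_mode(flags));
	return 0;
}
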
/*

View file

@ -231,8 +231,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
/* BIT(11) reserved */
IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd {

View file

@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72
#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
#define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
#define IWL_BF_ESCAPE_TIMER_D0I3 1024
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
#define IWL_BF_CMD_CONFIG_DEFAULTS \
.bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
.bf_roaming_energy_delta = \
cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
.bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
.bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
.bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
.bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
.bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
#define IWL_BF_CMD_CONFIG(mode) \
.bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
.bf_roaming_energy_delta = \
cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
.bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
.bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
.bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
.bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
.bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
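
/*
 * Illustrative only: a standalone sketch of the token-pasting pattern behind
 * IWL_BF_CMD_CONFIG(mode) above, with made-up fields and values.  One macro
 * expands to a full designated initializer for either parameter set.
 */
#include <stdio.h>

#define ENERGY_DELTA_DEFAULT	5
#define ENERGY_DELTA_D0I3	20
#define ESCAPE_TIMER_DEFAULT	50
#define ESCAPE_TIMER_D0I3	1024

struct bf_cmd {
	unsigned int energy_delta;
	unsigned int escape_timer;
};

/* paste the mode suffix (_DEFAULT or _D0I3) onto every constant */
#define BF_CMD_CONFIG(mode)				\
	.energy_delta = ENERGY_DELTA ## mode,		\
	.escape_timer = ESCAPE_TIMER ## mode

int main(void)
{
	struct bf_cmd def  = { BF_CMD_CONFIG(_DEFAULT) };
	struct bf_cmd d0i3 = { BF_CMD_CONFIG(_D0I3) };

	printf("default: delta=%u timer=%u\n", def.energy_delta, def.escape_timer);
	printf("d0i3:    delta=%u timer=%u\n", d0i3.energy_delta, d0i3.escape_timer);
	return 0;
}
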

View file

@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
* @STA_SLEEP_STATE_AWAKE:
* @STA_SLEEP_STATE_PS_POLL:
* @STA_SLEEP_STATE_UAPSD:
* @STA_SLEEP_STATE_MOREDATA: set more-data bit on
* (last) released frame
*/
enum iwl_sta_sleep_flag {
STA_SLEEP_STATE_AWAKE = 0,
STA_SLEEP_STATE_PS_POLL = BIT(0),
STA_SLEEP_STATE_UAPSD = BIT(1),
STA_SLEEP_STATE_AWAKE = 0,
STA_SLEEP_STATE_PS_POLL = BIT(0),
STA_SLEEP_STATE_UAPSD = BIT(1),
STA_SLEEP_STATE_MOREDATA = BIT(2),
};
/* STA ID and color bits definitions */
@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
* struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
* VER_6 of this command is quite similar to VER_5 except
* struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
* VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
* It only differs from VER_6 in the "awake_acs" field, which is
* reserved and ignored in VER_6.
*/
struct iwl_mvm_add_sta_cmd_v6 {
struct iwl_mvm_add_sta_cmd_v7 {
u8 add_modify;
u8 reserved1;
u8 awake_acs;
__le16 tid_disable_tx;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
} __packed; /* ADD_STA_CMD_API_S_VER_6 */
} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
struct iwl_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
/**
* struct iwl_mvm_eosp_notification - EOSP notification from firmware
* @remain_frame_count: # of frames remaining, non-zero if SP was cut
* short by GO absence
* @sta_id: station ID
*/
struct iwl_mvm_eosp_notification {
__le32 remain_frame_count;
__le32 sta_id;
} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
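
/*
 * Illustrative only: a standalone sketch of decoding the two little-endian
 * 32-bit words of the EOSP notification above from a raw byte buffer.
 * le32_get() is a local helper, not a kernel API.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32_get(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* example payload: remain_frame_count = 2, sta_id = 7 */
	const uint8_t payload[8] = { 2, 0, 0, 0, 7, 0, 0, 0 };
	uint32_t remain = le32_get(&payload[0]);
	uint32_t sta_id = le32_get(&payload[4]);

	if (remain)
		printf("SP for sta %u cut short, %u frame(s) left\n",
		       (unsigned)sta_id, (unsigned)remain);
	else
		printf("SP for sta %u ended normally\n", (unsigned)sta_id);
	return 0;
}
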

View file

@ -163,6 +163,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@ -190,6 +191,7 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
/* D3 commands/notifications */
@ -197,6 +199,7 @@ enum {
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
D0I3_END_CMD = 0xed,
/* for WoWLAN in particular */
WOWLAN_PATTERNS = 0xe0,
@ -303,6 +306,7 @@ struct iwl_phy_cfg_cmd {
#define PHY_CFG_RX_CHAIN_B BIT(13)
#define PHY_CFG_RX_CHAIN_C BIT(14)
#define NVM_MAX_NUM_SECTIONS 11
/* Target of the NVM_ACCESS_CMD */
enum {
@ -313,14 +317,9 @@ enum {
/* Section types for NVM_ACCESS_CMD */
enum {
NVM_SECTION_TYPE_HW = 0,
NVM_SECTION_TYPE_SW,
NVM_SECTION_TYPE_PAPD,
NVM_SECTION_TYPE_BT,
NVM_SECTION_TYPE_CALIBRATION,
NVM_SECTION_TYPE_PRODUCTION,
NVM_SECTION_TYPE_POST_FCS_CALIB,
NVM_NUM_OF_SECTIONS,
NVM_SECTION_TYPE_SW = 1,
NVM_SECTION_TYPE_CALIBRATION = 4,
NVM_SECTION_TYPE_PRODUCTION = 5,
};
/**
@ -412,6 +411,35 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
struct mvm_alive_resp_ver2 {
__le16 status;
__le16 flags;
u8 ucode_minor;
u8 ucode_major;
__le16 id;
u8 api_minor;
u8 api_major;
u8 ver_subtype;
u8 ver_type;
u8 mac;
u8 opt;
__le16 reserved2;
__le32 timestamp;
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for LMAC event log */
__le32 cpu_register_ptr;
__le32 dbgm_config_ptr;
__le32 alive_counter_ptr;
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
u8 umac_minor; /* UMAC version: minor */
u8 umac_major; /* UMAC version: major */
__le16 umac_id; /* UMAC version: id */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@ -1159,6 +1187,90 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
#define MAX_BCAST_FILTERS 8
#define MAX_BCAST_FILTER_ATTRS 2
/**
* enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
* @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
* @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
* start of ip payload).
*/
enum iwl_mvm_bcast_filter_attr_offset {
BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
BCAST_FILTER_OFFSET_IP_END = 1,
};
/**
* struct iwl_fw_bcast_filter_attr - broadcast filter attribute
* @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
* @offset: starting offset of this pattern.
* @val: value to match - big endian (MSB is the first
* byte to match from offset pos).
* @mask: mask to match (big endian).
*/
struct iwl_fw_bcast_filter_attr {
u8 offset_type;
u8 offset;
__le16 reserved1;
__be32 val;
__be32 mask;
} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
/**
* enum iwl_mvm_bcast_filter_frame_type - filter frame type
* @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
* @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
*/
enum iwl_mvm_bcast_filter_frame_type {
BCAST_FILTER_FRAME_TYPE_ALL = 0,
BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
};
/**
* struct iwl_fw_bcast_filter - broadcast filter
* @discard: discard frame (1) or let it pass (0).
* @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
* @num_attrs: number of valid attributes in this filter.
* @attrs: attributes of this filter. A filter is considered matched
* only when all of its attributes are matched (i.e. AND relationship).
*/
struct iwl_fw_bcast_filter {
u8 discard;
u8 frame_type;
u8 num_attrs;
u8 reserved1;
struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
} __packed; /* BCAST_FILTER_S_VER_1 */
/**
* struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
* @default_discard: default action for this mac (discard (1) / pass (0)).
* @attached_filters: bitmap of relevant filters for this mac.
*/
struct iwl_fw_bcast_mac {
u8 default_discard;
u8 reserved1;
__le16 attached_filters;
} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
/**
* struct iwl_bcast_filter_cmd - broadcast filtering configuration
* @disable: enable (0) / disable (1)
* @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
* @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
* @filters: broadcast filters
* @macs: broadcast filtering configuration per-mac
*/
struct iwl_bcast_filter_cmd {
u8 disable;
u8 max_bcast_filters;
u8 max_macs;
u8 reserved1;
struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
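
/*
 * Illustrative only: a standalone sketch of the matching rule documented for
 * struct iwl_fw_bcast_filter_attr above - a 32-bit big-endian value compared
 * under a mask at a given offset, with all attributes of a filter ANDed
 * together.  Helper and struct names here are made up.
 */
#include <stdio.h>
#include <stdint.h>

struct attr { uint8_t offset; uint32_t val; uint32_t mask; };

static uint32_t be32_get(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static int attr_matches(const struct attr *a, const uint8_t *payload, size_t len)
{
	if ((size_t)a->offset + 4 > len)
		return 0;
	return (be32_get(payload + a->offset) & a->mask) == (a->val & a->mask);
}

int main(void)
{
	/* LLC/SNAP header followed by ethertype 0x0806 (ARP), hw type 0x0001 */
	const uint8_t payload[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00,
				    0x08, 0x06, 0x00, 0x01, 0x08, 0x00 };
	/* same shape as the "frame type - arp" attribute in this patch */
	const struct attr arp = { .offset = 6, .val = 0x08060001,
				  .mask = 0xffffffff };

	printf("ARP attribute %s\n",
	       attr_matches(&arp, payload, sizeof(payload)) ?
	       "matches" : "does not match");
	return 0;
}
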
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;

View file

@ -110,18 +110,46 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
struct mvm_alive_resp_ver2 *palive2;
palive = (void *)pkt->data;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
mvm->support_umac_log = false;
mvm->error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive->status), palive->ver_type,
palive->ver_subtype, palive->flags);
alive_data->valid = le16_to_cpu(palive->status) ==
IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive->status), palive->ver_type,
palive->ver_subtype, palive->flags);
} else {
palive2 = (void *)pkt->data;
mvm->support_umac_log = true;
mvm->error_event_table =
le32_to_cpu(palive2->error_event_table_ptr);
mvm->log_event_table =
le32_to_cpu(palive2->log_event_table_ptr);
alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive2->error_info_addr);
alive_data->valid = le16_to_cpu(palive2->status) ==
IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
"Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
le16_to_cpu(palive2->status), palive2->ver_type,
palive2->ver_subtype, palive2->flags);
IWL_DEBUG_FW(mvm,
"UMAC version: Major - 0x%x, Minor - 0x%x\n",
palive2->umac_major, palive2->umac_minor);
}
return true;
}
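
/*
 * Illustrative only: a standalone sketch of the length-based dispatch used by
 * iwl_alive_fn() above to tell a VER_1 alive response from a VER_2 one.  The
 * structs are heavily trimmed stand-ins, not the real firmware layouts, and
 * endianness handling is omitted.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct alive_v1 {
	uint16_t status;
	uint32_t error_event_table_ptr;
} __attribute__((packed));

struct alive_v2 {
	uint16_t status;
	uint32_t error_event_table_ptr;
	uint32_t umac_error_info_addr;
} __attribute__((packed));

static void handle_alive(const void *payload, size_t len)
{
	if (len == sizeof(struct alive_v1)) {
		struct alive_v1 v1;

		memcpy(&v1, payload, sizeof(v1));
		printf("VER1 alive, status 0x%04x\n", (unsigned)v1.status);
	} else if (len == sizeof(struct alive_v2)) {
		struct alive_v2 v2;

		memcpy(&v2, payload, sizeof(v2));
		printf("VER2 alive, status 0x%04x, UMAC error log at 0x%x\n",
		       (unsigned)v2.status, (unsigned)v2.umac_error_info_addr);
	} else {
		printf("unknown alive size %zu\n", len);
	}
}

int main(void)
{
	struct alive_v2 v2 = { .status = 0xcafe,
			       .umac_error_info_addr = 0x800000 };

	handle_alive(&v2, sizeof(v2));
	return 0;
}
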
@ -439,10 +467,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
ret = iwl_mvm_power_update_device_mode(mvm);
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
ret = iwl_power_legacy_set_cam_mode(mvm);
if (ret)
goto error;
}
ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:

View file

@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 min_bi;
/* Skip the interface for which we are trying to assign a tsf_id */
if (vif == data->vif)
@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
switch (data->vif->type) {
case NL80211_IFTYPE_STATION:
/*
* The new interface is client, so if the existing one
* we're iterating is an AP, and both interfaces have the
* same beacon interval, the same TSF should be used to
* avoid drift between the new client and existing AP,
* the existing AP will get drift updates from the new
* client context in this case
* The new interface is a client, so if the one we're iterating
* is an AP, and the beacon interval of the AP is a multiple or
* divisor of the beacon interval of the client, the same TSF
* should be used to avoid drift between the new client and
* existing AP. The existing AP will get drift updates from the
* new client context in this case.
*/
if (vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
(vif->bss_conf.beacon_int ==
data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
return;
}
if (vif->type != NL80211_IFTYPE_AP ||
data->preferred_tsf != NUM_TSF_IDS ||
!test_bit(mvmvif->tsf_id, data->available_tsf_ids))
break;
min_bi = min(data->vif->bss_conf.beacon_int,
vif->bss_conf.beacon_int);
if (!min_bi)
break;
if ((data->vif->bss_conf.beacon_int -
vif->bss_conf.beacon_int) % min_bi == 0) {
data->preferred_tsf = mvmvif->tsf_id;
return;
}
break;
case NL80211_IFTYPE_AP:
/*
* The new interface is AP/GO, so in case both interfaces
* have the same beacon interval, it should get drift
* updates from an existing client or use the same
* TSF as an existing GO. There's no drift between
* TSFs internally but if they used different TSFs
* then a new client MAC could update one of them
* and cause drift that way.
* The new interface is AP/GO, so if its beacon interval is a
* multiple or a divisor of the beacon interval of an existing
* interface, it should get drift updates from an existing
* client or use the same TSF as an existing GO. There's no
* drift between TSFs internally but if they used different
* TSFs then a new client MAC could update one of them and
* cause drift that way.
*/
if (vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
(vif->bss_conf.beacon_int ==
data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
return;
}
if ((vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_STATION) ||
data->preferred_tsf != NUM_TSF_IDS ||
!test_bit(mvmvif->tsf_id, data->available_tsf_ids))
break;
min_bi = min(data->vif->bss_conf.beacon_int,
vif->bss_conf.beacon_int);
if (!min_bi)
break;
if ((data->vif->bss_conf.beacon_int -
vif->bss_conf.beacon_int) % min_bi == 0) {
data->preferred_tsf = mvmvif->tsf_id;
return;
}
break;
default:
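
/*
 * Illustrative only: a standalone sketch of the "multiple or divisor" beacon
 * interval check introduced in iwl_mvm_mac_tsf_id_iter() above.  Not driver
 * code; beacon intervals are in TU as in mac80211.
 */
#include <stdio.h>

static int can_share_tsf(int bi_new, int bi_existing)
{
	int min_bi = bi_new < bi_existing ? bi_new : bi_existing;

	if (!min_bi)
		return 0;
	/* the difference is a multiple of the smaller interval exactly when
	 * one interval is a multiple/divisor of the other */
	return (bi_new - bi_existing) % min_bi == 0;
}

int main(void)
{
	printf("100 vs 300: %s\n", can_share_tsf(100, 300) ? "share" : "separate");
	printf("100 vs 150: %s\n", can_share_tsf(100, 150) ? "share" : "separate");
	return 0;
}
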

View file

@ -66,6 +66,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <net/mac80211.h>
#include <net/tcp.h>
@ -128,6 +129,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
* Use the reserved field to indicate magic values.
* These values are only used internally by the driver and never make it
* to the fw (reserved will be 0).
* BC_FILTER_MAGIC_IP - configure the val of this attribute to be the
* vif's IP address. If there is not exactly one IP address (0, or more
* than 1), this attribute will be skipped.
* BC_FILTER_MAGIC_MAC - set the val of this attribute to the LSB bytes
* of the vif's MAC address
*/
enum {
BC_FILTER_MAGIC_NONE = 0,
BC_FILTER_MAGIC_IP,
BC_FILTER_MAGIC_MAC,
};
static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
{
/* arp */
.discard = 0,
.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
.attrs = {
{
/* frame type - arp, hw type - ethernet */
.offset_type =
BCAST_FILTER_OFFSET_PAYLOAD_START,
.offset = sizeof(rfc1042_header),
.val = cpu_to_be32(0x08060001),
.mask = cpu_to_be32(0xffffffff),
},
{
/* arp dest ip */
.offset_type =
BCAST_FILTER_OFFSET_PAYLOAD_START,
.offset = sizeof(rfc1042_header) + 2 +
sizeof(struct arphdr) +
ETH_ALEN + sizeof(__be32) +
ETH_ALEN,
.mask = cpu_to_be32(0xffffffff),
/* mark it as special field */
.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
},
},
},
{
/* dhcp offer bcast */
.discard = 0,
.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
.attrs = {
{
/* udp dest port - 68 (bootp client)*/
.offset_type = BCAST_FILTER_OFFSET_IP_END,
.offset = offsetof(struct udphdr, dest),
.val = cpu_to_be32(0x00440000),
.mask = cpu_to_be32(0xffff0000),
},
{
/* dhcp - lsb bytes of client hw address */
.offset_type = BCAST_FILTER_OFFSET_IP_END,
.offset = 38,
.mask = cpu_to_be32(0xffffffff),
/* mark it as special field */
.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
},
},
},
/* last filter must be empty */
{},
};
#endif
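
/*
 * Illustrative only: working out the "arp dest ip" offset used by the default
 * broadcast filter above, assuming the usual sizes (6-byte rfc1042/LLC-SNAP
 * header, 8-byte arphdr, 6-byte MAC address, 4-byte IPv4 address).  Userspace
 * sketch, not driver code.
 */
#include <stdio.h>

#define RFC1042_HDR_LEN	6	/* LLC/SNAP header without the ethertype */
#define ETHERTYPE_LEN	2
#define ARPHDR_LEN	8	/* ar_hrd, ar_pro, ar_hln, ar_pln, ar_op */
#define ETH_ALEN	6
#define IPV4_ALEN	4

int main(void)
{
	int target_ip_off = RFC1042_HDR_LEN + ETHERTYPE_LEN + ARPHDR_LEN +
			    ETH_ALEN +		/* sender MAC */
			    IPV4_ALEN +		/* sender IP  */
			    ETH_ALEN;		/* target MAC */

	printf("ethertype/hw-type attr offset: %d\n", RFC1042_HDR_LEN);
	printf("ARP target IP attr offset:     %d\n", target_ip_off);
	return 0;
}
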
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
return;
IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
iwl_trans_ref(mvm->trans);
}
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
return;
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
iwl_trans_unref(mvm->trans);
}
static void
iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
{
int i;
if (!mvm->trans->cfg->d0i3)
return;
for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
if (ref == i)
continue;
IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
clear_bit(i, mvm->ref_bitmap);
iwl_trans_unref(mvm->trans);
}
}
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
int i;
@ -203,6 +315,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@ -289,6 +404,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/* assign default bcast filtering configuration */
mvm->bcast_filters = iwl_mvm_default_bcast_filters;
#endif
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@ -305,6 +425,9 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_sta *sta = control->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@ -315,8 +438,16 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
goto drop;
if (control->sta) {
if (iwl_mvm_tx_skb(mvm, skb, control->sta))
/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_deauth(hdr->frame_control) &&
!ieee80211_is_disassoc(hdr->frame_control) &&
!ieee80211_is_action(hdr->frame_control)))
sta = NULL;
if (sta) {
if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
}
@ -434,6 +565,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@ -441,6 +573,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
/* cleanup all stale references (scan, roc), but keep the
* ucode_down ref until reconfig is complete */
iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
}
@ -475,6 +611,9 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
/* allow transport/FW low power modes */
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
mutex_unlock(&mvm->mutex);
}
@ -482,9 +621,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
mutex_lock(&mvm->mutex);
/* disallow low power states when the FW is down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
/*
@ -510,14 +654,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = data;
iwl_mvm_power_update_mode(mvm, vif);
}
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
u16 i;
@ -585,7 +721,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
qmask);
qmask,
ieee80211_vif_type_p2p(vif));
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
goto out_release;
@ -599,10 +736,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
iwl_mvm_power_disable(mvm, vif);
ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out_release;
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
if (ret)
goto out_remove_mac;
@ -661,11 +800,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
/* TODO: remove this when legacy PM will be discarded */
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@ -754,11 +888,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
/* TODO: remove this when legacy PM will be discarded */
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_update_iterator, mvm);
iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@ -876,6 +1006,156 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
*total_flags = 0;
}
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
struct iwl_bcast_iter_data {
struct iwl_mvm *mvm;
struct iwl_bcast_filter_cmd *cmd;
u8 current_filter;
};
static void
iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
const struct iwl_fw_bcast_filter *in_filter,
struct iwl_fw_bcast_filter *out_filter)
{
struct iwl_fw_bcast_filter_attr *attr;
int i;
memcpy(out_filter, in_filter, sizeof(*out_filter));
for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
attr = &out_filter->attrs[i];
if (!attr->mask)
break;
switch (attr->reserved1) {
case cpu_to_le16(BC_FILTER_MAGIC_IP):
if (vif->bss_conf.arp_addr_cnt != 1) {
attr->mask = 0;
continue;
}
attr->val = vif->bss_conf.arp_addr_list[0];
break;
case cpu_to_le16(BC_FILTER_MAGIC_MAC):
attr->val = *(__be32 *)&vif->addr[2];
break;
default:
break;
}
attr->reserved1 = 0;
out_filter->num_attrs++;
}
}
static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_bcast_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_bcast_filter_cmd *cmd = data->cmd;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_fw_bcast_mac *bcast_mac;
int i;
if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
return;
bcast_mac = &cmd->macs[mvmvif->id];
/* enable filtering only for associated stations */
if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
return;
bcast_mac->default_discard = 1;
/* copy all configured filters */
for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
/*
* Make sure we don't exceed our filters limit.
* If there is still a valid filter to be configured, be on the safe
* side and just allow bcast for this mac.
*/
if (WARN_ON_ONCE(data->current_filter >=
ARRAY_SIZE(cmd->filters))) {
bcast_mac->default_discard = 0;
bcast_mac->attached_filters = 0;
break;
}
iwl_mvm_set_bcast_filter(vif,
&mvm->bcast_filters[i],
&cmd->filters[data->current_filter]);
/* skip current filter if it contains no attributes */
if (!cmd->filters[data->current_filter].num_attrs)
continue;
/* attach the filter to current mac */
bcast_mac->attached_filters |=
cpu_to_le16(BIT(data->current_filter));
data->current_filter++;
}
}
bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
struct iwl_bcast_filter_cmd *cmd)
{
struct iwl_bcast_iter_data iter_data = {
.mvm = mvm,
.cmd = cmd,
};
memset(cmd, 0, sizeof(*cmd));
cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
cmd->max_macs = ARRAY_SIZE(cmd->macs);
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* use debugfs filters/macs if override is configured */
if (mvm->dbgfs_bcast_filtering.override) {
memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
sizeof(cmd->filters));
memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
sizeof(cmd->macs));
return true;
}
#endif
/* if no filters are configured, do nothing */
if (!mvm->bcast_filters)
return false;
/* configure and attach these filters for each associated sta vif */
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bcast_filter_iterator, &iter_data);
return true;
}
static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_bcast_filter_cmd cmd;
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
}
#else
static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
return 0;
}
#endif
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@ -928,6 +1208,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
if (vif->p2p)
iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/*
* If update fails - SF might be running in associated
@ -940,27 +1222,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
if (vif->p2p)
iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
}
iwl_mvm_recalc_multicast(mvm);
iwl_mvm_configure_bcast_filter(mvm, vif);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
if (!(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
/* Workaround for FW bug, otherwise FW disables device
* power save upon disassociation
*/
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
IEEE80211_SMPS_AUTOMATIC);
@ -973,7 +1253,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
&mvmvif->time_event_data);
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@ -987,10 +1267,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
ret = iwl_mvm_update_beacon_filter(mvm, vif);
ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
if (changes & BSS_CHANGED_ARP_FILTER) {
IWL_DEBUG_MAC80211(mvm, "arp filter changed");
iwl_mvm_configure_bcast_filter(mvm, vif);
}
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@ -1024,8 +1309,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
mvmvif->ap_ibss_active = true;
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@ -1037,7 +1320,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* power updated needs to be done before quotas */
mvm->bound_vif_cnt++;
iwl_mvm_power_update_binding(mvm, vif, true);
iwl_mvm_power_update_mac(mvm, vif);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@ -1047,6 +1330,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
@ -1054,7 +1339,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
out_quota_failed:
mvm->bound_vif_cnt--;
iwl_mvm_power_update_binding(mvm, vif, false);
iwl_mvm_power_update_mac(mvm, vif);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@ -1080,6 +1365,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@ -1089,7 +1376,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
iwl_mvm_power_update_binding(mvm, vif, false);
iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@ -1103,26 +1390,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
u32 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_HT |
BSS_CHANGED_BANDWIDTH;
int ret;
/* Changes will be applied when the AP/IBSS is started */
if (!mvmvif->ap_ibss_active)
return;
if (changes & ht_change) {
ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
}
if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
BSS_CHANGED_BANDWIDTH) &&
iwl_mvm_mac_ctxt_changed(mvm, vif))
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
IWL_WARN(mvm, "Failed updating beacon data\n");
}
if (changes & BSS_CHANGED_BEACON &&
iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
IWL_WARN(mvm, "Failed updating beacon data\n");
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@ -1155,6 +1436,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
int ret;
if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
@ -1162,11 +1445,38 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
ret = iwl_mvm_scan_request(mvm, vif, req);
else
switch (mvm->scan_status) {
case IWL_MVM_SCAN_SCHED:
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
NULL, NULL);
iwl_mvm_sched_scan_stop(mvm);
ret = iwl_wait_notification(&mvm->notif_wait,
&wait_scan_done, HZ);
if (ret) {
ret = -EBUSY;
goto out;
}
/* iwl_mvm_rx_scan_offload_complete_notif() will be called soon, but it
* will not reset the scan status, since by then the status will no
* longer be IWL_MVM_SCAN_SCHED - we queue the next scan immediately
* (below).
*/
break;
case IWL_MVM_SCAN_NONE:
break;
default:
ret = -EBUSY;
goto out;
}
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
ret = iwl_mvm_scan_request(mvm, vif, req);
if (ret)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
out:
mutex_unlock(&mvm->mutex);
return ret;
@ -1186,20 +1496,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid,
struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* TODO: how do we tell the fw to send frames for a specific TID */
/* Called when we need to transmit (a) frame(s) from mac80211 */
/*
* The fw will send an EOSP notification when the last frame is
* transmitted.
*/
iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
tids, more_data, false);
}
static void
iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* Called when we need to transmit (a) frame(s) from agg queue */
iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
tids, more_data, true);
}
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@ -1209,11 +1531,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int tid;
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data;
tid_data = &mvmsta->tid_data[tid];
if (tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
if (iwl_mvm_tid_queued(tid_data) == 0)
continue;
ieee80211_sta_set_buffered(sta, tid, true);
}
spin_unlock_bh(&mvmsta->lock);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@ -1304,12 +1640,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@ -1774,7 +2110,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* otherwise fw will complain.
*/
mvm->bound_vif_cnt++;
iwl_mvm_power_update_binding(mvm, vif, true);
iwl_mvm_power_update_mac(mvm, vif);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@ -1792,7 +2128,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
iwl_mvm_power_update_binding(mvm, vif, false);
iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@ -1825,7 +2161,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
iwl_mvm_power_update_binding(mvm, vif, false);
iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
@ -1892,8 +2228,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
return iwl_mvm_enable_beacon_filter(mvm, vif);
return iwl_mvm_disable_beacon_filter(mvm, vif);
return iwl_mvm_enable_beacon_filter(mvm, vif,
CMD_SYNC);
return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
}
return -EOPNOTSUPP;
@ -1932,6 +2269,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,

View file

@ -92,7 +92,6 @@ enum iwl_mvm_tx_fifo {
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
extern const struct iwl_mvm_power_ops pm_legacy_ops;
extern const struct iwl_mvm_power_ops pm_mac_ops;
/**
@ -159,20 +158,6 @@ enum iwl_power_scheme {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
struct iwl_mvm_power_ops {
int (*power_update_mode)(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void (*power_update_binding)(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool assign);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
#endif
};
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@ -239,6 +224,17 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
enum iwl_mvm_ref_type {
IWL_MVM_REF_UCODE_DOWN,
IWL_MVM_REF_SCAN,
IWL_MVM_REF_ROC,
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
IWL_MVM_REF_COUNT,
};
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
@ -269,7 +265,9 @@ struct iwl_mvm_vif_bf_data {
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* interface should get quota etc.
* @low_latency: indicates that this interface is in low-latency mode
* (VMACLowLatencyMode)
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
@ -285,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
bool monitor_active;
bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@ -333,14 +332,13 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
struct iwl_mac_power_cmd mac_pwr_cmd;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@ -415,6 +413,7 @@ struct iwl_tt_params {
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
* @tx_backoff: The current thermal throttling tx backoff in uSec.
* @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling active?
*/
@ -422,6 +421,7 @@ struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
u32 min_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
@ -457,6 +457,8 @@ struct iwl_mvm {
bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
u32 ampdu_ref;
@ -470,7 +472,7 @@ struct iwl_mvm {
struct iwl_nvm_data *nvm_data;
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@ -494,6 +496,17 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/* broadcast filters to configure for each associated station */
const struct iwl_fw_bcast_filter *bcast_filters;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
u32 override; /* u32 for debugfs_create_bool */
struct iwl_bcast_filter_cmd cmd;
} dbgfs_bcast_filtering;
#endif
#endif
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@ -526,6 +539,9 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
/* A bitmap of reference types taken by the driver. */
unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@ -548,6 +564,10 @@ struct iwl_mvm {
#endif
#endif
/* d0i3 */
u8 d0i3_ap_sta_id;
struct work_struct d0i3_exit_work;
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
@ -557,8 +577,6 @@ struct iwl_mvm {
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
const struct iwl_mvm_power_ops *pm_ops;
#ifdef CONFIG_NL80211_TESTMODE
u32 noa_duration;
struct ieee80211_vif *noa_vif;
@ -572,7 +590,9 @@ struct iwl_mvm {
u8 bound_vif_cnt;
/* Indicate if device power save is allowed */
bool ps_prevented;
bool ps_disabled;
/* Indicate if device power management is allowed */
bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@ -595,6 +615,24 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
return NULL;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/* This can happen if the station has been removed right now */
if (IS_ERR_OR_NULL(sta))
return NULL;
return iwl_mvm_sta_from_mac80211(sta);
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@ -661,6 +699,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@ -773,48 +813,19 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
/* power managment */
static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
return mvm->pm_ops->power_update_mode(mvm, vif);
}
/* power management */
int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
return mvm->pm_ops->power_disable(mvm, vif);
}
static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
{
if (mvm->pm_ops->power_update_device_mode)
return mvm->pm_ops->power_update_device_mode(mvm);
return 0;
}
static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool assign)
{
if (mvm->pm_ops->power_update_binding)
mvm->pm_ops->power_update_binding(mvm, vif, assign);
}
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
char *buf, int bufsz)
{
return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
}
#endif
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@ -841,6 +852,10 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
/* D0i3 */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
@ -854,6 +869,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@ -875,25 +891,51 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{}
#endif
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
struct ieee80211_vif *vif,
u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
struct iwl_beacon_filter_cmd *cmd);
struct ieee80211_vif *vif,
u32 flags);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
struct ieee80211_vif *vif,
bool force,
u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{
/*
* should this consider associated/active/... state?
*
* Normally low-latency should only be active on interfaces
* that are active, but at least with debugfs it can also be
* enabled on interfaces that aren't active. However, when
interfaces aren't active they aren't added into the
* binding, so this has no real impact. For now, just return
* the current desired low-latency state.
*/
return mvmvif->low_latency;
}
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
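
The IWL_MVM_REF_* enum and the ref_bitmap field added here let the driver record which subsystems are currently blocking runtime power-down (D0i3): each holder owns one bit, and the device may only idle when no bit is set. As a rough, standalone sketch of that bookkeeping (it is not the driver's iwl_mvm_ref()/iwl_mvm_unref(), which additionally take and release the transport's runtime-PM reference):

#include <stdio.h>

/* Mirrors the idea of IWL_MVM_REF_*: each reference holder owns one bit. */
enum ref_type { REF_UCODE_DOWN, REF_SCAN, REF_ROC, REF_USER, REF_COUNT };

static unsigned long ref_bitmap;   /* one bit per holder, like mvm->ref_bitmap */

static void take_ref(enum ref_type t)
{
    ref_bitmap |= 1UL << t;
}

static void drop_ref(enum ref_type t)
{
    ref_bitmap &= ~(1UL << t);
}

/* The device may power down (enter D0i3) only when nobody holds a reference. */
static int may_idle(void)
{
    return ref_bitmap == 0;
}

int main(void)
{
    take_ref(REF_SCAN);
    printf("may idle: %d\n", may_idle());   /* 0: scan holds a reference */
    drop_ref(REF_SCAN);
    printf("may idle: %d\n", may_idle());   /* 1: nothing blocks D0i3 */
    return 0;
}
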

View file

@ -67,14 +67,6 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
/* list of NVM sections we are allowed/need to read */
static const int nvm_to_read[] = {
NVM_SECTION_TYPE_HW,
NVM_SECTION_TYPE_SW,
NVM_SECTION_TYPE_CALIBRATION,
NVM_SECTION_TYPE_PRODUCTION,
};
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000
@ -240,7 +232,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
/* Checking for required sections */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty NVM sections\n");
return NULL;
}
@ -248,7 +240,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
if (WARN_ON(!mvm->cfg))
return NULL;
hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
@ -367,7 +359,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
@ -415,6 +407,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, i, section;
u8 *nvm_buffer, *temp;
if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
@ -422,6 +417,14 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret)
return ret;
} else {
/* list of NVM sections we are allowed/need to read */
int nvm_to_read[] = {
mvm->cfg->nvm_hw_section_num,
NVM_SECTION_TYPE_SW,
NVM_SECTION_TYPE_CALIBRATION,
NVM_SECTION_TYPE_PRODUCTION,
};
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
@ -446,10 +449,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
#ifdef CONFIG_IWLWIFI_DEBUGFS
switch (section) {
case NVM_SECTION_TYPE_HW:
mvm->nvm_hw_blob.data = temp;
mvm->nvm_hw_blob.size = ret;
break;
case NVM_SECTION_TYPE_SW:
mvm->nvm_sw_blob.data = temp;
mvm->nvm_sw_blob.size = ret;
@ -463,6 +462,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_prod_blob.size = ret;
break;
default:
if (section == mvm->cfg->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
mvm->nvm_hw_blob.size = ret;
break;
}
WARN(1, "section: %d", section);
}
#endif

View file

@ -185,9 +185,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
struct iwl_rx_handlers {
@ -222,10 +223,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, false),
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
false),
@ -284,9 +287,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
CMD(D0I3_END_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
CMD(OFFLOADS_QUERY_CMD),
CMD(REMOTE_WAKE_CONFIG_CMD),
@ -309,6 +314,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
CMD(BCAST_FILTER_CMD),
CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
@ -320,6 +326,24 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
{
const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
if (!pwr_tx_backoff)
return 0;
while (pwr_tx_backoff->pwr) {
if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
return pwr_tx_backoff->backoff;
pwr_tx_backoff++;
}
return 0;
}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@ -333,6 +357,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
TX_CMD,
};
int err, scan_size;
u32 min_backoff;
/*
* We use IWL_MVM_STATION_COUNT to check the validity of the station
* index all over the driver - check that its value corresponds to the
* array size.
*/
BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
/********************************
* 1. Allocating and configuring HW data
@ -373,6 +405,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
@ -421,7 +454,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
iwl_mvm_tt_initialize(mvm);
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
/*
* If the NVM exists in an external file,
@ -462,13 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
mvm->pm_ops = &pm_mac_ops;
else
mvm->pm_ops = &pm_legacy_ops;
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
/* rpm starts with a taken ref. only set the appropriate bit here. */
set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
return op_mode;
out_unregister:
@ -508,7 +540,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
iwl_free_nvm_data(mvm->nvm_data);
for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
ieee80211_free_hw(mvm->hw);
@ -702,6 +734,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
iwl_abort_notification_waits(&mvm->notif_wait);
/*
* This is a bit racy, but worst case we tell mac80211 about
* a stopped/aborted scan when that was already done, which
* is not a problem. It is necessary to abort any OS scan
* here because mac80211 requires having the scan cleared
* before restarting.
* We'll reset the scan_status to NONE in restart cleanup in
* the next start() call from mac80211. If restart isn't called
* (no fw restart) scan status will stay busy.
*/
switch (mvm->scan_status) {
case IWL_MVM_SCAN_NONE:
break;
case IWL_MVM_SCAN_OS:
ieee80211_scan_completed(mvm->hw, true);
break;
case IWL_MVM_SCAN_SCHED:
/* Sched scan will be restarted by mac80211 in restart_hw. */
if (!mvm->restart_fw)
ieee80211_sched_scan_stopped(mvm->hw);
break;
}
/*
* If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
@ -733,25 +788,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
/*
* This is a bit racy, but worst case we tell mac80211 about
* a stopped/aborted (sched) scan when that was already done
* which is not a problem. It is necessary to abort any scan
* here because mac80211 requires having the scan cleared
* before restarting.
* We'll reset the scan_status to NONE in restart cleanup in
* the next start() call from mac80211.
*/
switch (mvm->scan_status) {
case IWL_MVM_SCAN_NONE:
break;
case IWL_MVM_SCAN_OS:
ieee80211_scan_completed(mvm->hw, true);
break;
case IWL_MVM_SCAN_SCHED:
ieee80211_sched_scan_stopped(mvm->hw);
break;
}
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0)
mvm->restart_fw--;
@ -778,6 +816,163 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm);
}
struct iwl_d0i3_iter_data {
struct iwl_mvm *mvm;
u8 ap_sta_id;
u8 vif_count;
};
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_d0i3_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
if (vif->type != NL80211_IFTYPE_STATION ||
!vif->bss_conf.assoc)
return;
iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
/*
* on init/association, mvm already configures POWER_TABLE_CMD
* and REPLY_MCAST_FILTER_CMD, so currently don't
* reconfigure them (we might want to use different
* params later on, though).
*/
data->ap_sta_id = mvmvif->ap_sta_id;
data->vif_count++;
}
static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
int ret;
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
struct iwl_wowlan_config_cmd wowlan_config_cmd = {
.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
};
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
&d0i3_iter_data);
if (d0i3_iter_data.vif_count == 1) {
mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
if (ret)
return ret;
return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
flags | CMD_MAKE_TRANS_IDLE,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}
static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = _data;
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
if (vif->type != NL80211_IFTYPE_STATION ||
!vif->bss_conf.assoc)
return;
iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}
static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
ieee80211_connection_loss(vif);
}
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
struct iwl_host_cmd get_status_cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
};
struct iwl_wowlan_status_v6 *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
if (ret)
goto out;
if (!get_status_cmd.resp_pkt)
goto out;
status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
if (wakeup_reasons & disconnection_reasons)
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d0i3_disconnect_iter, mvm);
iwl_free_resp(&get_status_cmd);
out:
mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
CMD_WAKE_UP_TRANS;
int ret;
IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
if (ret)
goto out;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_exit_d0i3_iterator,
mvm);
out:
schedule_work(&mvm->d0i3_exit_work);
return ret;
}
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@ -789,4 +984,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_error = iwl_mvm_nic_error,
.cmd_queue_full = iwl_mvm_cmd_queue_full,
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
};
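
The new calc_min_backoff() walks the platform's pwr_tx_backoffs table and returns the TX backoff of the first entry whose power threshold is not above the platform's default power limit; the result is then passed to iwl_mvm_tt_initialize() as min_backoff. A standalone sketch of that lookup, with table values invented purely for illustration:

#include <stdio.h>

struct pwr_tx_backoff {
    unsigned int pwr;       /* platform power limit threshold */
    unsigned int backoff;   /* TX backoff to apply */
};

/* Hypothetical table, highest threshold first, zero-terminated. */
static const struct pwr_tx_backoff backoffs[] = {
    { .pwr = 1600, .backoff = 0 },
    { .pwr = 1300, .backoff = 1000 },
    { .pwr = 1000, .backoff = 3000 },
    { 0, 0 },               /* sentinel */
};

static unsigned int min_backoff(unsigned int dflt_pwr_limit)
{
    const struct pwr_tx_backoff *p = backoffs;

    while (p->pwr) {
        if (dflt_pwr_limit >= p->pwr)
            return p->backoff;
        p++;
    }
    return 0;
}

int main(void)
{
    /* 1400 is below 1600 but at least 1300, so the 1300 entry wins: 1000 */
    printf("%u\n", min_backoff(1400));
    return 0;
}
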

View file

@ -74,39 +74,36 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
struct iwl_beacon_filter_cmd *cmd)
struct iwl_beacon_filter_cmd *cmd,
u32 flags)
{
int ret;
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
le32_to_cpu(cmd->ba_enable_beacon_abort));
IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
le32_to_cpu(cmd->ba_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
le32_to_cpu(cmd->bf_debug_flag));
IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
le32_to_cpu(cmd->bf_enable_beacon_filter));
IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
le32_to_cpu(cmd->bf_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
le32_to_cpu(cmd->bf_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
le32_to_cpu(cmd->bf_roaming_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
le32_to_cpu(cmd->bf_roaming_state));
IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
le32_to_cpu(cmd->bf_temp_threshold));
IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
le32_to_cpu(cmd->bf_temp_fast_filter));
IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
le32_to_cpu(cmd->bf_temp_slow_filter));
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
sizeof(struct iwl_beacon_filter_cmd), cmd);
if (!ret) {
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
le32_to_cpu(cmd->ba_enable_beacon_abort));
IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
le32_to_cpu(cmd->ba_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
le32_to_cpu(cmd->bf_debug_flag));
IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
le32_to_cpu(cmd->bf_enable_beacon_filter));
IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
le32_to_cpu(cmd->bf_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
le32_to_cpu(cmd->bf_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
le32_to_cpu(cmd->bf_roaming_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
le32_to_cpu(cmd->bf_roaming_state));
IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
le32_to_cpu(cmd->bf_temp_threshold));
IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
le32_to_cpu(cmd->bf_temp_fast_filter));
IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
le32_to_cpu(cmd->bf_temp_slow_filter));
}
return ret;
return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
sizeof(struct iwl_beacon_filter_cmd), cmd);
}
static
@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
mvm->ps_prevented)
if (mvm->ps_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
if (!vif->bss_conf.ps || mvmvif->pm_prevented)
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
mvm->pm_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@ -419,11 +416,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
int ret;
bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION)
@ -435,56 +430,30 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
return ret;
ba_enable = !!(cmd.flags &
cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}
static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mac_power_cmd cmd = {};
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
mvmvif->dbgfs_pm.disable_power_off)
cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
iwl_mvm_power_log(mvm, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
}
static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
return 0;
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
force_disable)
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
mvm->ps_disabled = true;
if (mvm->ps_disabled)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
&cmd);
}
static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
return _iwl_mvm_power_update_device(mvm, false);
}
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -544,44 +508,137 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
struct iwl_power_constraint {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
bool pm_disabled;
bool ps_disabled;
};
static void iwl_mvm_power_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = _data;
int ret;
struct iwl_power_constraint *power_iterator = _data;
mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
ret = iwl_mvm_power_mac_update_mode(mvm, vif);
WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
/* no BSS power mgmt if we have an active AP */
if (mvmvif->ap_ibss_active)
power_iterator->pm_disabled = true;
break;
case NL80211_IFTYPE_MONITOR:
/* no BSS power mgmt and no device power save */
power_iterator->pm_disabled = true;
power_iterator->ps_disabled = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
/* no BSS power mgmt if we have a P2P client */
power_iterator->pm_disabled = true;
break;
case NL80211_IFTYPE_STATION:
/* we should have only one BSS vif */
WARN_ON(power_iterator->bss_vif);
power_iterator->bss_vif = vif;
if (mvmvif->bf_data.bf_enabled &&
!WARN_ON(power_iterator->bf_vif))
power_iterator->bf_vif = vif;
break;
default:
break;
}
}
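
The iterator above folds the set of active interfaces into two global flags: an active AP/GO or a P2P client disables per-MAC power management, and a monitor interface disables both power management and device power save. A condensed sketch of that decision, with the interface types reduced to a small local enum:

#include <stdbool.h>
#include <stdio.h>

enum vif_type { VIF_STATION, VIF_AP, VIF_P2P_CLIENT, VIF_MONITOR };

struct constraint { bool pm_disabled, ps_disabled; };

static void account_vif(struct constraint *c, enum vif_type t)
{
    switch (t) {
    case VIF_AP:            /* active AP/GO: no BSS power management */
    case VIF_P2P_CLIENT:    /* P2P client: no BSS power management */
        c->pm_disabled = true;
        break;
    case VIF_MONITOR:       /* monitor: no BSS PM and no device power save */
        c->pm_disabled = true;
        c->ps_disabled = true;
        break;
    case VIF_STATION:       /* station alone does not add a constraint */
    default:
        break;
    }
}

int main(void)
{
    struct constraint c = { false, false };
    enum vif_type vifs[] = { VIF_STATION, VIF_MONITOR };
    unsigned int i;

    for (i = 0; i < sizeof(vifs) / sizeof(vifs[0]); i++)
        account_vif(&c, vifs[i]);

    printf("pm_disabled=%d ps_disabled=%d\n", c.pm_disabled, c.ps_disabled);
    return 0;
}
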
static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool assign)
static void
iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
struct iwl_power_constraint *constraint)
{
if (vif->type == NL80211_IFTYPE_MONITOR) {
int ret = _iwl_mvm_power_update_device(mvm, assign);
mvm->ps_prevented = assign;
WARN_ONCE(ret, "Failed to update power device state\n");
lockdep_assert_held(&mvm->mutex);
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
constraint->pm_disabled = true;
constraint->ps_disabled = true;
}
ieee80211_iterate_active_interfaces(mvm->hw,
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_binding_iterator,
mvm);
iwl_mvm_power_iterator, constraint);
/* TODO: remove this and determine this variable in the iterator */
if (mvm->bound_vif_cnt > 1)
constraint->pm_disabled = true;
}
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_power_constraint constraint = {};
bool ba_enable;
int ret;
lockdep_assert_held(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
return 0;
iwl_mvm_power_get_global_constraint(mvm, &constraint);
mvm->ps_disabled = constraint.ps_disabled;
mvm->pm_disabled = constraint.pm_disabled;
/* don't update device power state unless we add / remove monitor */
if (vif->type == NL80211_IFTYPE_MONITOR) {
ret = iwl_mvm_power_update_device(mvm);
if (ret)
return ret;
}
ret = iwl_mvm_power_send_cmd(mvm, vif);
if (ret)
return ret;
if (constraint.bss_vif && vif != constraint.bss_vif) {
ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
if (ret)
return ret;
}
if (!constraint.bf_vif)
return 0;
vif = constraint.bf_vif;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
int bufsz)
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
int bufsz)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
if (WARN_ON(!(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
return 0;
mutex_lock(&mvm->mutex);
memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
mutex_unlock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@ -685,32 +742,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd,
u32 cmd_flags,
bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
int ret;
if (mvmvif != mvm->bf_allowed_vif ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
if (!d0i3)
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
if (!ret)
/* don't change bf_enabled in case of temporary d0i3 configuration */
if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = true;
return ret;
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
struct ieee80211_vif *vif,
u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -720,7 +791,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@ -728,23 +799,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool enable, u32 flags)
{
int ret;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
if (!vif->bss_conf.assoc)
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
if (enable) {
/* configure skip over dtim up to 300 msec */
int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
if (WARN_ON(!dtimper_msec))
return 0;
cmd.flags |=
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
cmd.skip_dtim_periods = 300 / dtimper_msec;
}
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
sizeof(cmd), &cmd);
if (ret)
return ret;
/* configure beacon filtering */
if (mvmvif != mvm->bf_allowed_vif)
return 0;
if (enable) {
struct iwl_beacon_filter_cmd cmd_bf = {
IWL_BF_CMD_CONFIG_D0I3,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
flags, true);
} else {
if (mvmvif->bf_data.bf_enabled)
ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
else
ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
}
return ret;
}
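
iwl_mvm_update_d0i3_power_mode() programs skip-over-DTIM for roughly 300 ms of sleep: the DTIM period is converted to milliseconds (the beacon interval, nominally in TU, is treated as milliseconds, which is a close approximation) and 300 is divided by that. A worked sketch of the arithmetic, with example beacon parameters:

#include <stdio.h>

/* How many DTIM periods fit in a ~300 ms sleep window? */
static int skip_dtim_periods(int dtim_period, int beacon_int_ms)
{
    int dtim_ms = (dtim_period ? dtim_period : 1) * beacon_int_ms;

    if (!dtim_ms)
        return 0;
    return 300 / dtim_ms;
}

int main(void)
{
    /* beacon interval ~100 ms, DTIM period 2 -> one DTIM can be skipped */
    printf("%d\n", skip_dtim_periods(2, 100));
    /* DTIM period 1 -> a DTIM every ~100 ms -> three can be skipped */
    printf("%d\n", skip_dtim_periods(1, 100));
    return 0;
}
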
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
struct ieee80211_vif *vif,
bool force,
u32 flags)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (!mvmvif->bf_data.bf_enabled)
if (mvmvif != mvm->bf_allowed_vif)
return 0;
return iwl_mvm_enable_beacon_filter(mvm, vif);
if (!mvmvif->bf_data.bf_enabled) {
/* disable beacon filtering explicitly if force is true */
if (force)
return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
return 0;
}
return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
}
const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
.power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
.power_update_binding = _iwl_mvm_power_update_binding,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
#endif
};
int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
{
struct iwl_powertable_cmd cmd = {
.keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
};
return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
}

View file

@ -1,319 +0,0 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-debug.h"
#include "mvm.h"
#include "iwl-modparams.h"
#include "fw-api-power.h"
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
struct iwl_powertable_cmd *cmd)
{
IWL_DEBUG_POWER(mvm,
"Sending power table command for power level %d, flags = 0x%X\n",
iwlmvm_mod_params.power_scheme,
le16_to_cpu(cmd->flags));
IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
le32_to_cpu(cmd->skip_dtim_periods));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
le32_to_cpu(cmd->lprx_rssi_threshold));
}
}
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_powertable_cmd *cmd)
{
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
int dtimper, dtimper_msec;
int keep_alive;
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
/*
* Regardless of power management state the driver must set
* keep alive period. FW will use it for sending keep alive NDPs
* immediately after association.
*/
cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
if (!vif->bss_conf.assoc)
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
if (!vif->bss_conf.ps)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
if (vif->bss_conf.beacon_rate &&
(vif->bss_conf.beacon_rate->bitrate == 10 ||
vif->bss_conf.beacon_rate->bitrate == 60)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
cmd->lprx_rssi_threshold =
cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
}
dtimper = hw->conf.ps_dtim_period ?: 1;
/* Check if radar detection is required on current channel */
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
WARN_ON(!chanctx_conf);
if (chanctx_conf) {
chan = chanctx_conf->def.chan;
radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
}
rcu_read_unlock();
/* Check skip over DTIM conditions */
if (!radar_detect && (dtimper <= 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
cmd->skip_dtim_periods = cpu_to_le32(3);
}
/* Check that keep alive period is at least 3 * DTIM */
dtimper_msec = dtimper * vif->bss_conf.beacon_int;
keep_alive = max_t(int, 3 * dtimper_msec,
MSEC_PER_SEC * cmd->keep_alive_seconds);
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = keep_alive;
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
} else {
cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
if (mvmvif->dbgfs_pm.skip_over_dtim)
cmd->flags |=
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
else
cmd->flags &=
cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
cmd->rx_data_timeout =
cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
cmd->tx_data_timeout =
cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
cmd->skip_dtim_periods =
cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
if (mvmvif->dbgfs_pm.lprx_ena)
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
else
cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
cmd->lprx_rssi_threshold =
cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
int ret;
bool ba_enable;
struct iwl_powertable_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
/*
* TODO: The following vif_count verification is temporary condition.
* Avoid power mode update if more than one interface is currently
* active. Remove this condition when FW will support power management
* on multiple MACs.
*/
IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
mvm->vif_count);
if (mvm->vif_count > 1)
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
return ret;
ba_enable = !!(cmd.flags &
cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}
static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_powertable_cmd cmd = {};
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
mvmvif->dbgfs_pm.disable_power_off)
cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
iwl_mvm_power_log(mvm, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
sizeof(cmd), &cmd);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
int bufsz)
{
struct iwl_powertable_cmd cmd = {};
int pos = 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
(cmd.flags &
cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
le32_to_cpu(cmd.skip_dtim_periods));
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
le16_to_cpu(cmd.flags));
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
cmd.keep_alive_seconds);
if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
(cmd.flags &
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
1 : 0);
pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
le32_to_cpu(cmd.rx_data_timeout));
pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
le32_to_cpu(cmd.tx_data_timeout));
if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
pos += scnprintf(buf+pos, bufsz-pos,
"lprx_rssi_threshold = %d\n",
le32_to_cpu(cmd.lprx_rssi_threshold));
}
return pos;
}
#endif
const struct iwl_mvm_power_ops pm_legacy_ops = {
.power_update_mode = iwl_mvm_power_legacy_update_mode,
.power_disable = iwl_mvm_power_legacy_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
#endif
};

View file

@ -65,9 +65,14 @@
#include "fw-api.h"
#include "mvm.h"
#define QUOTA_100 IWL_MVM_MAX_QUOTA
#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
int low_latency[MAX_BINDINGS];
int n_low_latency_bindings;
struct ieee80211_vif *new_vif;
};
@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (vif->bss_conf.assoc)
data->n_interfaces[id]++;
break;
break;
return;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
if (mvmvif->ap_ibss_active)
data->n_interfaces[id]++;
break;
break;
return;
case NL80211_IFTYPE_MONITOR:
if (mvmvif->monitor_active)
data->n_interfaces[id]++;
break;
break;
return;
case NL80211_IFTYPE_P2P_DEVICE:
break;
return;
default:
WARN_ON_ONCE(1);
break;
return;
}
data->n_interfaces[id]++;
if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
data->n_low_latency_bindings++;
data->low_latency[id] = true;
}
}
@ -162,12 +174,13 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
int i, idx, ret, num_active_macs, quota, quota_rem;
int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
u32 ll_max_duration;
lockdep_assert_held(&mvm->mutex);
@ -186,6 +199,21 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
switch (data.n_low_latency_bindings) {
case 0: /* no low latency - use default */
ll_max_duration = 0;
break;
case 1: /* SingleBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR;
break;
case 2: /* DualBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR;
break;
default: /* MultiBindingLowLatencyMode */
ll_max_duration = 0;
break;
}
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
@ -197,11 +225,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_macs += data.n_interfaces[i];
}
quota = 0;
quota_rem = 0;
if (num_active_macs) {
quota = IWL_MVM_MAX_QUOTA / num_active_macs;
quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
n_non_lowlat = num_active_macs;
if (data.n_low_latency_bindings == 1) {
for (i = 0; i < MAX_BINDINGS; i++) {
if (data.low_latency[i]) {
n_non_lowlat -= data.n_interfaces[i];
break;
}
}
}
if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
/*
* Reserve quota for the low latency binding in case that
* there are several data bindings but only a single
* low latency one. Split the rest of the quota equally
* between the other data interfaces.
*/
quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
quota_rem = QUOTA_100 - n_non_lowlat * quota -
QUOTA_LOWLAT_MIN;
} else if (num_active_macs) {
/*
* There are 0 or more than 1 low latency bindings, or all the
* data interfaces belong to the single low latency binding.
* Split the quota equally between the data interfaces.
*/
quota = QUOTA_100 / num_active_macs;
quota_rem = QUOTA_100 % num_active_macs;
} else {
/* values don't really matter - won't be used */
quota = 0;
quota_rem = 0;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@ -211,19 +267,42 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
if (data.n_interfaces[i] <= 0) {
if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
cmd.quotas[idx].max_duration = cpu_to_le32(0);
} else {
else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
data.low_latency[i])
/*
* There is more than one binding, but only one of the
* bindings is in low latency. For this case, allocate
* the minimal required quota for the low latency
* binding.
*/
cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
"Binding=%d, quota=%u > max=%u\n",
idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
if (data.n_interfaces[i] && !data.low_latency[i])
cmd.quotas[idx].max_duration =
cpu_to_le32(ll_max_duration);
else
cmd.quotas[idx].max_duration = cpu_to_le32(0);
}
idx++;
}
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
/* Give the remainder of the session to the first data binding */
for (i = 0; i < MAX_BINDINGS; i++) {
if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
break;
}
}
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
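
The new quota split reserves QUOTA_LOWLAT_MIN fragments for a single low-latency binding and divides the rest equally among the remaining data interfaces, with the leftover going to the first binding that actually has quota. A worked example with invented numbers (128 scheduler fragments, a 25% low-latency minimum, five other interfaces) shows where quota and quota_rem come from:

#include <stdio.h>

int main(void)
{
    /* All values below are assumptions for illustration only. */
    const int quota_100 = 128;                       /* scheduler fragments */
    const int lowlat_min = (quota_100 * 25) / 100;   /* reserved minimum: 32 */
    const int n_non_lowlat = 5;                      /* ifaces outside LL binding */

    int quota = (quota_100 - lowlat_min) / n_non_lowlat;           /* 96/5 = 19 */
    int quota_rem = quota_100 - n_non_lowlat * quota - lowlat_min; /* 1 */

    printf("low-latency binding gets at least %d fragments\n", lowlat_min);
    printf("each other interface gets %d, remainder %d\n", quota, quota_rem);
    return 0;
}
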

View file

@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
* (2.4 GHz) band.
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
/* Expected TpT tables. 4 indexes:
* 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
*/
static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
{0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};
static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
{0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
{0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};
static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
{0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@ -905,7 +905,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rate->bw = RATE_MCS_CHAN_WIDTH_20;
WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
rate->index > IWL_RATE_MCS_9_INDEX);
rate->index = rs_ht_to_legacy[rate->index];
@ -1169,12 +1169,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column,
u32 bw)
{
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
column->mode != RS_SISO &&
@ -1262,9 +1262,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
/* expected "search" throughput */
s32 *tpt_tbl = tbl->expected_tpt;
const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@ -1479,7 +1478,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
s32 *expected_tpt_tbl;
const u16 *expected_tpt_tbl;
s32 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@ -2815,8 +2814,8 @@ static void rs_rate_init_stub(void *mvm_r,
struct ieee80211_sta *sta, void *mvm_sta)
{
}
static struct rate_control_ops rs_mvm_ops = {
.module = NULL,
static const struct rate_control_ops rs_mvm_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,

View file

@ -277,7 +277,7 @@ enum rs_column {
struct iwl_scale_tbl_info {
struct rs_rate rate;
enum rs_column column;
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};

View file

@ -129,22 +129,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
int rssi_all_band_a, rssi_all_band_b;
u32 agc_a, agc_b, max_agc;
u32 agc_a, agc_b;
u32 val;
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
max_agc = max_t(u32, agc_a, agc_b);
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
IWL_OFDM_RSSI_ALLBAND_A_POS;
rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
IWL_OFDM_RSSI_ALLBAND_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
@ -364,10 +358,10 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
rx_status.flag |= RX_FLAG_80MHZ;
rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
rx_status.flag |= RX_FLAG_160MHZ;
rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)

View file

@ -408,6 +408,8 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
return 0;
}
@ -476,6 +478,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
return;
}
@ -488,7 +491,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the fw_restart flow */
/* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
@ -509,11 +512,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
mvm->scan_status = IWL_MVM_SCAN_NONE;
/* might already be something else again, don't reset if so */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED)
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_sched_scan_stopped(mvm->hw);
return 0;
@ -596,6 +604,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* config match list.
*/
for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
/* skip empty SSID matchsets */
if (!req->match_sets[i].ssid.ssid_len)
continue;
scan->direct_scan[i].id = WLAN_EID_SSID;
scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,

View file

@ -66,27 +66,27 @@
#include "sta.h"
#include "rs.h"
static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
cmd_v5->add_modify = cmd_v6->add_modify;
cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
cmd_v5->sta_id = cmd_v6->sta_id;
cmd_v5->modify_mask = cmd_v6->modify_mask;
cmd_v5->station_flags = cmd_v6->station_flags;
cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
cmd_v5->assoc_id = cmd_v6->assoc_id;
cmd_v5->beamform_flags = cmd_v6->beamform_flags;
cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
cmd_v5->add_modify = cmd_v7->add_modify;
cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
cmd_v5->sta_id = cmd_v7->sta_id;
cmd_v5->modify_mask = cmd_v7->modify_mask;
cmd_v5->station_flags = cmd_v7->station_flags;
cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
cmd_v5->assoc_id = cmd_v7->assoc_id;
cmd_v5->beamform_flags = cmd_v7->beamform_flags;
cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
}
static void
@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
}
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
struct iwl_mvm_add_sta_cmd_v6 *cmd,
struct iwl_mvm_add_sta_cmd_v7 *cmd,
int *status)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
cmd, status);
iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
struct iwl_mvm_add_sta_cmd_v6 *cmd)
struct iwl_mvm_add_sta_cmd_v7 *cmd)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
sizeof(*cmd), cmd);
iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
&cmd_v5);
@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
&sta_cmd);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
enum nl80211_iftype iftype)
{
int sta_id;
u32 reserved_ids = 0;
BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
if (iftype != NL80211_IFTYPE_STATION)
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
return IWL_MVM_STATION_COUNT;
}
@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
sta_id = iwl_mvm_find_free_sta_id(mvm);
sta_id = iwl_mvm_find_free_sta_id(mvm,
ieee80211_vif_type_p2p(vif));
else
sta_id = mvm_sta->sta_id;
@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
struct iwl_mvm_add_sta_cmd_v6 cmd = {};
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
/*
@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
u32 qmask)
u32 qmask, enum nl80211_iftype iftype)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
}
@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
struct iwl_mvm_add_sta_cmd_v6 cmd;
struct iwl_mvm_add_sta_cmd_v7 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Add the aux station, but without any queues */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
ieee80211_vif_type_p2p(vif));
if (ret)
return ret;
@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
struct iwl_mvm_add_sta_cmd_v6 cmd = {};
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
struct iwl_mvm_add_sta_cmd_v6 cmd = {};
struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
static const u8 tid_to_ac[] = {
static const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
IEEE80211_AC_VO,
};
static const u8 tid_to_ucode_ac[] = {
AC_BE,
AC_BK,
AC_BK,
AC_BE,
AC_VI,
AC_VI,
AC_VO,
AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
@ -874,7 +903,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
/* the new tx queue is still connected to the same mac80211 queue */
mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
@ -916,7 +945,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@ -1411,7 +1440,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@ -1427,28 +1456,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
u16 cnt)
u16 cnt, u16 tids, bool more_data,
bool agg)
{
u16 sleep_state_flags =
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
.sleep_tx_count = cpu_to_le16(cnt),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
/*
* Same modify mask for sleep_tx_count and sleep_state_flags so
* we must set the sleep_state_flags too.
*/
.sleep_state_flags = cpu_to_le16(sleep_state_flags),
};
int ret;
int tid, ret;
unsigned long _tids = tids;
/* convert TIDs to ACs - we don't support TSPEC so that's OK
* Note that this field is reserved and unused by firmware not
* supporting GO uAPSD, so it's safe to always do this.
*/
for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
/* If we're releasing frames from aggregation queues then check if all
* the queues combined that we're releasing frames from have
* - more frames than the service period, in which case more_data
* needs to be set
* - fewer than 'cnt' frames, in which case we need to adjust the
* firmware command (but do that unconditionally)
*/
if (agg) {
int remaining = cnt;
spin_lock_bh(&mvmsta->lock);
for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
struct iwl_mvm_tid_data *tid_data;
u16 n_queued;
tid_data = &mvmsta->tid_data[tid];
if (WARN(tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
"TID %d state is %d\n",
tid, tid_data->state)) {
spin_unlock_bh(&mvmsta->lock);
ieee80211_sta_eosp(sta);
return;
}
n_queued = iwl_mvm_tid_queued(tid_data);
if (n_queued > remaining) {
more_data = true;
remaining = 0;
break;
}
remaining -= n_queued;
}
spin_unlock_bh(&mvmsta->lock);
cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
if (WARN_ON(cnt - remaining == 0)) {
ieee80211_sta_eosp(sta);
return;
}
}
/* Note: this is ignored by firmware not supporting GO uAPSD */
if (more_data)
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
mvmsta->next_status_eosp = true;
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
} else {
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
}
/* TODO: somehow the fw doesn't seem to take PS_POLL into account */
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
struct ieee80211_sta *sta;
u32 sta_id = le32_to_cpu(notif->sta_id);
if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
return 0;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (!IS_ERR_OR_NULL(sta))
ieee80211_sta_eosp(sta);
rcu_read_unlock();
return 0;
}
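Editorial note, not part of this hunk: the notification handler above only runs once it is wired into the opmode's RX dispatch table. A minimal sketch, assuming the RX_HANDLER() macro, the iwl_mvm_rx_handlers[] table already used by mvm/ops.c, and the EOSP_NOTIFICATION command id; the actual registration is not shown in this diff:

/* Sketch of the assumed ops.c registration this handler relies on. */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	/* ... existing entries ... */
	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
};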

View file

@ -195,24 +195,33 @@ struct iwl_mvm;
/**
* DOC: AP mode - PS
*
* When a station is asleep, the fw will set it as "asleep". All the
* non-aggregation frames to that station will be dropped by the fw
* (%TX_STATUS_FAIL_DEST_PS failure code).
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
* let mac80211 know how many frames we have in these queues so that it can
* properly handle trigger frames.
* When the a trigger frame is received, mac80211 tells the driver to send
* frames from the AMPDU queues or AC queue depending on which queue are
* delivery-enabled and what TID has frames to transmit (Note that mac80211 has
* all the knowledege since all the non-agg frames are buffered / filtered, and
* the driver tells mac80211 about agg frames). The driver needs to tell the fw
* to let frames out even if the station is asleep. This is done by
* %iwl_mvm_sta_modify_sleep_tx_count.
* When we receive a frame from that station with PM bit unset, the
* driver needs to let the fw know that this station isn't alseep any more.
* This is done by %iwl_mvm_sta_modify_ps_wake.
* When a station is asleep, the fw will set it as "asleep". All frames on
* shared queues (i.e. non-aggregation queues) to that station will be dropped
* by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
*
* TODO - EOSP handling
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
* let mac80211 know when there are frames in these queues so that it can
* properly handle trigger frames.
*
* When a trigger frame is received, mac80211 tells the driver to send frames
* from the AMPDU queues or sends frames to non-aggregation queues itself,
* depending on which ACs are delivery-enabled and what TID has frames to
* transmit. Note that mac80211 has all the knowledge since all the non-agg
* frames are buffered / filtered, and the driver tells mac80211 about agg
* frames. The driver needs to tell the fw to let frames out even if the
* station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
*
* When we receive a frame from that station with PM bit unset, the driver
* needs to let the fw know that this station isn't asleep any more. This is
* done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
* station's wakeup.
*
* For a GO, the Service Period might be cut short due to an absence period
* of the GO. In this (and all other cases) the firmware notifies us with the
* EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
* already sent to the device will be rejected again.
*
* See also "AP support for powersaving clients" in mac80211.h.
*/
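To make the flow described above concrete, here is a minimal sketch of a mac80211 release_buffered_frames driver op forwarding a trigger-frame release to the helper whose updated prototype appears below. The function name, the IWL_MAC80211_GET_MVM accessor and the final "agg" argument are assumptions for illustration, not the literal hunk from this commit:

/* Sketch only: how a trigger-frame release could reach the new helper. */
static void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
						struct ieee80211_sta *sta,
						u16 tids, int num_frames,
						enum ieee80211_frame_release_type reason,
						bool more_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);	/* assumed accessor */

	/* The fw sends the EOSP notification once the last frame has been
	 * released, so all that is left to do here is unblock the frames.
	 * Last argument: assume the released frames sit on agg queues.
	 */
	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
					  tids, more_data, true);
}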
/**
@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
u16 ssn;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
{
return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
tid_data->next_reclaimed);
}
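A quick worked example of the helper above, with made-up values, showing that ieee80211_sn_sub()'s modulo-4096 arithmetic keeps the count correct across a sequence-number wrap:

/* Illustrative only -- values are made up, not taken from this patch. */
static void iwl_mvm_tid_queued_example(void)
{
	struct iwl_mvm_tid_data td = {
		.seq_number	= 5 << 4,	/* next SN to use is 5 (stored as SN << 4) */
		.next_reclaimed	= 4090,		/* oldest SN not yet reclaimed */
	};

	/* (5 - 4090) & 0xfff == 11: eleven frames still queued despite the wrap */
	WARN_ON(iwl_mvm_tid_queued(&td) != 11);
}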
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
* @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
* by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @next_status_eosp: the next reclaimed packet is a PS-Poll response and
* we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@ -287,7 +306,9 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start);
@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
u32 qmask);
u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta);
int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
u16 cnt);
u16 cnt, u16 tids, bool more_data,
bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);

View file

@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
* in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
/*
* Of course, our status bit is just as racy as mac80211, so in
@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
}
} else {

View file

@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
}
}
static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
{
struct iwl_host_cmd cmd = {
.id = REPLY_THERMAL_MNG_BACKOFF,
@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.flags = CMD_SYNC,
};
backoff = max(backoff, mvm->thermal_throttle.min_backoff);
if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
backoff);
@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}

View file

@ -377,6 +377,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
/*
* we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses
* we'll notify mac80211 when getting frame status
*/
info->flags &= ~IEEE80211_TX_STATUS_EOSP;
spin_lock(&mvmsta->lock);
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@ -437,6 +444,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
if ((tid_data->state == IWL_AGG_ON ||
tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
iwl_mvm_tid_queued(tid_data) == 0) {
/*
* Now that this aggregation queue is empty tell mac80211 so it
* knows we no longer have frames buffered for the station on
* this TID (for the TIM bitmap calculation.)
*/
ieee80211_sta_set_buffered(sta, tid, false);
}
if (tid_data->ssn != tid_data->next_reclaimed)
return;
@ -680,6 +698,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
if (mvmsta->next_status_eosp) {
mvmsta->next_status_eosp = false;
ieee80211_sta_eosp(sta);
}
} else {
mvmsta = NULL;
}

View file

@ -376,9 +376,67 @@ struct iwl_error_event_table {
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed;
/*
* UMAC error struct - relevant starting from family 8000 chip.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_umac_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 line; /* source code line of error */
u32 umac_ver; /* umac version */
} __packed;
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table;
u32 base;
base = mvm->umac_error_event_table;
if (base < 0x800000 || base >= 0x80C000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
(mvm->cur_ucode == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
mvm->status, table.valid);
}
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
}
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
if (base < 0x800000 || base >= 0x80C000) {
if (base < 0x800000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@ -453,13 +511,17 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
if (mvm->support_umac_log)
iwl_mvm_dump_umac_error_log(mvm);
}
void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
{
const struct fw_img *img;
int ofs, len = 0;
u8 *buf;
int i;
__le32 *buf;
if (!mvm->ucode_loaded)
return;
@ -473,7 +535,12 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
return;
iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
iwl_print_hex_error(mvm->trans, buf, len);
len = len >> 2;
for (i = 0; i < len; i++) {
IWL_ERR(mvm, "0x%08X\n", le32_to_cpu(buf[i]));
/* Add a small delay to let syslog catch up */
udelay(10);
}
kfree(buf);
}
@ -516,7 +583,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
enum ieee80211_smps_mode smps_mode;
int i;
lockdep_assert_held(&mvm->mutex);
@ -525,6 +592,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
return;
if (vif->type == NL80211_IFTYPE_AP)
smps_mode = IEEE80211_SMPS_OFF;
else
smps_mode = IEEE80211_SMPS_AUTOMATIC;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@ -538,3 +610,22 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int res;
lockdep_assert_held(&mvm->mutex);
mvmvif->low_latency = value;
res = iwl_mvm_update_quotas(mvm, NULL);
if (res)
return res;
iwl_mvm_bt_coex_vif_change(mvm);
return iwl_mvm_power_update_mac(mvm, vif);
}

View file

@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@ -390,12 +391,91 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
/* 8000 Series */
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#ifdef CONFIG_ACPI
#define SPL_METHOD "SPLC"
#define SPL_DOMAINTYPE_MODULE BIT(0)
#define SPL_DOMAINTYPE_WIFI BIT(1)
#define SPL_DOMAINTYPE_WIGIG BIT(2)
#define SPL_DOMAINTYPE_RFEM BIT(3)
static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
{
union acpi_object *limits, *domain_type, *power_limit;
if (splx->type != ACPI_TYPE_PACKAGE ||
splx->package.count != 2 ||
splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
splx->package.elements[0].integer.value != 0) {
IWL_ERR(trans, "Unsupported splx structure");
return 0;
}
limits = &splx->package.elements[1];
if (limits->type != ACPI_TYPE_PACKAGE ||
limits->package.count < 2 ||
limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
IWL_ERR(trans, "Invalid limits element");
return 0;
}
domain_type = &limits->package.elements[0];
power_limit = &limits->package.elements[1];
if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
IWL_DEBUG_INFO(trans, "WiFi power is not limited");
return 0;
}
return power_limit->integer.value;
}
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
acpi_handle pxsx_handle;
acpi_handle handle;
struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
pxsx_handle = ACPI_HANDLE(&pdev->dev);
if (!pxsx_handle) {
IWL_ERR(trans, "Could not retrieve root port ACPI handle");
return;
}
/* Get the method's handle */
status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_INFO(trans, "SPL method not found");
return;
}
/* Call SPLC with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &splx);
if (ACPI_FAILURE(status)) {
IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
return;
}
trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
trans->dflt_pwr_limit);
kfree(splx.pointer);
}
#else /* CONFIG_ACPI */
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
#endif
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@ -420,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
set_dflt_pwr_limit(iwl_trans, pdev);
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)

Some files were not shown because too many files have changed in this diff.