@@ -1435,7 +1435,7 @@ F: Documentation/aoe/
F: drivers/block/aoe/
ATHEROS ATH GENERIC UTILITIES
-M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
+M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -1443,7 +1443,7 @@ F: drivers/net/wireless/ath/*
ATHEROS ATH5K WIRELESS DRIVER
M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
L: ath5k-devel@lists.ath5k.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
@@ -8553,12 +8553,11 @@ S: Maintained
F: sound/soc/codecs/twl4030*
TI WILINK WIRELESS DRIVERS
-M: Luciano Coelho <luca@coelho.fi>
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
W: http://wireless.kernel.org/en/users/Drivers/wl1251
T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
-S: Maintained
+S: Orphan
F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
@@ -176,6 +176,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus,
"Could not register dev for core 0x%03X\n",
core->id.id);
+ put_device(&core->dev);
continue;
}
core->dev_registered = true;
@@ -616,7 +616,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* SISRs will also clear PISR so no need to worry here.
*/
- pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
+ /* XXX: There seems to be an issue on some cards
+ * with tx interrupt flags not being updated
+ * on PISR despite that all Tx interrupt bits
+ * are cleared on SISRs. Since we handle all
+ * Tx queues all together it shouldn't be an
+ * issue if we clear Tx interrupt flags also
+ * on PISR to avoid that.
+ */
+ pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
+ (pisr & AR5K_INT_TX_ALL);
/*
* Write to clear them...
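
Illustration (not part of the patch): with hypothetical stand-in masks, the new
pisr_clear keeps the Tx bits that the old expression dropped, which is the
workaround described in the comment above.

#include <stdio.h>
#include <stdint.h>

#define SISR_BITS 0x00ff0000u   /* stand-in for AR5K_ISR_BITS_FROM_SISRS */
#define TX_ALL    0x00050000u   /* stand-in for AR5K_INT_TX_ALL (a subset) */

int main(void)
{
	uint32_t pisr = 0x00051001u;             /* sample PISR snapshot */
	uint32_t old_clear = pisr & ~SISR_BITS;  /* Tx bits dropped */
	uint32_t new_clear = (pisr & ~SISR_BITS) | (pisr & TX_ALL);

	printf("old 0x%08x new 0x%08x\n",
	       (unsigned)old_clear, (unsigned)new_clear);
	return 0;
}
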
@@ -2754,9 +2754,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
mask->control[band].legacy << 4;
/* copy mcs rate mask */
- mcsrate = mask->control[band].mcs[1];
+ mcsrate = mask->control[band].ht_mcs[1];
mcsrate <<= 8;
- mcsrate |= mask->control[band].mcs[0];
+ mcsrate |= mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 28;
@@ -2806,7 +2806,7 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
- mcsrate = mask->control[band].mcs[0];
+ mcsrate = mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 20;
@@ -11,12 +11,14 @@ ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
ath9k-$(CONFIG_ATH9K_WOW) += wow.o
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
+ spectral.o
+
obj-$(CONFIG_ATH9K) += ath9k.o
ath9k_hw-y:= \
@@ -724,14 +724,14 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
struct ath_ant_comb *antcomb = &sc->ant_comb;
int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
int curr_main_set;
- int main_rssi = rs->rs_rssi_ctl0;
- int alt_rssi = rs->rs_rssi_ctl1;
+ int main_rssi = rs->rs_rssi_ctl[0];
+ int alt_rssi = rs->rs_rssi_ctl[1];
int rx_ant_conf, main_ant_conf;
bool short_scan = false, ret;
- rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+ rx_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_CURRENT_SHIFT) &
ATH_ANT_RX_MASK;
- main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+ main_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_MAIN_SHIFT) &
if (alt_rssi >= antcomb->low_rssi_thresh) {
@@ -32,12 +32,8 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
return 0;
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280);
- else
- ar9280PciePhy_clkreq_always_on_L1_9280);
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280);
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
@@ -29,7 +29,8 @@ static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
((struct ath_desc*) ds)->ds_link = ds_link;
-static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
u32 isr = 0;
u32 mask2 = 0;
@@ -136,7 +137,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (sync_cause) {
- ath9k_debug_sync_cause(common, sync_cause);
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
fatal_int =
(sync_cause &
(AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
@@ -201,7 +201,6 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
@@ -131,6 +131,7 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -138,7 +139,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
.calFreqPier2G = {
@@ -333,6 +334,7 @@ static const struct ar9300_eeprom ar9300_default = {
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
@@ -707,6 +709,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
@@ -714,7 +717,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
@@ -909,6 +912,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
@@ -1284,6 +1288,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
@@ -1291,7 +1296,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
@@ -1486,6 +1491,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
@@ -1861,6 +1867,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
@@ -1868,7 +1875,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
@@ -2063,6 +2070,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
@@ -2437,6 +2445,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
@@ -2444,7 +2453,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
@@ -2639,6 +2648,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
@@ -4111,6 +4121,37 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
+static void ar9003_hw_apply_minccapwr_thresh(struct ath_hw *ah,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ const u_int32_t cca_ctrl[AR9300_MAX_CHAINS] = {
+ AR_PHY_CCA_CTRL_0,
+ AR_PHY_CCA_CTRL_1,
+ AR_PHY_CCA_CTRL_2,
+ };
+ int chain;
+ u32 val;
+ if (is2ghz) {
+ if (!(eep->base_ext1.misc_enable & BIT(2)))
+ return;
+ } else {
+ if (!(eep->base_ext1.misc_enable & BIT(3)))
+ return;
+ }
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+ val = ar9003_modal_header(ah, is2ghz)->noiseFloorThreshCh[chain];
+ REG_RMW_FIELD(ah, cca_ctrl[chain],
+ AR_PHY_EXT_CCA0_THRESH62_1, val);
+ }
+}
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
@@ -4125,6 +4166,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_apply_minccapwr_thresh(ah, chan);
ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
ar9003_hw_thermometer_apply(ah);
ar9003_hw_thermo_cal_apply(ah);
@@ -270,10 +270,20 @@ struct cal_ctl_data_5g {
u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
+#define MAX_BASE_EXTENSION_FUTURE 2
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[3];
- u8 tempslopextension[8];
+ u8 future[MAX_BASE_EXTENSION_FUTURE];
+ /*
+ * misc_enable:
+ *
+ * BIT 0 - TX Gain Cap enable.
+ * BIT 1 - Uncompressed Checksum enable.
+ * BIT 2/3 - MinCCApwr enable 2g/5g.
+ */
+ u8 misc_enable;
+ int8_t tempslopextension[8];
int8_t quick_drop_low;
int8_t quick_drop_high;
@@ -175,7 +175,8 @@ static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
-static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
@@ -310,7 +311,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_mci_get_isr(ah, masked);
@@ -476,12 +478,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
/* XXX: Keycache */
rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
- rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
- rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
- rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
- rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
- rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
- rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+ rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);
if (rxsp->status11 & AR_RxKeyIdxValid)
rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
@@ -270,7 +270,7 @@
#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
-#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_CCA_CTRL_0 (AR_AGC_BASE + 0x20)
#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
@@ -398,6 +398,8 @@
#define AR9280_PHY_CCA_THRESH62_S 12
#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_EXT_CCA0_THRESH62_1 0x000001FF
+#define AR_PHY_EXT_CCA0_THRESH62_1_S 0
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
@@ -27,40 +27,15 @@
#include "common.h"
#include "mci.h"
#include "dfs.h"
-
-/*
- * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
- * should rely on this file or its contents.
- */
+#include "spectral.h"
struct ath_node;
+struct ath_rate_table;
-/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 1) ? \
- (((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 2) ? \
- (((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 4) ? \
- (((unsigned long long int)(x)) & 0xffffffff) : \
- (unsigned long long int)(x))
-/* increment with wrap-around */
-#define INCR(_l, _sz) do { \
- (_l)++; \
- (_l) &= ((_sz) - 1); \
- } while (0)
-/* decrement with wrap-around */
-#define DECR(_l, _sz) do { \
- (_l)--; \
-#define TSF_TO_TU(_h,_l) \
- ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+extern struct ieee80211_ops ath9k_ops;
+extern int ath9k_modparam_nohwcrypt;
+extern int led_blink;
+extern bool is_ath9k_unloaded;
struct ath_config {
u16 txpowlimit;
@@ -70,6 +45,17 @@ struct ath_config {
/* Descriptor Management */
/*************************/
+#define ATH_TXSTATUS_RING_SIZE 512
+/* Macro to expand scalars to 64-bit objects */
+#define ito64(x) (sizeof(x) == 1) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 2) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 4) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
#define ATH_TXBUF_RESET(_bf) do { \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
@@ -77,23 +63,6 @@ struct ath_config {
sizeof(struct ath_buf_state)); \
} while (0)
-/**
- * enum buffer_type - Buffer type flags
- *
- * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
- * @BUF_AGGR: Indicates whether the buffer can be aggregated
- * (used in aggregation scheduling)
-enum buffer_type {
- BUF_AMPDU = BIT(0),
- BUF_AGGR = BIT(1),
-};
-#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
-#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define ATH_TXSTATUS_RING_SIZE 512
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
@@ -113,11 +82,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* RX / TX */
/***********/
+#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+/* increment with wrap-around */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
#define ATH_RXBUF 512
#define ATH_TXBUF 512
#define ATH_TXBUF_RESERVE 5
#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
+#define ATH_MAX_SW_RETRIES 30
#define TID_TO_WME_AC(_tid) \
((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
@@ -133,6 +111,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_MIN_QDEPTH 2
/* minimum h/w qdepth for non-aggregated traffic */
#define ATH_NON_AGGR_MIN_QDEPTH 8
+#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_TXFIFO_DEPTH 8
+#define ATH_TX_ERROR 0x01
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
@@ -167,9 +148,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
-#define ATH_TX_COMPLETE_POLL_INT 1000
-#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -214,6 +192,21 @@ struct ath_rxbuf {
dma_addr_t bf_buf_addr;
};
+/**
+ * enum buffer_type - Buffer type flags
+ * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
+ * @BUF_AGGR: Indicates whether the buffer can be aggregated
+ * (used in aggregation scheduling)
+ */
+enum buffer_type {
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+};
+#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -278,7 +271,6 @@ struct ath_tx_control {
struct ieee80211_sta *sta;
-#define ATH_TX_ERROR 0x01
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -372,6 +364,22 @@ struct ath_vif {
struct ath_buf *av_bcbuf;
+struct ath9k_vif_iter_data {
+ u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ bool has_hw_macaddr;
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+};
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
/*******************/
/* Beacon Handling */
@@ -387,6 +395,9 @@ struct ath_vif {
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
struct ath_beacon_config {
int beacon_interval;
u16 listen_interval;
@@ -420,12 +431,10 @@ struct ath_beacon {
void ath9k_beacon_tasklet(unsigned long data);
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
u32 changed);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc);
@@ -440,10 +449,9 @@ bool ath9k_csa_is_finished(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
-#define ATH_ANI_MAX_SKIP_COUNT 10
-#define ATH_PAPRD_TIMEOUT 100 /* msecs */
-#define ATH_PLL_WORK_INTERVAL 100
+#define ATH_ANI_MAX_SKIP_COUNT 10
+#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
void ath_tx_complete_poll_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
@@ -477,20 +485,19 @@ enum bt_op_flags {
struct ath_btcoex {
- bool hw_timer_enabled;
spinlock_t btcoex_lock;
struct timer_list period_timer; /* Timer for BT period */
+ struct timer_list no_stomp_timer;
u32 bt_priority_cnt;
unsigned long bt_priority_time;
unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
- u32 btcoex_no_stomp; /* in usec */
+ u32 btcoex_no_stomp; /* in msec */
u32 btcoex_period; /* in msec */
- u32 btscan_no_stomp; /* in usec */
+ u32 btscan_no_stomp; /* in msec */
u32 duty_cycle;
u32 bt_wait_time;
int rssi_count;
- struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
u8 stomp_audio;
@@ -538,12 +545,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-struct ath9k_wow_pattern {
- u8 pattern_bytes[MAX_PATTERN_SIZE];
- u8 mask_bytes[MAX_PATTERN_SIZE];
- u32 pattern_len;
/********************/
/* LED Control */
@@ -575,6 +576,12 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
/* Wake on Wireless LAN */
/************************/
+struct ath9k_wow_pattern {
+ u8 pattern_bytes[MAX_PATTERN_SIZE];
+ u8 mask_bytes[MAX_PATTERN_SIZE];
+ u32 pattern_len;
+};
#ifdef CONFIG_ATH9K_WOW
void ath9k_init_wow(struct ieee80211_hw *hw);
int ath9k_suspend(struct ieee80211_hw *hw,
@@ -678,13 +685,8 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
* Used when PCI device not fully initialized by bootrom/BIOS
#define DEFAULT_CACHELINE 32
-#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 30
-#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
-#define ATH_RATE_DUMMY_MARKER 0
enum sc_op_flags {
SC_OP_INVALID,
@@ -703,37 +705,6 @@ enum sc_op_flags {
#define PS_BEACON_SYNC BIT(4)
#define PS_WAIT_FOR_ANI BIT(5)
-struct ath_rate_table;
-struct ath9k_vif_iter_data {
- u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
- u8 mask[ETH_ALEN]; /* bssid mask */
- bool has_hw_macaddr;
- int naps; /* number of AP vifs */
- int nmeshes; /* number of mesh vifs */
- int nstations; /* number of station vifs */
- int nwds; /* number of WDS vifs */
- int nadhocs; /* number of adhoc vifs */
-/* enum spectral_mode:
- * @SPECTRAL_DISABLED: spectral mode is disabled
- * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
- * something else.
- * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
- * is performed manually.
- * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
- * during a channel scan.
-enum spectral_mode {
- SPECTRAL_DISABLED = 0,
- SPECTRAL_BACKGROUND,
- SPECTRAL_MANUAL,
- SPECTRAL_CHANSCAN,
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -823,162 +794,6 @@ struct ath_softc {
#endif
-#define SPECTRAL_SCAN_BITMASK 0x10
-/* Radar info packet format, used for DFS and spectral formats. */
-struct ath_radar_info {
- u8 pulse_length_pri;
- u8 pulse_length_ext;
- u8 pulse_bw_info;
-} __packed;
-/* The HT20 spectral data has 4 bytes of additional information at it's end.
- * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: all bins max_magnitude[9:2]
- * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
-struct ath_ht20_mag_info {
- u8 all_bins[3];
- u8 max_exp;
-#define SPECTRAL_HT20_NUM_BINS 56
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data by -1/+2. This struct is for reference only.
-struct ath_ht20_fft_packet {
- u8 data[SPECTRAL_HT20_NUM_BINS];
- struct ath_ht20_mag_info mag_info;
- struct ath_radar_info radar_info;
-#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
-/* Dynamic 20/40 mode:
- * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: lower bins max_magnitude[9:2]
- * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
- * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: upper bins max_magnitude[9:2]
- * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
-struct ath_ht20_40_mag_info {
- u8 lower_bins[3];
- u8 upper_bins[3];
-#define SPECTRAL_HT20_40_NUM_BINS 128
- * data. This struct is for reference only.
-struct ath_ht20_40_fft_packet {
- u8 data[SPECTRAL_HT20_40_NUM_BINS];
- struct ath_ht20_40_mag_info mag_info;
-#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
-/* grabs the max magnitude from the all/upper/lower bins */
-static inline u16 spectral_max_magnitude(u8 *bins)
-{
- return (bins[0] & 0xc0) >> 6 |
- (bins[1] & 0xff) << 2 |
- (bins[2] & 0x03) << 10;
-}
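
Standalone sketch (not part of the patch; the helper above is only moving into
spectral.h): reassembling the 12-bit max_magnitude from the three packed bytes
described in the HT20 layout comment. The sample value is arbitrary and the low
six bits of bins[0] (the bitmap weight) are left at zero.

#include <stdio.h>
#include <stdint.h>

static uint16_t max_magnitude(const uint8_t *bins)
{
	return (bins[0] & 0xc0) >> 6 |   /* magnitude[1:0]   */
	       (bins[1] & 0xff) << 2 |   /* magnitude[9:2]   */
	       (bins[2] & 0x03) << 10;   /* magnitude[11:10] */
}

int main(void)
{
	uint16_t mag = 0xabc;            /* arbitrary 12-bit value */
	uint8_t bins[3] = {
		(mag & 0x3) << 6,        /* low two bits into bits 7:6 */
		(mag >> 2) & 0xff,       /* middle eight bits */
		(mag >> 10) & 0x3,       /* top two bits into bits 1:0 */
	};

	printf("unpacked: 0x%03x\n", (unsigned)max_magnitude(bins));
	return 0;
}
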
-/* return the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
- s8 m = (bins[2] & 0xfc) >> 2;
- /* TODO: this still doesn't always report the right values ... */
- if (m > 32)
- m |= 0xe0;
- m &= ~0xe0;
- return m + 29;
-/* return the bitmap weight from the all/upper/lower bins */
-static inline u8 spectral_bitmap_weight(u8 *bins)
- return bins[0] & 0x3f;
-/* FFT sample format given to userspace via debugfs.
- * Please keep the type/length at the front position and change
- * other fields after adding another sample type
- * TODO: this might need rework when switching to nl80211-based
- * interface.
-enum ath_fft_sample_type {
- ATH_FFT_SAMPLE_HT20 = 1,
- ATH_FFT_SAMPLE_HT20_40,
-struct fft_sample_tlv {
- u8 type; /* see ath_fft_sample */
- __be16 length;
- /* type dependent data follows */
-struct fft_sample_ht20 {
- struct fft_sample_tlv tlv;
- __be16 freq;
- s8 rssi;
- s8 noise;
- __be16 max_magnitude;
- u8 max_index;
- u8 bitmap_weight;
- __be64 tsf;
-struct fft_sample_ht20_40 {
- u8 channel_type;
- s8 lower_rssi;
- s8 upper_rssi;
- s8 lower_noise;
- s8 upper_noise;
- __be16 lower_max_magnitude;
- __be16 upper_max_magnitude;
- u8 lower_max_index;
- u8 upper_max_index;
- u8 lower_bitmap_weight;
- u8 upper_bitmap_weight;
/********/
/* TX99 */
@@ -999,19 +814,13 @@ static inline int ath9k_tx99_send(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_TX99 */
-void ath9k_tasklet(unsigned long data);
-int ath_cabq_update(struct ath_softc *);
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
-extern struct ieee80211_ops ath9k_ops;
-extern int ath9k_modparam_nohwcrypt;
-extern int led_blink;
-extern bool is_ath9k_unloaded;
+void ath9k_tasklet(unsigned long data);
+int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
int ath_reset(struct ath_softc *sc);
@@ -1020,13 +829,12 @@ void ath_restart_work(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
- enum spectral_mode spectral_mode);
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+void ath_start_rfkill_poll(struct ath_softc *sc);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_ps_wakeup(struct ath_softc *sc);
+void ath9k_ps_restore(struct ath_softc *sc);
#ifdef CONFIG_ATH9K_PCI
int ath_pci_init(void);
@@ -1044,15 +852,4 @@ static inline int ath_ahb_init(void) { return 0; };
static inline void ath_ahb_exit(void) {};
-void ath9k_ps_wakeup(struct ath_softc *sc);
-void ath9k_ps_restore(struct ath_softc *sc);
-u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
-void ath_start_rfkill_poll(struct ath_softc *sc);
-void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ath9k_vif_iter_data *iter_data);
#endif /* ATH9K_H */
@@ -274,18 +274,19 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
return slot;
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- u64 tsfadjust;
+ u32 tsfadjust;
if (avp->av_bslot == 0)
return;
- tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+ tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(tsfadjust);
ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->av_bslot);
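
Illustration (not part of the patch): the reordered tsfadjust arithmetic divides
by the slot count after converting to microseconds, so the fractional TU is no
longer truncated. TU_TO_USEC as a shift by 10 and ATH_BCBUF = 8 are assumed here
to match ath9k's definitions.

#include <stdio.h>
#include <stdint.h>

#define TU_TO_USEC(x) ((x) << 10)   /* 1 TU = 1024 usec */
#define ATH_BCBUF 8

int main(void)
{
	uint32_t beacon_interval = 100;  /* TU */
	int av_bslot = 3;
	uint32_t old_adj = TU_TO_USEC(beacon_interval * av_bslot / ATH_BCBUF);
	uint32_t new_adj = TU_TO_USEC(beacon_interval * av_bslot) / ATH_BCBUF;

	/* old: 37888 usec (37 TU, fraction lost), new: 38400 usec (37.5 TU) */
	printf("old %u usec, new %u usec\n", old_adj, new_adj);
	return 0;
}
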
@@ -431,6 +432,33 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+ return (mod_lo << 10) | tsf_mod;
+}
+static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
+ unsigned int interval)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int offset;
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
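
Standalone check (not part of the patch) of the arithmetic in the new
ath9k_mod_tsf64_tu() helper: one TU is 1024 usec, so each unit of the upper
32 TSF bits contributes 2^22 TU, and the modulo can be taken in two 32-bit
steps. The sample TSF and interval below are arbitrary.

#include <stdio.h>
#include <stdint.h>

static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
{
	uint32_t tsf_mod = tsf & (1024 - 1);     /* usec remainder below 1 TU */
	uint32_t tsf_hi = tsf >> 32;
	uint32_t tsf_lo = (uint32_t)tsf >> 10;   /* low word in TU */
	uint32_t mod_hi = tsf_hi % div_tu;       /* 2^32 usec = 2^22 TU */
	uint32_t mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;

	return (mod_lo << 10) | tsf_mod;
}

int main(void)
{
	uint64_t tsf = 0x0123456789abcdefULL;
	uint32_t div_tu = 100;                   /* e.g. a 100 TU interval */

	printf("split %u direct %u\n",
	       (unsigned)mod_tsf64_tu(tsf, div_tu),
	       (unsigned)(tsf % ((uint64_t)div_tu * 1024)));
	return 0;
}
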
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -446,7 +474,8 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
intval /= ATH_BCBUF;
- nexttbtt = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
@@ -458,7 +487,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
(conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval, true);
+ ath9k_beacon_init(sc, nexttbtt, intval, false);
@@ -475,11 +504,9 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
- int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount;
- u32 nexttbtt = 0, intval, tsftu;
+ int dtim_intval, sleepduration;
+ u32 nexttbtt = 0, intval;
u64 tsf;
- int num_beacons, offset, dtim_dec_count, cfp_dec_count;
/* No need to configure beacon if we are not associated */
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
@@ -492,53 +519,25 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
intval = conf->beacon_interval;
- * Setup dtim and cfp parameters according to
+ * Setup dtim parameters according to
* last beacon we received (which may be none).
- dtimperiod = conf->dtim_period;
- dtimcount = conf->dtim_count;
- if (dtimcount >= dtimperiod) /* NB: sanity check */
- dtimcount = 0;
- cfpperiod = 1; /* NB: no PCF support yet */
- cfpcount = 0;
+ dtim_intval = intval * conf->dtim_period;
sleepduration = conf->listen_interval * intval;
* Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim+cfp state for the result.
+ * TSF and calculate dtim state for the result.
tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
- num_beacons = tsftu / intval + 1;
- offset = tsftu % intval;
- nexttbtt = tsftu - offset;
- if (offset)
- nexttbtt += intval;
- /* DTIM Beacon every dtimperiod Beacon */
- dtim_dec_count = num_beacons % dtimperiod;
- /* CFP every cfpperiod DTIM Beacon */
- cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
- if (dtim_dec_count)
- cfp_dec_count++;
- dtimcount -= dtim_dec_count;
- if (dtimcount < 0)
- dtimcount += dtimperiod;
- cfpcount -= cfp_dec_count;
- if (cfpcount < 0)
- cfpcount += cfpperiod;
- bs.bs_intval = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
+ bs.bs_intval = TU_TO_USEC(intval);
+ bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
bs.bs_nexttbtt = nexttbtt;
- bs.bs_dtimperiod = dtimperiod*intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
- bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
- bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
- bs.bs_cfpmaxduration = 0;
+ bs.bs_nextdtim = nexttbtt;
+ if (conf->dtim_period > 1)
+ bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
* Calculate the number of consecutive beacons to miss* before taking
@@ -566,18 +565,16 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
* XXX fixed at 100ms
- bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ sleepduration));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, BEACON,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
@@ -600,25 +597,11 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
- if (conf->ibss_creator) {
+ if (conf->ibss_creator)
nexttbtt = intval;
- } else {
- u32 tbtt, offset, tsftu;
- u64 tsf;
- /*
- * Pull nexttbtt forward to reflect the current
- * sync'd TSF.
- tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- offset = tsftu % conf->beacon_interval;
- tbtt = tsftu - offset;
- tbtt += conf->beacon_interval;
- nexttbtt = TU_TO_USEC(tbtt);
- }
+ else
@@ -640,7 +623,8 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
set_bit(SC_OP_BEACONS, &sc->sc_flags);
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
@@ -711,12 +695,17 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
unsigned long flags;
bool skip_beacon = false;
+ if (vif->type == NL80211_IFTYPE_AP)
+ ath9k_set_tsfadjust(sc, vif);
+ if (!ath9k_allow_beacon_config(sc, vif))
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
@@ -66,7 +66,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
- u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
if (AR_SREV_9300_20_OR_LATER(ah))
@@ -88,11 +87,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
- for (i = 0; i < 32; i++) {
- idx = (debruijn32 << i) >> 27;
- ah->hw_gen_timers.gen_timer_index[idx] = i;
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
@@ -98,10 +98,8 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
- u8 chan_idx;
- chan_idx = curchan->hw_value;
- channel = &ah->channels[chan_idx];
+ channel = &ah->channels[curchan->hw_value];
ath9k_cmn_update_ichannel(channel, chandef);
return channel;
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-#include <linux/relay.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -27,6 +26,47 @@
#define REG_READ_D(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+ if (sync_cause)
+ sc->debug.stats.istats.sync_cause_all++;
+ if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+ sc->debug.stats.istats.sync_rtc_irq++;
+ if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+ sc->debug.stats.istats.sync_mac_irq++;
+ if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+ sc->debug.stats.istats.eeprom_illegal_access++;
+ if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+ sc->debug.stats.istats.apb_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+ sc->debug.stats.istats.pci_mode_conflict++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+ sc->debug.stats.istats.host1_fatal++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+ sc->debug.stats.istats.host1_perr++;
+ if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+ sc->debug.stats.istats.trcv_fifo_perr++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+ sc->debug.stats.istats.radm_cpl_ep++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_dllp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_tlp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+ sc->debug.stats.istats.radm_cpl_ecrc_err++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+ sc->debug.stats.istats.radm_cpl_timeout++;
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ sc->debug.stats.istats.local_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+ sc->debug.stats.istats.pm_access++;
+ if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+ sc->debug.stats.istats.mac_awake++;
+ if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+ sc->debug.stats.istats.mac_asleep++;
+ if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+ sc->debug.stats.istats.mac_sleep_access++;
+}
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1016,293 +1056,6 @@ static const struct file_operations fops_recv = {
.llseek = default_llseek,
-static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
- struct ath_softc *sc = file->private_data;
- char *mode = "";
- unsigned int len;
- switch (sc->spectral_mode) {
- case SPECTRAL_DISABLED:
- mode = "disable";
- break;
- case SPECTRAL_BACKGROUND:
- mode = "background";
- case SPECTRAL_CHANSCAN:
- mode = "chanscan";
- case SPECTRAL_MANUAL:
- mode = "manual";
- len = strlen(mode);
- return simple_read_from_buffer(user_buf, count, ppos, mode, len);
-static ssize_t write_file_spec_scan_ctl(struct file *file,
- const char __user *user_buf,
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- ssize_t len;
- if (config_enabled(CONFIG_ATH9K_TX99))
- return -EOPNOTSUPP;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
- if (strncmp("trigger", buf, 7) == 0) {
- ath9k_spectral_scan_trigger(sc->hw);
- } else if (strncmp("background", buf, 9) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
- ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
- } else if (strncmp("chanscan", buf, 8) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
- ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
- } else if (strncmp("manual", buf, 6) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
- ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
- } else if (strncmp("disable", buf, 7) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
- ath_dbg(common, CONFIG, "spectral scan: disabled\n");
- return -EINVAL;
- return count;
-static const struct file_operations fops_spec_scan_ctl = {
- .read = read_file_spec_scan_ctl,
- .write = write_file_spec_scan_ctl,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-static ssize_t read_file_spectral_short_repeat(struct file *file,
- char __user *user_buf,
- len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-static ssize_t write_file_spectral_short_repeat(struct file *file,
- unsigned long val;
- if (kstrtoul(buf, 0, &val))
- if (val < 0 || val > 1)
- sc->spec_config.short_repeat = val;
-static const struct file_operations fops_spectral_short_repeat = {
- .read = read_file_spectral_short_repeat,
- .write = write_file_spectral_short_repeat,
-static ssize_t read_file_spectral_count(struct file *file,
- len = sprintf(buf, "%d\n", sc->spec_config.count);
-static ssize_t write_file_spectral_count(struct file *file,
- if (val < 0 || val > 255)
- sc->spec_config.count = val;
-static const struct file_operations fops_spectral_count = {
- .read = read_file_spectral_count,
- .write = write_file_spectral_count,
-static ssize_t read_file_spectral_period(struct file *file,
- len = sprintf(buf, "%d\n", sc->spec_config.period);
-static ssize_t write_file_spectral_period(struct file *file,
- sc->spec_config.period = val;
-static const struct file_operations fops_spectral_period = {
- .read = read_file_spectral_period,
- .write = write_file_spectral_period,
-static ssize_t read_file_spectral_fft_period(struct file *file,
- len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
-static ssize_t write_file_spectral_fft_period(struct file *file,
- if (val < 0 || val > 15)
- sc->spec_config.fft_period = val;
-static const struct file_operations fops_spectral_fft_period = {
- .read = read_file_spectral_fft_period,
- .write = write_file_spectral_fft_period,
-static struct dentry *create_buf_file_handler(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
- struct dentry *buf_file;
- buf_file = debugfs_create_file(filename, mode, parent, buf,
- &relay_file_operations);
- *is_global = 1;
- return buf_file;
-static int remove_buf_file_handler(struct dentry *dentry)
- debugfs_remove(dentry);
- return 0;
-void ath_debug_send_fft_sample(struct ath_softc *sc,
- struct fft_sample_tlv *fft_sample_tlv)
- int length;
- if (!sc->rfs_chan_spec_scan)
- return;
- length = __be16_to_cpu(fft_sample_tlv->length) +
- sizeof(*fft_sample_tlv);
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
-static struct rchan_callbacks rfs_spec_scan_cb = {
- .create_buf_file = create_buf_file_handler,
- .remove_buf_file = remove_buf_file_handler,
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
@@ -1772,10 +1525,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
void ath9k_deinit_debug(struct ath_softc *sc)
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
+ ath9k_spectral_deinit_debug(sc);
int ath9k_init_debug(struct ath_hw *ah)
@@ -1795,6 +1545,7 @@ int ath9k_init_debug(struct ath_hw *ah)
ath9k_dfs_init_debug(sc);
ath9k_tx99_init_debug(sc);
+ ath9k_spectral_init_debug(sc);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
@@ -1841,23 +1592,6 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
- sc->rfs_chan_spec_scan = relay_open("spectral_scan",
- sc->debug.debugfs_phy,
- 1024, 256, &rfs_spec_scan_cb,
- NULL);
- debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spec_scan_ctl);
- debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
- &fops_spectral_short_repeat);
- debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_count);
- debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_period);
- debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
- &fops_spectral_fft_period);
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
@@ -292,11 +292,11 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
- struct fft_sample_tlv *fft_sample);
void ath9k_debug_stat_ant(struct ath_softc *sc,
struct ath_hw_antcomb_conf *div_ant_conf,
int main_rssi_avg, int alt_rssi_avg);
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause);
#else
#define RX_STAT_INC(c) /* NOP */
@@ -331,6 +331,11 @@ static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
+static inline void
+ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+}
#endif /* CONFIG_ATH9K_DEBUGFS */
#endif /* DEBUG_H */
@@ -158,8 +158,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
- ard.rssi = rs->rs_rssi_ctl0;
- ard.ext_rssi = rs->rs_rssi_ext0;
+ ard.rssi = rs->rs_rssi_ctl[0];
+ ard.ext_rssi = rs->rs_rssi_ext[0];
* hardware stores this as 8 bit signed value.
@@ -1085,31 +1085,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
-#define EEP_MAP4K_SPURCHAN \
- (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
- struct ath_common *common = ath9k_hw_common(ah);
- u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_MAP4K_SPURCHAN;
- return spur_val;
-#undef EEP_MAP4K_SPURCHAN
+ return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
const struct eeprom_ops eep_4k_ops = {
@@ -1004,31 +1004,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
-#define EEP_MAP9287_SPURCHAN \
- (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
- spur_val = EEP_MAP9287_SPURCHAN;
-#undef EEP_MAP9287_SPURCHAN
+ return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
const struct eeprom_ops eep_ar9287_ops = {
@@ -1348,31 +1348,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
-#define EEP_DEF_SPURCHAN \
- (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
- spur_val = EEP_DEF_SPURCHAN;
-#undef EEP_DEF_SPURCHAN
+ return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
const struct eeprom_ops eep_def_ops = {
@@ -157,36 +157,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 trig_timeout,
- u32 timer_period)
- ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
- if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_disable_interrupts(ah);
- ah->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
- ath9k_hw_gen_timer_stop(ah, timer);
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ah->imask &= ~ATH9K_INT_GENTIMER;
static void ath_mci_ftp_adjust(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -257,19 +227,9 @@ static void ath_btcoex_period_timer(unsigned long data)
spin_unlock_bh(&btcoex->btcoex_lock);
- * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
- * ensure that we properly convert btcoex_period to usec
- * for any comparision with (btcoex/btscan_)no_stomp.
- if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
- ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
- timer_period * 10);
- btcoex->hw_timer_enabled = true;
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp)
+ mod_timer(&btcoex->no_stomp_timer,
+ jiffies + msecs_to_jiffies(timer_period));
ath9k_ps_restore(sc);
@@ -282,7 +242,7 @@ skip_hw_wakeup:
* Generic tsf based hw timer which configures weight
* registers to time slice between wlan and bt traffic
-static void ath_btcoex_no_stomp_timer(void *arg)
+static void ath_btcoex_no_stomp_timer(unsigned long arg)
struct ath_softc *sc = (struct ath_softc *)arg;
@@ -311,24 +271,18 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
- btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc);
+ setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
+ (unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock);
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
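
Illustration (not part of the patch): with both the period and the no-stomp
window held in msec, the duty-cycle split needs no usec conversion. The
constants below are assumptions for the example, not the driver's defines.

#include <stdio.h>

#define BT_PERIOD_MS 45   /* assumed ATH_BTCOEX_DEF_BT_PERIOD */
#define DUTY_CYCLE   55   /* assumed ATH_BTCOEX_DEF_DUTY_CYCLE, percent */

int main(void)
{
	unsigned int period = BT_PERIOD_MS;
	unsigned int no_stomp = (100 - DUTY_CYCLE) * period / 100;

	/* 45 ms period at a 55% duty cycle -> 20 ms no-stomp window */
	printf("period %u ms, no_stomp %u ms\n", period, no_stomp);
	return 0;
}
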
@@ -343,10 +297,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
+ del_timer_sync(&btcoex->no_stomp_timer);
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -363,24 +314,16 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
void ath9k_btcoex_timer_pause(struct ath_softc *sc)
- struct ath_hw *ah = sc->sc_ah;
del_timer_sync(&btcoex->period_timer);
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -400,12 +343,6 @@ u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- if (status & ATH9K_INT_GENTIMER)
- ath_gen_timer_isr(sc->sc_ah);
if (status & ATH9K_INT_MCI)
ath_mci_intr(sc);
@@ -447,10 +384,6 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
- if ((sc->btcoex.no_stomp_timer) &&
- ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
@@ -600,10 +600,15 @@ void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
#ifdef CONFIG_MAC80211_LEDS
+void ath9k_configure_leds(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
void ath9k_led_work(struct work_struct *work);
+static inline void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+}
static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
@@ -70,11 +70,11 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
enum ath9k_int imask = 0;
int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount, bmiss_timeout;
+ int bmiss_timeout;
u32 nexttbtt = 0, intval, tsftu;
__be32 htc_imask = 0;
+ int num_beacons, offset, dtim_dec_count;
int ret __attribute__ ((unused));
u8 cmd_rsp;
@@ -84,7 +84,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
dtimperiod = bss_conf->dtim_period;
@@ -93,8 +93,6 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
dtimcount = 1;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0;
sleepduration = intval;
if (sleepduration <= 0)
@@ -102,7 +100,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
@@ -115,26 +113,14 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* DTIM Beacon every dtimperiod Beacon */
dtim_dec_count = num_beacons % dtimperiod;
dtimcount -= dtim_dec_count;
if (dtimcount < 0)
dtimcount += dtimperiod;
- bs.bs_nexttbtt = nexttbtt;
+ bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
+ bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
@@ -161,7 +147,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
@@ -170,10 +157,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, CONFIG,
+ ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
@@ -255,6 +255,17 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
cancel_work_sync(&priv->led_work);
+void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+}
void ath9k_init_leds(struct ath9k_htc_priv *priv)
int ret;
@@ -268,11 +279,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
else
priv->ah->led_pin = ATH_LED_PIN_DEF;
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+ ath9k_configure_leds(priv);
snprintf(priv->led_name, sizeof(priv->led_name),
"ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
@@ -1000,6 +1000,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
priv->ah->hw_version.usbdev);
return ret;
@@ -49,9 +49,10 @@ static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
-static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
- return ath9k_hw_ops(ah)->get_isr(ah, masked);
+ return ath9k_hw_ops(ah)->get_isr(ah, masked, sync_cause_p);
static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/time.h>
+#include <linux/bitops.h>
#include "hw.h"
@@ -84,48 +85,6 @@ static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_DEBUGFS
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
- struct ath_softc *sc = common->priv;
- if (sync_cause)
- sc->debug.stats.istats.sync_cause_all++;
- if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
- sc->debug.stats.istats.sync_rtc_irq++;
- if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
- sc->debug.stats.istats.sync_mac_irq++;
- if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
- sc->debug.stats.istats.eeprom_illegal_access++;
- if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
- sc->debug.stats.istats.apb_timeout++;
- if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
- sc->debug.stats.istats.pci_mode_conflict++;
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
- sc->debug.stats.istats.host1_fatal++;
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
- sc->debug.stats.istats.host1_perr++;
- if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
- sc->debug.stats.istats.trcv_fifo_perr++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
- sc->debug.stats.istats.radm_cpl_ep++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
- sc->debug.stats.istats.radm_cpl_dllp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
- sc->debug.stats.istats.radm_cpl_tlp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
- sc->debug.stats.istats.radm_cpl_ecrc_err++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
- sc->debug.stats.istats.radm_cpl_timeout++;
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- sc->debug.stats.istats.local_timeout++;
- if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
- sc->debug.stats.istats.pm_access++;
- if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
- sc->debug.stats.istats.mac_awake++;
- if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
- sc->debug.stats.istats.mac_asleep++;
- if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
- sc->debug.stats.istats.mac_sleep_access++;
@@ -438,21 +397,12 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
static void ath9k_hw_init_config(struct ath_hw *ah)
- int i;
ah->config.dma_beacon_response_time = 1;
ah->config.sw_beacon_response_time = 6;
- ah->config.additional_swba_backoff = 0;
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_clock_req = 0;
ah->config.analog_shiftreg = 1;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- ah->config.spurchans[i][0] = AR_NO_SPUR;
- ah->config.spurchans[i][1] = AR_NO_SPUR;
ah->config.rx_intr_mitigation = true;
@@ -485,7 +435,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
- ah->atim_window = 0;
ah->sta_id1_defaults =
AR_STA_ID1_CRPT_MIC_ENABLE |
AR_STA_ID1_MCAST_KSRCH;
@@ -1281,6 +1230,42 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
*coef_exponent = coef_exp - 16;
+/* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues.
+ */
+static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
+{
+ int i, npend = 0;
+ for (i = 0; i < AR_NUM_QCU; i++) {
+ npend = ath9k_hw_numtxpending(ah, i);
+ if (npend)
+ break;
+ }
+ if (ah->external_reset &&
+ (npend || type == ATH9K_RESET_COLD)) {
+ int reset_err = 0;
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "reset MAC via external reset\n");
+ reset_err = ah->external_reset();
+ if (reset_err) {
+ ath_err(ath9k_hw_common(ah),
+ "External reset failed, err=%d\n",
+ reset_err);
+ return false;
+ }
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+ }
+ return true;
+}
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
u32 rst_flags;
@@ -1331,38 +1316,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (AR_SREV_9330(ah)) {
- int npend = 0;
- /* AR9330 WAR:
- * call external reset function to reset WMAC if:
- * - doing a cold reset
- * - we have pending frames in the TX queues
- for (i = 0; i < AR_NUM_QCU; i++) {
- npend = ath9k_hw_numtxpending(ah, i);
- if (npend)
- if (ah->external_reset &&
- (npend || type == ATH9K_RESET_COLD)) {
- int reset_err = 0;
- ath_dbg(ath9k_hw_common(ah), RESET,
- "reset MAC via external reset\n");
- reset_err = ah->external_reset();
- if (reset_err) {
- ath_err(ath9k_hw_common(ah),
- "External reset failed, err=%d\n",
- reset_err);
- return false;
- REG_WRITE(ah, AR_RTC_RESET, 1);
+ if (!ath9k_hw_ar9330_reset_war(ah, type))
+ return false;
@@ -1372,7 +1327,12 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REGWRITE_BUFFER_FLUSH(ah);
- udelay(50);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ udelay(50);
+ else if (AR_SREV_9100(ah))
+ udelay(10000);
+ else
+ udelay(100);
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
@@ -1408,8 +1368,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
- if (!AR_SREV_9300_20_OR_LATER(ah))
- udelay(2);
+ udelay(2);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
@@ -1485,7 +1444,6 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
if (AR_SREV_9330(ah))
ath9k_hw_init_pll(ah, chan);
- ath9k_hw_set_rfmode(ah, chan);
return true;
@@ -1954,6 +1912,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ ath9k_hw_set_rfmode(ah, chan);
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
@@ -2264,9 +2224,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
- TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
- flags |= AR_NDP_TIMER_EN;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
@@ -2287,7 +2244,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
- REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
@@ -2304,12 +2260,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
- REG_WRITE(ah, AR_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval));
- REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, bs->bs_nexttbtt);
+ REG_WRITE(ah, AR_BEACON_PERIOD, bs->bs_intval);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, bs->bs_intval);
@@ -2337,9 +2290,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
- REG_WRITE(ah, AR_NEXT_DTIM,
- TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
- REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
+ REG_WRITE(ah, AR_NEXT_DTIM, bs->bs_nextdtim - SLEEP_SLOP);
+ REG_WRITE(ah, AR_NEXT_TIM, nextTbtt - SLEEP_SLOP);
REG_WRITE(ah, AR_SLEEP1,
SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
@@ -2353,8 +2305,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_SLEEP2,
SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
- REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
- REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REG_WRITE(ah, AR_TIM_PERIOD, beaconintval);
+ REG_WRITE(ah, AR_DTIM_PERIOD, dtimperiod);
@@ -2990,20 +2942,6 @@ static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
/* HW generic timer primitives */
-/* compute and clear index of rightmost 1 */
-static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
- u32 b;
- b = *mask;
- b &= (0-b);
- *mask &= ~b;
- b *= debruijn32;
- b >>= 27;
- return timer_table->gen_timer_index[b];
u32 ath9k_hw_gettsf32(struct ath_hw *ah)
return REG_READ(ah, AR_TSF_L32);
@@ -3019,6 +2957,10 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
+ if ((timer_index < AR_FIRST_NDP_TIMER) ||
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
if (timer == NULL)
return NULL;
@@ -3036,23 +2978,13 @@ EXPORT_SYMBOL(ath_gen_timer_alloc);
void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
+ u32 timer_next,
u32 timer_period)
- u32 tsf, timer_next;
- BUG_ON(!timer_period);
- set_bit(timer->index, &timer_table->timer_mask.timer_bits);
- tsf = ath9k_hw_gettsf32(ah);
+ u32 mask = 0;
- timer_next = tsf + trig_timeout;
- ath_dbg(ath9k_hw_common(ah), BTCOEX,
- "current tsf %x period %x timer_next %x\n",
- tsf, timer_period, timer_next);
+ timer_table->timer_mask |= BIT(timer->index);
* Program generic timer registers
@@ -3078,10 +3010,19 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
(1 << timer->index));
- /* Enable both trigger and thresh interrupt masks */
- REG_SET_BIT(ah, AR_IMR_S5,
- (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
- SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
+ if (timer->trigger)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_TRIG);
+ if (timer->overflow)
+		mask |= SM(AR_GENTMR_BIT(timer->index),
+			   AR_IMR_S5_GENTIMER_THRESH);
+ REG_SET_BIT(ah, AR_IMR_S5, mask);
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
@@ -3089,11 +3030,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
- if ((timer->index < AR_FIRST_NDP_TIMER) ||
- (timer->index >= ATH_MAX_GEN_TIMER)) {
/* Clear generic timer enable bits. */
REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
@@ -3113,7 +3049,12 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
- clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
+ timer_table->timer_mask &= ~BIT(timer->index);
+ if (timer_table->timer_mask == 0) {
+ ah->imask &= ~ATH9K_INT_GENTIMER;
EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
@@ -3134,32 +3075,32 @@ void ath_gen_timer_isr(struct ath_hw *ah)
- u32 trigger_mask, thresh_mask, index;
+ unsigned long trigger_mask, thresh_mask;
+ unsigned int index;
/* get hardware generic timer interrupt status */
trigger_mask = ah->intr_gen_timer_trigger;
thresh_mask = ah->intr_gen_timer_thresh;
- trigger_mask &= timer_table->timer_mask.val;
- thresh_mask &= timer_table->timer_mask.val;
+ trigger_mask &= timer_table->timer_mask;
+ thresh_mask &= timer_table->timer_mask;
trigger_mask &= ~thresh_mask;
- while (thresh_mask) {
- index = rightmost_index(timer_table, &thresh_mask);
+ for_each_set_bit(index, &thresh_mask, ARRAY_SIZE(timer_table->timers)) {
timer = timer_table->timers[index];
- BUG_ON(!timer);
- ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
- index);
+ if (!timer)
+ if (!timer->overflow)
timer->overflow(timer->arg);
- while (trigger_mask) {
- index = rightmost_index(timer_table, &trigger_mask);
+ for_each_set_bit(index, &trigger_mask, ARRAY_SIZE(timer_table->timers)) {
- ath_dbg(common, BTCOEX,
- "Gen timer[%d] trigger\n", index);
+ if (!timer->trigger)
timer->trigger(timer->arg);
@@ -168,7 +168,7 @@
#define CAB_TIMEOUT_VAL 10
#define BEACON_TIMEOUT_VAL 10
#define MIN_BEACON_TIMEOUT_VAL 1
-#define SLEEP_SLOP 3
+#define SLEEP_SLOP TU_TO_USEC(3)
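
With the station beacon timer registers now programmed in microseconds, the slop is converted at its definition. As a hedged reminder of the arithmetic (ath9k's TU_TO_USEC is a shift by 10, i.e. 1 TU = 1024 us):

/* Illustrative only: a 3 TU slop becomes 3 << 10 = 3072 us. */
#define EXAMPLE_TU_TO_USEC(_tu)	((_tu) << 10)

static const unsigned int example_sleep_slop_us = EXAMPLE_TU_TO_USEC(3);	/* 3072 */
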
#define INIT_CONFIG_STATUS 0x00000000
#define INIT_RSSI_THR 0x00000700
@@ -280,10 +280,8 @@ struct ath9k_hw_capabilities {
struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
- int additional_swba_backoff;
int ack_6mb;
u32 cwm_ignore_extcca;
- u8 pcie_clock_req;
u32 pcie_waen;
u8 analog_shiftreg;
u32 ofdm_trig_low;
@@ -294,18 +292,11 @@ struct ath9k_ops_config {
int serialize_regmode;
bool rx_intr_mitigation;
bool tx_intr_mitigation;
-#define SPUR_DISABLE 0
-#define SPUR_ENABLE_IOCTL 1
-#define SPUR_ENABLE_EEPROM 2
-#define AR_SPUR_5413_1 1640
-#define AR_SPUR_5413_2 1200
#define AR_NO_SPUR 0x8000
#define AR_BASE_FREQ_2GHZ 2300
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
- int spurmode;
- u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
u16 ani_poll_interval; /* ANI poll interval in ms */
@@ -460,10 +451,6 @@ struct ath9k_beacon_state {
u32 bs_intval;
#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
u32 bs_dtimperiod;
- u16 bs_cfpperiod;
- u16 bs_cfpmaxduration;
- u32 bs_cfpnext;
- u16 bs_timoffset;
u16 bs_bmissthreshold;
u32 bs_sleepduration;
u32 bs_tsfoor_threshold;
@@ -499,12 +486,6 @@ struct ath9k_hw_version {
#define AR_GENTMR_BIT(_index) (1 << (_index))
- * Using de Bruijin sequence to look up 1's index in a 32 bit number
- * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
-#define debruijn32 0x077CB531U
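
The constant above backed a de Bruijn multiply-and-shift lookup of the rightmost set bit, which the new code replaces with for_each_set_bit(). For reference, a standalone sketch of the technique, using the canonical position table rather than the driver's gen_timer_index[] mapping:

#include <stdint.h>

/* Standalone illustration of the de Bruijn trick the removed
 * rightmost_index() helper was built on; not driver code. */
static const unsigned int debruijn_pos[32] = {
	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};

/* Return the index of the rightmost set bit in *mask and clear it. */
static unsigned int pop_rightmost_bit(uint32_t *mask)
{
	uint32_t b = *mask & (0 - *mask);	/* isolate lowest set bit */

	*mask &= ~b;				/* clear it */
	return debruijn_pos[(uint32_t)(b * 0x077CB531U) >> 27];
}
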
struct ath_gen_timer_configuration {
u32 next_addr;
u32 period_addr;
@@ -520,12 +501,8 @@ struct ath_gen_timer {
struct ath_gen_timer_table {
- u32 gen_timer_index[32];
struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
- union {
- unsigned long timer_bits;
- u16 val;
- } timer_mask;
+ u16 timer_mask;
struct ath_hw_antcomb_conf {
@@ -690,7 +667,8 @@ struct ath_hw_ops {
struct ath9k_channel *chan,
u8 rxchainmask,
bool longcal);
- bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p);
void (*set_txdesc)(struct ath_hw *ah, void *ds,
struct ath_tx_info *i);
int (*proc_txdesc)(struct ath_hw *ah, void *ds,
@@ -786,7 +764,6 @@ struct ath_hw {
u32 txurn_interrupt_mask;
atomic_t intr_ref_cnt;
bool chip_fullsleep;
- u32 atim_window;
u32 modes_index;
/* Calibration */
@@ -1018,13 +995,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-#ifdef CONFIG_ATH9K_DEBUGFS
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
-#else
-static inline void ath9k_debug_sync_cause(struct ath_common *common,
- u32 sync_cause) {}
-#endif
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -470,7 +470,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -705,7 +704,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
- atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
pCap = &ah->caps;
@@ -899,7 +897,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
@@ -481,8 +481,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
| AR_Q_MISC_CBR_INCR_DIS0);
value = (qi->tqi_readyTime -
(ah->config.sw_beacon_response_time -
- ah->config.dma_beacon_response_time) -
- ah->config.additional_swba_backoff) * 1024;
+ ah->config.dma_beacon_response_time)) * 1024;
REG_WRITE(ah, AR_QRDYTIMECFG(q),
value | AR_Q_RDYTIMECFG_EN);
REG_SET_BIT(ah, AR_DMISC(q),
@@ -550,25 +549,25 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
rs->rs_rssi = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
} else {
rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
@@ -133,12 +133,8 @@ struct ath_rx_status {
u8 rs_rate;
u8 rs_antenna;
u8 rs_more;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_isaggr;
u8 rs_firstaggr;
u8 rs_moreaggr;
@@ -508,6 +508,9 @@ void ath9k_tasklet(unsigned long data)
wake_up(&sc->tx_wait);
+ if (status & ATH9K_INT_GENTIMER)
+ ath_gen_timer_isr(sc->sc_ah);
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
@@ -538,6 +541,7 @@ irqreturn_t ath_isr(int irq, void *dev)
enum ath9k_int status;
+ u32 sync_cause;
bool sched = false;
@@ -564,7 +568,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* bits we haven't explicitly enabled so we mask the
* value to insure we only process bits we requested.
- ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+ ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+ ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
@@ -757,6 +762,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_cmn_init_crypto(sc->sc_ah);
+ ath9k_hw_reset_tsf(ah);
spin_unlock_bh(&sc->sc_pcu_lock);
mutex_unlock(&sc->mutex);
@@ -1657,13 +1664,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
- (changed & BSS_CHANGED_BEACON_INT)) {
- if (ah->opmode == NL80211_IFTYPE_AP &&
- bss_conf->enable_beacon)
- ath9k_set_tsfadjust(sc, vif);
- if (ath9k_allow_beacon_config(sc, vif))
- ath9k_beacon_config(sc, vif, changed);
+ (changed & BSS_CHANGED_BEACON_INT))
+ ath9k_beacon_config(sc, vif, changed);
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
@@ -200,7 +200,7 @@ skip_tuning:
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
- btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
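
The millisecond-to-microsecond factor is dropped here, presumably because btcoex_period is now kept in microseconds elsewhere in this series. A quick illustrative check of the formula (assumed values, not driver code):

/* Illustrative arithmetic only: with a 40000 us coex period and a 25%
 * Bluetooth duty cycle, the Wi-Fi "no stomp" window comes out as
 * 40000 * (100 - 25) / 100 = 30000 us. */
static unsigned int example_no_stomp_us(unsigned int period_us,
					unsigned int duty_cycle)
{
	return period_us * (100 - duty_cycle) / 100;
}
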
@@ -15,7 +15,6 @@
#include <linux/dma-mapping.h>
#include "ar9003_mac.h"
@@ -906,6 +905,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
+ int i, j;
* RSSI is not available for subframes in an A-MPDU.
@@ -924,6 +924,20 @@ static void ath9k_process_rssi(struct ath_common *common,
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+ if (!(ah->rxchainmask & BIT(i)))
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ j++;
* Update Beacon RSSI, this is used by ANI.
@@ -960,186 +974,6 @@ static void ath9k_process_tsf(struct ath_rx_status *rs,
rxs->mactime += 0x100000000ULL;
-static s8 fix_rssi_inv_only(u8 rssi_val)
- if (rssi_val == 128)
- rssi_val = 0;
- return (s8) rssi_val;
-/* returns 1 if this was a spectral frame, even if not handled. */
-static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
- u8 num_bins, *bins, *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample_20;
- struct fft_sample_ht20_40 fft_sample_40;
- struct fft_sample_tlv *tlv;
- struct ath_radar_info *radar_info;
- int len = rs->rs_datalen;
- int dc_pos;
- u16 fft_len, length, freq = ah->curchan->chan->center_freq;
- enum nl80211_channel_type chan_type;
- /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
- * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
- * yet, but this is supposed to be possible as well.
- if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
- rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
- rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
- /* check if spectral scan bit is set. This does not have to be checked
- * if received through a SPECTRAL phy error, but shouldn't hurt.
- radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
- if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
- chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_40_NUM_BINS;
- bins = (u8 *)fft_sample_40.data;
- fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_NUM_BINS;
- bins = (u8 *)fft_sample_20.data;
- /* Variation in the data length is possible and will be fixed later */
- if ((len > fft_len + 2) || (len < fft_len - 1))
- return 1;
- switch (len - fft_len) {
- case 0:
- /* length correct, nothing to do. */
- memcpy(bins, vdata, num_bins);
- case -1:
- /* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, num_bins - 1);
- bins[0] = vdata[0];
- case 2:
- /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
- memcpy(bins, vdata, 30);
- bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], num_bins - 31);
- case 1:
- /* MAC added 2 extra bytes AND first byte is missing. */
- memcpy(&bins[1], vdata, 30);
- bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], num_bins - 32);
- default:
- /* DC value (value in the middle) is the blind spot of the spectral
- * sample and invalid, interpolate it.
- dc_pos = num_bins / 2;
- bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
- s8 lower_rssi, upper_rssi;
- s16 ext_nf;
- u8 lower_max_index, upper_max_index;
- u8 lower_bitmap_w, upper_bitmap_w;
- u16 lower_mag, upper_mag;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_ht20_40_mag_info *mag_info;
- if (caldata)
- ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
- caldata->nfCalHist[3].privNF);
- ext_nf = ATH_DEFAULT_NOISE_FLOOR;
- length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
- fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
- fft_sample_40.tlv.length = __cpu_to_be16(length);
- fft_sample_40.freq = __cpu_to_be16(freq);
- fft_sample_40.channel_type = chan_type;
- if (chan_type == NL80211_CHAN_HT40PLUS) {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- fft_sample_40.lower_noise = ah->noise;
- fft_sample_40.upper_noise = ext_nf;
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample_40.lower_noise = ext_nf;
- fft_sample_40.upper_noise = ah->noise;
- fft_sample_40.lower_rssi = lower_rssi;
- fft_sample_40.upper_rssi = upper_rssi;
- mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
- lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
- fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins);
- fft_sample_40.lower_max_index = lower_max_index;
- fft_sample_40.upper_max_index = upper_max_index;
- lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
- upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
- fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
- fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
- fft_sample_40.max_exp = mag_info->max_exp & 0xf;
- fft_sample_40.tsf = __cpu_to_be64(tsf);
- tlv = (struct fft_sample_tlv *)&fft_sample_40;
- u8 max_index, bitmap_w;
- u16 magnitude;
- struct ath_ht20_mag_info *mag_info;
- length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
- fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample_20.tlv.length = __cpu_to_be16(length);
- fft_sample_20.freq = __cpu_to_be16(freq);
- fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample_20.noise = ah->noise;
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins);
- fft_sample_20.max_index = max_index;
- bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample_20.bitmap_weight = bitmap_w;
- fft_sample_20.max_exp = mag_info->max_exp & 0xf;
- fft_sample_20.tsf = __cpu_to_be64(tsf);
- tlv = (struct fft_sample_tlv *)&fft_sample_20;
- ath_debug_send_fft_sample(sc, tlv);
static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#include <linux/relay.h>
+#include "ath9k.h"
+static s8 fix_rssi_inv_only(u8 rssi_val)
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+static void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample_tlv)
+ int length;
+ if (!sc->rfs_chan_spec_scan)
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+/* returns 1 if this was a spectral frame, even if not handled. */
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
+ struct ath_radar_info *radar_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+	} else {
+		fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
+	switch (len - fft_len) {
+	case 0:
+		/* length correct, nothing to do. */
+		memcpy(bins, vdata, num_bins);
+		break;
+	case -1:
+		/* first byte missing, duplicate it. */
+		memcpy(&bins[1], vdata, num_bins - 1);
+		bins[0] = vdata[0];
+		break;
+	case 2:
+		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+		memcpy(bins, vdata, 30);
+		bins[30] = vdata[31];
+		memcpy(&bins[31], &vdata[33], num_bins - 31);
+		break;
+	case 1:
+		/* MAC added 2 extra bytes AND first byte is missing. */
+		memcpy(&bins[1], vdata, 30);
+		bins[31] = vdata[31];
+		memcpy(&bins[32], &vdata[33], num_bins - 32);
+		break;
+	default:
+		return 1;
+	}
+
+	/* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and invalid, interpolate it.
+	 */
+ dc_pos = num_bins / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+	else
+		ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+	} else {
+		lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ ath_debug_send_fft_sample(sc, tlv);
+/*********************/
+/* spectral_scan_ctl */
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ struct ath_softc *sc = file->private_data;
+ char *mode = "";
+ unsigned int len;
+ switch (sc->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ ssize_t len;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_spectral_scan_trigger(sc->hw);
+ } else if (strncmp("background", buf, 9) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ return -EINVAL;
+ return count;
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
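
For context, the control file above accepts the keywords handled in write_file_spec_scan_ctl(). A hypothetical userspace sketch that selects background mode and then triggers a scan; the debugfs mount point and phy name are assumptions:

/* Hypothetical userspace helper; path components are assumptions. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_ctl(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *ctl =
		"/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl";

	if (write_ctl(ctl, "background"))
		return 1;
	return write_ctl(ctl, "trigger") ? 1 : 0;
}
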
+/*************************/
+/* spectral_short_repeat */
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ unsigned long val;
+ if (kstrtoul(buf, 0, &val))
+ if (val < 0 || val > 1)
+ sc->spec_config.short_repeat = val;
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+/******************/
+/* spectral_count */
+static ssize_t read_file_spectral_count(struct file *file,
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+static ssize_t write_file_spectral_count(struct file *file,
+ if (val < 0 || val > 255)
+ sc->spec_config.count = val;
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+/*******************/
+/* spectral_period */
+static ssize_t read_file_spectral_period(struct file *file,
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+static ssize_t write_file_spectral_period(struct file *file,
+ sc->spec_config.period = val;
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+/***********************/
+/* spectral_fft_period */
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ if (val < 0 || val > 15)
+ sc->spec_config.fft_period = val;
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+/* Relay interface */
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+ struct dentry *buf_file;
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+static int remove_buf_file_handler(struct dentry *dentry)
+ debugfs_remove(dentry);
+struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+/* Debug Init/Deinit */
+void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+void ath9k_spectral_init_debug(struct ath_softc *sc)
+ sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+ sc->debug.debugfs_phy,
+ 1024, 256, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat",
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count",
+ &fops_spectral_count);
+ debugfs_create_file("spectral_period",
+ &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period",
+ &fops_spectral_fft_period);
@@ -0,0 +1,212 @@
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+/* enum spectral_mode:
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+#define SPECTRAL_HT20_NUM_BINS 56
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+/* Dynamic 20/40 mode:
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+#define SPECTRAL_HT20_40_NUM_BINS 128
+ * data. This struct is for reference only.
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+ s8 m = (bins[2] & 0xfc) >> 2;
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+	else
+		m &= ~0xe0;
+ return m + 29;
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+ return bins[0] & 0x3f;
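
As a worked illustration of the byte packing these helpers decode (byte values are made up; this is not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Arbitrary example bytes laid out as documented above:
	 * bins[0] = {max_magnitude[1:0], bitmap_weight[5:0]}
	 * bins[1] = max_magnitude[9:2]
	 * bins[2] = {max_index[5:0], max_magnitude[11:10]} */
	uint8_t bins[3] = { 0x8a, 0x5c, 0x3d };

	uint16_t max_magnitude = (bins[0] & 0xc0) >> 6 |
				 (bins[1] & 0xff) << 2 |
				 (bins[2] & 0x03) << 10;
	uint8_t bitmap_weight = bins[0] & 0x3f;
	/* raw 6-bit index; the driver's spectral_max_index() additionally
	 * re-centres this value */
	uint8_t max_index = (bins[2] & 0xfc) >> 2;

	printf("magnitude=%u index=%u weight=%u\n",
	       max_magnitude, max_index, bitmap_weight);
	return 0;
}
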
+/* FFT sample format given to userspace via debugfs.
+ * Please keep the type/length at the front position and change
+ * other fields after adding another sample type
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
+struct fft_sample_tlv {
+ u8 type; /* see ath_fft_sample */
+ __be16 length;
+ /* type dependent data follows */
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+ __be16 freq;
+ s8 noise;
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+ __be64 tsf;
+struct fft_sample_ht20_40 {
+ u8 channel_type;
+ s8 lower_rssi;
+ s8 upper_rssi;
+ s8 lower_noise;
+ s8 upper_noise;
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+ u8 lower_max_index;
+ u8 upper_max_index;
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+void ath9k_spectral_init_debug(struct ath_softc *sc);
+void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode);
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct ath_rx_status *rs, u64 tsf);
+#else
+static inline int ath_process_fft(struct ath_softc *sc,
+ struct ieee80211_hdr *hdr,
+#endif /* CONFIG_ATH9K_DEBUGFS */
+#endif /* SPECTRAL_H */
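
Samples reach userspace through the relay channel opened in ath9k_spectral_init_debug(). A hypothetical sketch of walking such a capture, assuming only the TLV header layout defined above (1-byte type, big-endian 16-bit payload length):

/* Hypothetical userspace sketch: walk a buffer captured from the
 * spectral_scan relay channel and report each FFT sample record. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint8_t type;
	uint16_t length;		/* big endian, payload bytes */
} __attribute__((packed));

static void walk_fft_samples(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= len) {
		struct tlv_hdr hdr;
		uint16_t payload;

		memcpy(&hdr, buf + off, sizeof(hdr));
		payload = ntohs(hdr.length);

		if (off + sizeof(hdr) + payload > len)
			break;			/* truncated record */

		printf("sample type %u, %u payload bytes\n",
		       hdr.type, payload);
		off += sizeof(hdr) + payload;
	}
}
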
@@ -48,7 +48,9 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
u32 len = 1200;
+ struct ieee80211_tx_rate *rate;
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb;
@@ -73,10 +75,16 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
+ rate = &tx_info->control.rates[0];
tx_info->band = hw->conf.chandef.chan->band;
tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
tx_info->control.vif = sc->tx99_vif;
- tx_info->control.rates[0].count = 1;
+ rate->count = 1;
+ if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ if (IS_CHAN_HT40(ah->curchan))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
@@ -174,14 +174,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
- struct ieee80211_hdr *hdr;
- u8 tidno = 0;
- hdr = (struct ieee80211_hdr *) skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- tidno = ieee80211_get_qos_ctl(hdr)[0];
- tidno &= IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
return ATH_AN_2_TID(an, tidno);
@@ -2060,22 +2060,28 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
- mutex_lock(&wcn->hal_ind_mutex);
msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
- if (msg_ind) {
- msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
- memcpy(msg_ind->msg, buf, len);
- list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
- queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
- wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ if (!msg_ind)
+ goto nomem;
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ if (!msg_ind->msg) {
+ kfree(msg_ind);
+nomem:
+ * FIXME: Do something smarter than just
+ * printing an error.
+ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
+ msg_header->msg_type);
+ memcpy(msg_ind->msg, buf, len);
+ mutex_lock(&wcn->hal_ind_mutex);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
mutex_unlock(&wcn->hal_ind_mutex);
- if (msg_ind)
- /* FIXME: Do something smarter then just printing an error. */
- wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
- msg_header->msg_type);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
@@ -28,8 +28,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
@@ -4278,8 +4278,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
- along with AtmelMACFW; if not, write to the Free Software
+ along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
****************************************************************************/
/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
@@ -15,8 +15,8 @@
******************************************************************************/
@@ -24,8 +24,8 @@
#include <linux/pci.h>
@@ -36,7 +36,6 @@ brcmfmac-objs += \
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
bcmsdh.o \
- bcmsdh_sdmmc.o \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
@@ -101,35 +101,41 @@ struct brcmf_proto_bcdc_header {
* plus any space that might be needed
* for bus alignment padding.
-#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for
- * round off at the end of buffer
- * Currently is SDIO
struct brcmf_bcdc {
u16 reqid;
u8 bus_header[BUS_HEADER_LEN];
struct brcmf_proto_bcdc_dcmd msg;
- unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
+ unsigned char buf[BRCMF_DCMD_MAXLEN];
-static int brcmf_proto_bcdc_msg(struct brcmf_pub *drvr)
+static int
+brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+ uint len, bool set)
struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
- int len = le32_to_cpu(bcdc->msg.len) +
- sizeof(struct brcmf_proto_bcdc_dcmd);
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ u32 flags;
brcmf_dbg(BCDC, "Enter\n");
- /* NOTE : bcdc->msg.len holds the desired length of the buffer to be
- * returned. Only up to BCDC_MAX_MSG_SIZE of this buffer area
- * is actually sent to the dongle
- if (len > BCDC_MAX_MSG_SIZE)
- len = BCDC_MAX_MSG_SIZE;
+ memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
+ msg->cmd = cpu_to_le32(cmd);
+ msg->len = cpu_to_le32(len);
+ flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
+ if (set)
+ flags |= BCDC_DCMD_SET;
+ flags = (flags & ~BCDC_DCMD_IF_MASK) |
+ (ifidx << BCDC_DCMD_IF_SHIFT);
+ msg->flags = cpu_to_le32(flags);
+ if (buf)
+ memcpy(bcdc->buf, buf, len);
/* Send request */
- return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len);
+ return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg,
+ len + sizeof(struct brcmf_proto_bcdc_dcmd));
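
The consolidated helper packs the request id, the interface index and the get/set direction into the dcmd flags word. A self-contained sketch of that packing; the shift and mask values here are illustrative stand-ins for the driver's BCDC_DCMD_* definitions:

#include <stdint.h>

/* Illustrative stand-ins; the BCDC protocol header is authoritative. */
#define EX_DCMD_SET		0x02
#define EX_DCMD_IF_MASK		0xF000
#define EX_DCMD_IF_SHIFT	12
#define EX_DCMD_ID_SHIFT	16

static uint32_t ex_bcdc_flags(uint16_t reqid, uint8_t ifidx, int set)
{
	uint32_t flags = (uint32_t)reqid << EX_DCMD_ID_SHIFT;

	if (set)
		flags |= EX_DCMD_SET;
	flags = (flags & ~EX_DCMD_IF_MASK) | (ifidx << EX_DCMD_IF_SHIFT);
	return flags;
}
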
static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -161,19 +167,7 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
- memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
- flags = (flags & ~BCDC_DCMD_IF_MASK) |
- (ifidx << BCDC_DCMD_IF_SHIFT);
- msg->flags = cpu_to_le32(flags);
- if (buf)
- memcpy(bcdc->buf, buf, len);
- ret = brcmf_proto_bcdc_msg(drvr);
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
if (ret < 0) {
brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
ret);
@@ -227,19 +221,7 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT) | BCDC_DCMD_SET;
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
if (ret < 0)
goto done;
@@ -347,6 +329,15 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *pktbuf)
+ brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf);
+ return brcmf_bus_txdata(drvr->bus_if, pktbuf);
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
struct brcmf_bcdc *bcdc;
@@ -361,15 +352,15 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
goto fail;
- drvr->proto->hdrpush = brcmf_proto_bcdc_hdrpush;
drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+ drvr->proto->txdata = brcmf_proto_bcdc_txdata;
drvr->proto->pd = bcdc;
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_bcdc_dcmd) + ROUND_UP_MARGIN;
+ sizeof(struct brcmf_proto_bcdc_dcmd);
fail:
@@ -23,9 +23,17 @@
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
#include <defs.h>
#include <brcm_hw_ids.h>
@@ -35,11 +43,21 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
+#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
+#define DMA_ALIGN_MASK 0x03
+#define SDIO_FUNC1_BLOCKSIZE 64
+#define SDIO_FUNC2_BLOCKSIZE 512
+/* Maximum milliseconds to wait for F2 to come up */
+#define SDIO_WAIT_F2RDY 3000
+static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -54,27 +72,46 @@ static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
sdiodev->irq_en = false;
- brcmf_sdbrcm_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus);
return IRQ_HANDLED;
-static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
brcmf_dbg(INTR, "IB intr triggered\n");
/* dummy handler for SDIO function 2 interrupt */
-static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
+static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+ bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+ is_err = atomic_read(&sdiodev->suspend);
+#endif
+ return is_err;
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
+static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq)
+ int retry = 0;
+ while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+ wait_event_timeout(*wq, false, HZ/100);
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
int ret = 0;
u8 data;
@@ -84,7 +121,7 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
sdiodev->pdata->oob_irq_nr);
ret = request_irq(sdiodev->pdata->oob_irq_nr,
- brcmf_sdio_oob_irqhandler,
+ brcmf_sdiod_oob_irqhandler,
sdiodev->pdata->oob_irq_flags,
"brcmf_oob_intr",
&sdiodev->func[1]->dev);
@@ -108,36 +145,36 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_host(sdiodev->func[1]);
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+ data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
sdio_release_host(sdiodev->func[1]);
brcmf_dbg(SDIO, "Entering\n");
- sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
- sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
+ sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
if (sdiodev->oob_irq_requested) {
@@ -160,8 +197,117 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
+ uint regaddr, u8 byte)
+ int err_ret;
+ * Can only directly write to some F0 registers.
+ * Handle CCCR_IENx and CCCR_ABORT command
+ * as a special case.
+ if ((regaddr == SDIO_CCCR_ABORT) ||
+ (regaddr == SDIO_CCCR_IENx))
+ sdio_writeb(func, byte, regaddr, &err_ret);
+ sdio_f0_writeb(func, byte, regaddr, &err_ret);
+ return err_ret;
+static int brcmf_sdiod_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
+ uint func, uint regaddr, u8 *byte)
+ brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
+ return -EIO;
+ if (rw && func == 0) {
+ /* handle F0 separately */
+ err_ret = brcmf_sdiod_f0_writeb(sdiodev->func[func],
+ regaddr, *byte);
+ if (rw) /* CMD52 Write */
+ sdio_writeb(sdiodev->func[func], *byte, regaddr,
+ &err_ret);
+ else if (func == 0) {
+ *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
+ *byte = sdio_readb(sdiodev->func[func], regaddr,
+ if (err_ret) {
+ * SleepCSR register access can fail when
+ * waking up the device so reduce this noise
+ * in the logs.
+ if (regaddr != SBSDIO_FUNC1_SLEEPCSR)
+ brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "write" : "read", func, regaddr, *byte,
+ err_ret);
+ brcmf_dbg(SDIO, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+static int brcmf_sdiod_request_word(struct brcmf_sdio_dev *sdiodev, uint rw,
+ uint func, uint addr, u32 *word,
+ uint nbytes)
+ int err_ret = -EIO;
+ if (func == 0) {
+ brcmf_err("Only CMD52 allowed to F0\n");
+ brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ rw, func, addr, nbytes);
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+ if (rw) { /* CMD52 Write */
+ if (nbytes == 4)
+ sdio_writel(sdiodev->func[func], *word, addr,
+ else if (nbytes == 2)
+ sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
+ addr, &err_ret);
+ brcmf_err("Invalid nbytes: %d\n", nbytes);
+ } else { /* CMD52 Read */
+ *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
+ *word = sdio_readw(sdiodev->func[func], addr,
+ &err_ret) & 0xFFFF;
+ if (err_ret)
+ brcmf_err("Failed to %s word, Err: 0x%08x\n",
+ rw ? "write" : "read", err_ret);
static int
-brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
int err = 0, i;
u8 addr[3];
@@ -176,7 +322,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
do {
if (retry)
usleep_range(1000, 2000);
- err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
+ err = brcmf_sdiod_request_byte(sdiodev, SDIOH_WRITE,
SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
&addr[i]);
} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
@@ -192,13 +338,13 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
-brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
if (err)
return err;
@@ -213,9 +359,8 @@ brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-int
-brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write)
+static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write)
u8 func_num, reg_size;
s32 retry = 0;
@@ -237,7 +382,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num = SDIO_FUNC_1;
reg_size = 4;
- ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ ret = brcmf_sdiod_addrprep(sdiodev, reg_size, &addr);
if (ret)
@@ -248,10 +393,10 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
if (retry) /* wait for 1 ms till bus get settled down */
if (reg_size == 1)
- ret = brcmf_sdioh_request_byte(sdiodev, write,
+ ret = brcmf_sdiod_request_byte(sdiodev, write,
func_num, addr, data);
- ret = brcmf_sdioh_request_word(sdiodev, write,
+ ret = brcmf_sdiod_request_word(sdiodev, write,
func_num, addr, data, 4);
} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
@@ -262,13 +407,13 @@ done:
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, &data, false);
brcmf_dbg(SDIO, "data:0x%02x\n", data);
@@ -277,13 +422,13 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
return data;
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
u32 data;
brcmf_dbg(SDIO, "data:0x%08x\n", data);
@@ -292,37 +437,37 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 data, int *ret)
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, &data, true);
*ret = retval;
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
u32 data, int *ret)
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
bool write, u32 addr, struct sk_buff *pkt)
unsigned int req_sz;
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
return -EIO;
/* Single skb use the standard mmc interface */
@@ -345,7 +490,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- * brcmf_sdio_sglist_rw - SDIO interface function for block data access
+ * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -356,9 +501,9 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
-static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr,
- struct sk_buff_head *pktlist)
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
unsigned int max_req_sz, orig_offset, dst_offset;
@@ -376,8 +521,8 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (!pktlist->qlen)
return -EINVAL;
target_list = pktlist;
@@ -524,9 +669,7 @@ exit:
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
struct sk_buff *mypkt;
int err;
@@ -538,7 +681,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
if (!err)
memcpy(buf, mypkt->data, nbytes);
@@ -546,50 +689,47 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt)
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
- uint width;
+ u32 addr = sdiodev->sbwad;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pkt->len);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
done:
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen)
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq, uint totlen)
struct sk_buff *glom_skb;
- fn, addr, pktq->qlen);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
+ addr, pktq->qlen);
if (pktq->qlen == 1)
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+					 glom_skb);
@@ -598,18 +738,17 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
skb_pull(glom_skb, skb->len);
} else
- err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq);
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -621,48 +760,47 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
memcpy(mypkt->data, buf, nbytes);
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
+ mypkt);
brcmu_pkt_buf_free_skb(mypkt);
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq)
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
if (pktq->qlen == 1 || !sdiodev->sg_support)
skb_queue_walk(pktq, skb) {
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
+ addr, skb);
- err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
int
-brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size)
+brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size)
int bcmerror = 0;
struct sk_buff *pkt;
@@ -689,7 +827,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Do the transfer(s) */
while (size) {
/* Set the backplane window to include the start address */
- bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
+ bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
if (bcmerror)
@@ -703,8 +841,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, pkt);
+ bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
@@ -726,7 +864,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
/* Return the window to backplane enumeration space for core access */
- if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
+ if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
brcmf_err("FAILED to set window back to 0x%x\n",
sdiodev->sbwad);
@@ -735,65 +873,337 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return bcmerror;
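brcmf_sdiod_ramrw() copies an arbitrarily large buffer through the function-1 backplane window by re-pointing the window for each slice. A sketch of the chunking loop, with the window size stated as an assumption and the actual transfer elided:

/* Sketch of the windowed copy loop inside brcmf_sdiod_ramrw().
 * SKETCH_SB_WINDOW_SZ mirrors the usual 32 KB SBSDIO window and is an
 * assumption here; the data transfer itself is elided.
 */
#define SKETCH_SB_WINDOW_SZ	0x08000
#define SKETCH_SB_ADDR_MASK	(SKETCH_SB_WINDOW_SZ - 1)

static void sketch_ramrw_chunks(struct brcmf_sdio_dev *sdiodev,
				u32 address, u8 *data, uint size)
{
	while (size) {
		uint sdaddr = address & SKETCH_SB_ADDR_MASK;
		uint dsize = min_t(uint, SKETCH_SB_WINDOW_SZ - sdaddr, size);

		/* point the F1 backplane window at this block */
		if (brcmf_sdiod_set_sbaddr_window(sdiodev, address))
			break;
		/* ... transfer dsize bytes at window offset sdaddr ... */

		address += dsize;
		data += dsize;
		size -= dsize;
	}
}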
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
char t_func = (char)fn;
brcmf_dbg(SDIO, "Enter\n");
/* issue abort cmd52 command through F0 */
- brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
+ brcmf_sdiod_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
SDIO_CCCR_ABORT, &t_func);
brcmf_dbg(SDIO, "Exit\n");
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+ sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ if (sdiodev->bus) {
+ brcmf_sdio_remove(sdiodev->bus);
+ sdiodev->bus = NULL;
+ /* Disable Function 2 */
+ sdio_claim_host(sdiodev->func[2]);
+ sdio_disable_func(sdiodev->func[2]);
+ sdio_release_host(sdiodev->func[2]);
+ /* Disable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+ sdiodev->sbwad = 0;
+static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
- u32 regs = 0;
+ struct sdio_func *func;
+ struct mmc_host *host;
+ uint max_blocks;
- ret = brcmf_sdioh_attach(sdiodev);
- if (ret)
+ sdiodev->num_funcs = 2;
+ ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ if (ret) {
+ brcmf_err("Failed to set F1 blocksize\n");
+ goto out;
+ ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ brcmf_err("Failed to set F2 blocksize\n");
+ /* increase F2 timeout */
+ sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+ /* Enable Function 1 */
+ ret = sdio_enable_func(sdiodev->func[1]);
+ brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
- regs = SI_ENUM_BASE;
+ * determine host related variables after brcmf_sdiod_probe()
+ * as func->cur_blksize is properly set and F2 init has been
+ * completed successfully.
+ func = sdiodev->func[2];
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
+ sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
- brcmf_err("device attach failed\n");
ret = -ENODEV;
out:
- brcmf_sdio_remove(sdiodev);
+ brcmf_sdiod_remove(sdiodev);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+ { /* end: all zeroes */ },
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
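The ID table is what lets the MMC core route matching cards to brcmf_ops_sdio_probe(); supporting another chip is one more SDIO_DEVICE() row ahead of the all-zero terminator. Purely illustrative (the XXXX ID below is made up):

/* Illustrative only: where a new chip entry would go.  The commented
 * row uses a fictitious SDIO_DEVICE_ID_BROADCOM_XXXX identifier.
 */
static const struct sdio_device_id example_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
	/* {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_XXXX)}, */
	{ /* end: all zeroes */ },
};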
+static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
- sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ int err;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
- if (sdiodev->bus) {
- brcmf_sdbrcm_disconnect(sdiodev->bus);
- sdiodev->bus = NULL;
+ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_dbg(SDIO, "Class=%x\n", func->class);
+ brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+	/* Consume func num 1 but don't do anything with it. */
+ if (func->num == 1)
+ /* Ignore anything but func 2 */
+ if (func->num != 2)
+ return -ENODEV;
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev) {
+ kfree(bus_if);
- brcmf_sdioh_detach(sdiodev);
+ /* store refs to functions used. mmc_card does
+ * not hold the F0 function pointer.
+ sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
+ sdiodev->func[0]->num = 0;
+ sdiodev->func[1] = func->card->sdio_func[0];
+ sdiodev->func[2] = func;
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv.sdio = sdiodev;
+ bus_if->proto_type = BRCMF_PROTO_BCDC;
+ dev_set_drvdata(&func->dev, bus_if);
+ dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+ sdiodev->dev = &sdiodev->func[1]->dev;
+ sdiodev->pdata = brcmfmac_sdio_pdata;
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_byte_wait);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+ brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+ err = brcmf_sdiod_probe(sdiodev);
+ if (err) {
+ brcmf_err("F2 error, probe failed %d...\n", err);
+ goto fail;
- sdiodev->sbwad = 0;
+ brcmf_dbg(SDIO, "F2 init completed...\n");
+fail:
+ dev_set_drvdata(&func->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ kfree(sdiodev->func[0]);
+ kfree(sdiodev);
+ return err;
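The mmc core only exposes sdio_func objects for functions 1..7, so probe keeps a kmemdup'd copy of func with ->num forced to 0 for CCCR (function 0) accesses. A hypothetical helper using that copy (the name and the claim on func[1] are assumptions; the core may still refuse F0 writes outside 0xF0..0xFF):

/* Hypothetical helper: write a CCCR register through the duplicated
 * func pointer whose ->num was forced to 0 during probe.
 */
static int sketch_f0_writeb(struct brcmf_sdio_dev *sdiodev, uint regaddr, u8 val)
{
	int err;

	sdio_claim_host(sdiodev->func[1]);
	sdio_f0_writeb(sdiodev->func[0], val, regaddr, &err);
	sdio_release_host(sdiodev->func[1]);

	return err;
}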
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+ brcmf_dbg(SDIO, "Function: %d\n", func->num);
+ if (func->num != 1 && func->num != 2)
+ bus_if = dev_get_drvdata(&func->dev);
+ if (bus_if) {
+ sdiodev = bus_if->bus_priv.sdio;
+ dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+ brcmf_dbg(SDIO, "Exit\n");
+static int brcmf_ops_sdio_suspend(struct device *dev)
+ mmc_pm_flag_t sdio_flags;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+ brcmf_dbg(SDIO, "\n");
+ atomic_set(&sdiodev->suspend, true);
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ brcmf_err("Host can't keep power while suspended\n");
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ brcmf_err("Failed to set pm_flags\n");
+ return ret;
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+static int brcmf_ops_sdio_resume(struct device *dev)
+ brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_ops_sdio_suspend,
+ .resume = brcmf_ops_sdio_resume,
+#endif /* CONFIG_PM_SLEEP */
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id_table = brcmf_sdmmc_ids,
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
+ if (brcmfmac_sdio_pdata->power_on)
+ brcmfmac_sdio_pdata->power_on();
+static int brcmf_sdio_pd_remove(struct platform_device *pdev)
+ if (brcmfmac_sdio_pdata->power_off)
+ brcmfmac_sdio_pdata->power_off();
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+static struct platform_driver brcmf_sdio_pd = {
+ .remove = brcmf_sdio_pd_remove,
+ .driver = {
+void brcmf_sdio_register(void)
+ int ret;
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+ if (ret)
+ brcmf_err("sdio_register_driver failed: %d\n", ret);
+void brcmf_sdio_exit(void)
- if (enable)
- brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ if (brcmfmac_sdio_pdata)
+ platform_driver_unregister(&brcmf_sdio_pd);
- brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+void __init brcmf_sdio_init(void)
+ ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+ if (ret == -ENODEV)
+ brcmf_dbg(SDIO, "No platform data available.\n");
@@ -1,552 +0,0 @@
- * Copyright (c) 2010 Broadcom Corporation
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* request_irq() */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <net/cfg80211.h>
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include "sdio_host.h"
-#include "sdio_chip.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
-#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define DMA_ALIGN_MASK 0x03
-#define SDIO_FUNC1_BLOCKSIZE 64
-#define SDIO_FUNC2_BLOCKSIZE 512
-/* devices we support, null terminated */
-static const struct sdio_device_id brcmf_sdmmc_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
- SDIO_DEVICE_ID_BROADCOM_4335_4339)},
- { /* end: all zeroes */ },
-MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-bool
-brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
- bool is_err = false;
-#ifdef CONFIG_PM_SLEEP
- is_err = atomic_read(&sdiodev->suspend);
- return is_err;
-void
-brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
- int retry = 0;
- while (atomic_read(&sdiodev->suspend) && retry++ != 30)
- wait_event_timeout(*wq, false, HZ/100);
-static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
- uint regaddr, u8 *byte)
- struct sdio_func *sdfunc = sdiodev->func[0];
- int err_ret;
- * Can only directly write to some F0 registers.
- * Handle F2 enable/disable and Abort command
- * as a special case.
- if (regaddr == SDIO_CCCR_IOEx) {
- sdfunc = sdiodev->func[2];
- if (sdfunc) {
- if (*byte & SDIO_FUNC_ENABLE_2) {
- /* Enable Function 2 */
- err_ret = sdio_enable_func(sdfunc);
- if (err_ret)
- brcmf_err("enable F2 failed:%d\n",
- err_ret);
- /* Disable Function 2 */
- err_ret = sdio_disable_func(sdfunc);
- brcmf_err("Disable F2 failed:%d\n",
- err_ret = -ENOENT;
- } else if ((regaddr == SDIO_CCCR_ABORT) ||
- (regaddr == SDIO_CCCR_IENx)) {
- sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
- GFP_KERNEL);
- if (!sdfunc)
- sdfunc->num = 0;
- sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
- kfree(sdfunc);
- } else if (regaddr < 0xF0) {
- brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
- err_ret = -EPERM;
- sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
- return err_ret;
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
- return -EIO;
- if (rw && func == 0) {
- /* handle F0 separately */
- err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
- if (rw) /* CMD52 Write */
- sdio_writeb(sdiodev->func[func], *byte, regaddr,
- &err_ret);
- else if (func == 0) {
- *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
- *byte = sdio_readb(sdiodev->func[func], regaddr,
- if (err_ret) {
- * SleepCSR register access can fail when
- * waking up the device so reduce this noise
- * in the logs.
- if (regaddr != SBSDIO_FUNC1_SLEEPCSR)
- brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
- rw ? "write" : "read", func, regaddr, *byte,
- brcmf_dbg(SDIO, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint func, uint addr, u32 *word,
- uint nbytes)
- int err_ret = -EIO;
- if (func == 0) {
- brcmf_err("Only CMD52 allowed to F0\n");
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- rw, func, addr, nbytes);
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
- if (rw) { /* CMD52 Write */
- if (nbytes == 4)
- sdio_writel(sdiodev->func[func], *word, addr,
- else if (nbytes == 2)
- sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
- addr, &err_ret);
- brcmf_err("Invalid nbytes: %d\n", nbytes);
- } else { /* CMD52 Read */
- *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
- *word = sdio_readw(sdiodev->func[func], addr,
- &err_ret) & 0xFFFF;
- brcmf_err("Failed to %s word, Err: 0x%08x\n",
- rw ? "write" : "read", err_ret);
-static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
- /* read 24 bits and return valid 17 bit addr */
- int i, ret;
- u32 scratch, regdata;
- __le32 scratch_le;
- u8 *ptr = (u8 *)&scratch_le;
- for (i = 0; i < 3; i++) {
- regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
- if (ret != 0)
- brcmf_err("Can't read!\n");
- *ptr++ = (u8) regdata;
- regaddr++;
- /* Only the lower 17-bits are valid */
- scratch = le32_to_cpu(scratch_le);
- scratch &= 0x0001FFFF;
- return scratch;
-static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
- u32 fbraddr;
- u8 func;
- brcmf_dbg(SDIO, "\n");
- /* Get the Card's common CIS address */
- sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
- SDIO_CCCR_CIS);
- brcmf_dbg(SDIO, "Card's Common CIS Ptr = 0x%x\n",
- sdiodev->func_cis_ptr[0]);
- /* Get the Card's function CIS (for each function) */
- for (fbraddr = SDIO_FBR_BASE(1), func = 1;
- func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
- sdiodev->func_cis_ptr[func] =
- brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
- brcmf_dbg(SDIO, "Function %d CIS Ptr = 0x%x\n",
- func, sdiodev->func_cis_ptr[func]);
- /* Enable Function 1 */
- err_ret = sdio_enable_func(sdiodev->func[1]);
- brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
- * Public entry points & extern's
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
- int err_ret = 0;
- struct mmc_host *host;
- struct sdio_func *func;
- uint max_blocks;
- sdiodev->num_funcs = 2;
- sdio_claim_host(sdiodev->func[1]);
- err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
- brcmf_err("Failed to set F1 blocksize\n");
- goto out;
- err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
- brcmf_err("Failed to set F2 blocksize\n");
- brcmf_sdioh_enablefuncs(sdiodev);
- * determine host related variables after brcmf_sdio_probe()
- * as func->cur_blksize is properly set and F2 init has been
- * completed successfully.
- func = sdiodev->func[2];
- host = func->card->host;
- sdiodev->sg_support = host->max_segs > 1;
- max_blocks = min_t(uint, host->max_blk_count, 511u);
- sdiodev->max_request_size = min_t(uint, host->max_req_size,
- max_blocks * func->cur_blksize);
- sdiodev->max_segment_count = min_t(uint, host->max_segs,
- SG_MAX_SINGLE_ALLOC);
- sdiodev->max_segment_size = host->max_seg_size;
-out:
- sdio_release_host(sdiodev->func[1]);
- brcmf_dbg(SDIO, "Done\n");
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
- sdio_claim_host(sdiodev->func[2]);
- sdio_disable_func(sdiodev->func[2]);
- sdio_release_host(sdiodev->func[2]);
- /* Disable Function 1 */
- sdio_disable_func(sdiodev->func[1]);
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
- int err;
- struct brcmf_sdio_dev *sdiodev;
- struct brcmf_bus *bus_if;
- brcmf_dbg(SDIO, "Enter\n");
- brcmf_dbg(SDIO, "Class=%x\n", func->class);
- brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
- brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
- brcmf_dbg(SDIO, "Function#: %d\n", func->num);
- /* Consume func num 1 but dont do anything with it. */
- if (func->num == 1)
- /* Ignore anything but func 2 */
- if (func->num != 2)
- return -ENODEV;
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
- if (!bus_if)
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev) {
- kfree(bus_if);
- sdiodev->func[0] = func->card->sdio_func[0];
- sdiodev->func[1] = func->card->sdio_func[0];
- sdiodev->func[2] = func;
- sdiodev->bus_if = bus_if;
- bus_if->bus_priv.sdio = sdiodev;
- dev_set_drvdata(&func->dev, bus_if);
- dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
- sdiodev->dev = &sdiodev->func[1]->dev;
- sdiodev->pdata = brcmfmac_sdio_pdata;
- atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->request_byte_wait);
- init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_buffer_wait);
- brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
- err = brcmf_sdio_probe(sdiodev);
- if (err) {
- brcmf_err("F2 error, probe failed %d...\n", err);
- goto fail;
- brcmf_dbg(SDIO, "F2 init completed...\n");
-fail:
- dev_set_drvdata(&func->dev, NULL);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- kfree(sdiodev);
- return err;
-static void brcmf_ops_sdio_remove(struct sdio_func *func)
- brcmf_dbg(SDIO, "Function: %d\n", func->num);
- if (func->num != 1 && func->num != 2)
- bus_if = dev_get_drvdata(&func->dev);
- if (bus_if) {
- sdiodev = bus_if->bus_priv.sdio;
- dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
- brcmf_dbg(SDIO, "Exit\n");
-static int brcmf_sdio_suspend(struct device *dev)
- mmc_pm_flag_t sdio_flags;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- int ret = 0;
- atomic_set(&sdiodev->suspend, true);
- sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_err("Host can't keep power while suspended\n");
- ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- brcmf_err("Failed to set pm_flags\n");
- return ret;
- brcmf_sdio_wdtmr_enable(sdiodev, false);
-static int brcmf_sdio_resume(struct device *dev)
- brcmf_sdio_wdtmr_enable(sdiodev, true);
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_sdio_suspend,
- .resume = brcmf_sdio_resume,
-#endif /* CONFIG_PM_SLEEP */
-static struct sdio_driver brcmf_sdmmc_driver = {
- .probe = brcmf_ops_sdio_probe,
- .remove = brcmf_ops_sdio_remove,
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .id_table = brcmf_sdmmc_ids,
- .drv = {
- .pm = &brcmf_sdio_pm_ops,
- },
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
- brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
- if (brcmfmac_sdio_pdata->power_on)
- brcmfmac_sdio_pdata->power_on();
-static int brcmf_sdio_pd_remove(struct platform_device *pdev)
- if (brcmfmac_sdio_pdata->power_off)
- brcmfmac_sdio_pdata->power_off();
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-static struct platform_driver brcmf_sdio_pd = {
- .remove = brcmf_sdio_pd_remove,
- .driver = {
-void brcmf_sdio_register(void)
- int ret;
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- brcmf_err("sdio_register_driver failed: %d\n", ret);
-void brcmf_sdio_exit(void)
- if (brcmfmac_sdio_pdata)
- platform_driver_unregister(&brcmf_sdio_pd);
-void __init brcmf_sdio_init(void)
- ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
- if (ret == -ENODEV)
- brcmf_dbg(SDIO, "No platform data available.\n");
@@ -24,6 +24,12 @@ enum brcmf_bus_state {
BRCMF_BUS_DATA /* Ready for frame transfers */
+/* The level of bus communication with the dongle */
+enum brcmf_bus_protocol_type {
+ BRCMF_PROTO_BCDC,
+ BRCMF_PROTO_MSGBUF
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -65,6 +71,7 @@ struct brcmf_bus_ops {
* struct brcmf_bus - interface structure between common and bus layer
*
* @bus_priv: pointer to private bus device.
+ * @proto_type: protocol type, bcdc or msgbuf
* @dev: device pointer of bus device.
* @drvr: public driver information.
* @state: operational state of the bus interface.
@@ -80,6 +87,7 @@ struct brcmf_bus {
struct brcmf_sdio_dev *sdio;
struct brcmf_usbdev *usb;
} bus_priv;
+ enum brcmf_bus_protocol_type proto_type;
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
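The new proto_type field lets the common layer choose its framing at attach time instead of assuming BCDC. A hedged sketch of a dispatcher keyed on it (the function is hypothetical; only BCDC is wired up by the SDIO bus in this series):

/* Hypothetical dispatcher in the common layer, keyed on what the bus
 * driver advertised in brcmf_bus::proto_type.
 */
static int sketch_proto_attach(struct brcmf_bus *bus_if)
{
	switch (bus_if->proto_type) {
	case BRCMF_PROTO_BCDC:
		return 0;		/* BCDC framing, set by the SDIO bus */
	case BRCMF_PROTO_MSGBUF:
		return -EOPNOTSUPP;	/* not implemented here */
	default:
		return -EINVAL;
	}
}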
@@ -260,9 +260,6 @@ struct rte_console {
#define MAX_HDR_READ (1 << 6)
#define MAX_RX_DATASZ 2048
-/* Maximum milliseconds to wait for F2 to come up */
-#define BRCMF_WAIT_F2RDY 3000
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
* for HT availability, it could take a couple hundred ms more, so
@@ -559,7 +556,7 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
-static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
enum brcmf_firmware_type type)
const struct firmware *fw;
@@ -624,8 +621,8 @@ r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- *regvar = brcmf_sdio_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + offset, &ret);
@@ -636,15 +633,15 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
- brcmf_sdio_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ brcmf_sdiod_regwl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + reg_offset,
+ regval, &ret);
-brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
+brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
u8 wr_val = 0, rd_val, cmp_val, bmask;
@@ -654,8 +651,8 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
if (err) {
brcmf_err("SDIO_AOS KSO write error: %d\n", err);
@@ -685,15 +682,15 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
* just one write attempt may fail,
* read it back until it matches written value
- rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- &err);
+ rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ &err);
if (((rd_val & bmask) == cmp_val) && !err)
brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
try_cnt, MAX_KSO_ATTEMPTS, err);
udelay(KSO_WAIT_US);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
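KSO writes can be dropped while the chip is still waking, so the driver writes, reads back, and retries until the readback matches. A generic sketch of that write-verify-retry pattern using the renamed register helpers (the retry count and delay are illustrative, not the driver's values):

/* Write-then-verify with bounded retries, the shape of
 * brcmf_sdio_kso_control().  Constants below are assumptions.
 */
#define SKETCH_MAX_TRIES	64
#define SKETCH_WAIT_US		50

static int sketch_write_verify(struct brcmf_sdio_dev *sdiodev, u32 reg,
			       u8 wr_val, u8 mask, u8 want)
{
	int try_cnt = 0, err;
	u8 rd_val;

	do {
		brcmf_sdiod_regwb(sdiodev, reg, wr_val, &err);
		rd_val = brcmf_sdiod_regrb(sdiodev, reg, &err);
		if (!err && (rd_val & mask) == want)
			return 0;
		udelay(SKETCH_WAIT_US);
	} while (try_cnt++ < SKETCH_MAX_TRIES);

	return err ? err : -ETIMEDOUT;
}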
@@ -704,7 +701,7 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
+static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
u8 clkctl, clkreq, devctl;
@@ -724,16 +721,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
/* Check current status */
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
brcmf_err("HT Avail read error: %d\n", err);
@@ -742,8 +739,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
brcmf_err("Devctl error setting CA: %d\n",
err);
@@ -751,28 +748,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FUNC1_CHIPCLKCSR,
if (time_after(jiffies, timeout))
@@ -805,16 +802,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
bus->clkstate = CLK_SDONLY;
brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
brcmf_err("Failed access turning clock off: %d\n",
@@ -826,7 +823,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
+static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
@@ -839,7 +836,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
+static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
#ifdef DEBUG
uint oldstate = bus->clkstate;
@@ -850,7 +847,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
/* Early exit if we're already there */
if (bus->clkstate == target) {
if (target == CLK_AVAIL) {
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
bus->activity = true;
@@ -860,32 +857,32 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
case CLK_AVAIL:
/* Make sure SD clock is available */
if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
+ brcmf_sdio_sdclk(bus, true);
/* Now request HT Avail on the backplane */
- brcmf_sdbrcm_htclk(bus, true, pendok);
+ brcmf_sdio_htclk(bus, true, pendok);
case CLK_SDONLY:
/* Remove HT request, or bring up SD clock */
else if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
+ brcmf_sdio_htclk(bus, false, false);
brcmf_err("request for %d -> %d\n",
bus->clkstate, target);
case CLK_NONE:
/* Make sure to remove HT request */
if (bus->clkstate == CLK_AVAIL)
/* Now remove the SD clock */
- brcmf_sdbrcm_sdclk(bus, false);
- brcmf_sdbrcm_wd_timer(bus, 0);
+ brcmf_sdio_sdclk(bus, false);
+ brcmf_sdio_wd_timer(bus, 0);
@@ -896,7 +893,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
-brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmf_dbg(TRACE, "Enter\n");
@@ -919,13 +916,13 @@ brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus)))
return -EBUSY;
- err = brcmf_sdbrcm_kso_control(bus, false);
+ err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
bus->idlecount = 0;
- err = brcmf_sdbrcm_kso_control(bus, true);
+ err = brcmf_sdio_kso_control(bus, true);
if (!err) {
/* Change state */
@@ -943,16 +940,16 @@ end:
/* control clocks */
if (sleep) {
if (!bus->sr_enabled)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
+ brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
u32 intstatus = 0;
u32 hmb_data;
@@ -1028,7 +1025,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
return intstatus;
-static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
+static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
uint retries = 0;
u16 lastrbc;
@@ -1040,18 +1037,18 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCHI, &err);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCHI, &err);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1088,7 +1085,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
/* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
struct sk_buff *p;
uint total;
@@ -1099,7 +1096,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
return total;
-static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
struct sk_buff *cur, *next;
@@ -1187,7 +1184,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if ((u16)(~(len ^ checksum))) {
brcmf_err("HW header checksum error\n");
bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
if (len < SDPCM_HDRLEN) {
@@ -1219,7 +1216,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
bus->sdcnt.rx_toolong++;
rd->len = 0;
return -EPROTO;
@@ -1238,7 +1235,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
return -ENXIO;
@@ -1311,7 +1308,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
u16 dlen, totlen;
u8 *dptr, num = 0;
@@ -1391,7 +1388,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst = pnext = NULL;
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
num = 0;
@@ -1414,16 +1411,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst = skb_peek(&bus->glom);
- dlen = (u16) brcmf_sdbrcm_glom_len(bus);
+ dlen = (u16) brcmf_sdio_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
 * packet and copy into the chain.
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
+ errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
+ &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1434,12 +1430,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (bus->glomerr++ < 3) {
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
@@ -1487,12 +1483,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Restore superframe header space */
skb_push(pfirst, sfdoff);
bus->cur_read.len = 0;
@@ -1536,8 +1532,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
return num;
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
- bool *pending)
+static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
+ bool *pending)
DECLARE_WAITQUEUE(wait, current);
int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
@@ -1558,7 +1554,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
return timeout;
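The control-response path pairs this wait with the wake helper below: the RX side fills bus->rxlen and wakes the ioctl caller sleeping on dcmd_resp_wait. A sketch of the same pairing expressed with wait_event_interruptible_timeout() for brevity (the driver open-codes it with DECLARE_WAITQUEUE):

/* Equivalent wait/wake pairing, condensed; names prefixed sketch_ are
 * hypothetical, the fields and timeout come from the driver above.
 */
static int sketch_wait_for_resp(struct brcmf_sdio *bus)
{
	return wait_event_interruptible_timeout(bus->dcmd_resp_wait,
						bus->rxlen != 0,
						msecs_to_jiffies(DCMD_RESP_TIMEOUT));
}

static void sketch_resp_arrived(struct brcmf_sdio *bus)
{
	if (waitqueue_active(&bus->dcmd_resp_wait))
		wake_up_interruptible(&bus->dcmd_resp_wait);
}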
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
+static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1566,7 +1562,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
static void
-brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
uint rdlen, pad;
u8 *buf = NULL, *rbuf;
@@ -1604,7 +1600,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
@@ -1612,15 +1608,12 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
/* Read remain of frame body */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
- SDIO_FUNC_2,
- F2SYNC, rbuf, rdlen);
+ sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
/* Control frame failures need retransmission */
@@ -1628,7 +1621,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("read %d control bytes failed: %d\n",
rdlen, sdret);
bus->sdcnt.rxc_errors++;
memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
@@ -1653,11 +1646,11 @@ gotpkt:
/* Awake any waiters */
- brcmf_sdbrcm_dcmd_resp_wake(bus);
+ brcmf_sdio_dcmd_resp_wake(bus);
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
+static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1694,7 +1687,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
+ cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rd->seq_num += cnt - 1;
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
@@ -1705,17 +1698,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
			/* read header first for unknown frame length */
if (!rd->len) {
- ret = brcmf_sdcard_recv_buf(bus->sdiodev,
- SDIO_FUNC_2, F2SYNC,
- bus->rxhdr,
- BRCMF_FIRSTREAD);
+ ret = brcmf_sdiod_recv_buf(bus->sdiodev,
+ bus->rxhdr, BRCMF_FIRSTREAD);
bus->sdcnt.f2rxhdrs++;
brcmf_err("RXHEADER FAILED: %d\n",
bus->sdcnt.rx_hdrfail++;
@@ -1734,9 +1724,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (rd->channel == SDPCM_CONTROL_CHANNEL) {
- brcmf_sdbrcm_read_control(bus, bus->rxhdr,
- rd->len,
- rd->dat_offset);
+ brcmf_sdio_read_control(bus, bus->rxhdr,
+ rd->len,
+ rd->dat_offset);
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
rd->len_nxtfrm = 0;
@@ -1750,14 +1740,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
head_read = BRCMF_FIRSTREAD;
- brcmf_pad(bus, &pad, &rd->len_left);
+ brcmf_sdio_pad(bus, &pad, &rd->len_left);
pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
bus->head_align);
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- brcmf_sdbrcm_rxfail(bus, false,
+ brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
@@ -1765,8 +1755,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
skb_pull(pkt, head_read);
pkt_align(pkt, rd->len_left, bus->head_align);
- ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pkt);
+ ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
@@ -1775,7 +1764,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
- brcmf_sdbrcm_rxfail(bus, true,
+ brcmf_sdio_rxfail(bus, true,
@@ -1800,7 +1789,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len,
roundup(rd_new.len, 16) >> 4);
@@ -1822,7 +1811,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Force retry w/normal header read */
- brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmf_sdio_rxfail(bus, false, true);
@@ -1847,7 +1836,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
@@ -1891,7 +1880,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
+brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@@ -2107,8 +2096,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
- uint chan)
+static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+ uint chan)
int i;
@@ -2121,8 +2110,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
- ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pktq);
+ ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
@@ -2131,17 +2119,17 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
bus->sdcnt.tx_sderrs++;
- SFC_WF_TERM, NULL);
+ SFC_WF_TERM, NULL);
for (i = 0; i < 3; i++) {
u8 hi, lo;
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
@@ -2160,7 +2148,7 @@ done:
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
+static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
struct sk_buff_head pktq;
@@ -2194,7 +2182,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
if (i == 0)
- ret = brcmf_sdbrcm_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+ ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
cnt += i;
/* In poll mode, need to check for other events */
@@ -2223,7 +2211,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
-static void brcmf_sdbrcm_bus_stop(struct device *dev)
+static void brcmf_sdio_bus_stop(struct device *dev)
u32 local_hostintmask;
u8 saveclk;
@@ -2243,7 +2231,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
/* Enable clock for device interrupts */
- brcmf_sdbrcm_bus_sleep(bus, false, false);
+ brcmf_sdio_bus_sleep(bus, false, false);
/* Disable and clear interrupts at the chip level also */
w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
@@ -2254,26 +2242,25 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdio_regrb(bus->sdiodev,
+ saveclk = brcmf_sdiod_regrb(bus->sdiodev,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ (saveclk | SBSDIO_FORCE_HT), &err);
brcmf_err("Failed to force clock for F2: err %d\n", err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
+ sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
/* Clear any pending interrupts now that F2 is disabled */
w_sdreg32(bus, local_hostintmask,
offsetof(struct sdpcmd_regs, intstatus));
/* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+ brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
/* Clear the data packet queues */
@@ -2282,20 +2269,20 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
/* Clear any held glomming stuff */
if (bus->glomd)
brcmu_pkt_buf_free_skb(bus->glomd);
/* Clear rx control and wake any waiters */
spin_lock_bh(&bus->rxctl_lock);
bus->rxlen = 0;
spin_unlock_bh(&bus->rxctl_lock);
/* Reset some F2 state stuff */
bus->rxskip = false;
bus->tx_seq = bus->rx_seq = 0;
-static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
@@ -2320,7 +2307,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, intstatus);
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+ val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
if (ret != 0)
val = 0;
@@ -2330,7 +2317,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
/* Clear interrupts */
if (val) {
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
@@ -2344,7 +2331,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
-static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
u32 newstatus = 0;
unsigned long intstatus;
@@ -2363,8 +2350,8 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Check for inconsistent device control */
brcmf_err("error reading DEVCTL: %d\n", err);
@@ -2372,8 +2359,8 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
brcmf_err("error reading CSR: %d\n",
@@ -2384,16 +2371,16 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
brcmf_err("error reading DEVCTL: %d\n",
brcmf_err("error writing DEVCTL: %d\n",
@@ -2404,7 +2391,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Make sure backplane clock is on */
- brcmf_sdbrcm_bus_sleep(bus, false, true);
+ brcmf_sdio_bus_sleep(bus, false, true);
/* Pending interrupt indicates new device status */
if (atomic_read(&bus->ipend) > 0) {
@@ -2435,7 +2422,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Handle host mailbox indication */
if (intstatus & I_HMB_HOST_INT) {
intstatus &= ~I_HMB_HOST_INT;
- intstatus |= brcmf_sdbrcm_hostmail(bus);
+ intstatus |= brcmf_sdio_hostmail(bus);
@@ -2480,16 +2467,15 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
set_bit(n, (unsigned long *)&bus->intstatus.counter);
- brcmf_sdbrcm_clrintr(bus);
+ brcmf_sdio_clrintr(bus);
if (data_ok(bus) && bus->ctrl_frame_stat &&
(bus->clkstate == CLK_AVAIL)) {
- err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len);
+ err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+ (u32)bus->ctrl_frame_len);
if (err < 0) {
/* On failure, abort the command and
@@ -2498,20 +2484,20 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
- SFC_WF_TERM, &err);
+ SFC_WF_TERM, &err);
- SBSDIO_FUNC1_WFRAMEBCHI,
- SBSDIO_FUNC1_WFRAMEBCLO,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ SBSDIO_FUNC1_WFRAMEBCLO,
@@ -2522,7 +2508,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->ctrl_frame_stat = false;
- brcmf_sdbrcm_wait_event_wakeup(bus);
+ brcmf_sdio_wait_event_wakeup(bus);
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2530,7 +2516,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
&& data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
- framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+ framecnt = brcmf_sdio_sendfromq(bus, framecnt);
txlimit -= framecnt;
@@ -2552,12 +2538,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->activity = false;
brcmf_dbg(SDIO, "idle state\n");
- brcmf_sdbrcm_bus_sleep(bus, true, false);
+ brcmf_sdio_bus_sleep(bus, true, false);
-static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
+static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -2566,7 +2552,7 @@ static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
return &bus->txq;
-static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
+static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
int ret = -EBADE;
uint datalen, prec;
@@ -2622,7 +2608,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
+static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2635,8 +2621,8 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read console log struct */
addr = bus->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
- sizeof(c->log_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
+ sizeof(c->log_le));
if (rv < 0)
return rv;
@@ -2661,7 +2647,7 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read the console buffer */
addr = le32_to_cpu(c->log_le.buf);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
@@ -2699,14 +2685,13 @@ break2:
-static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
- ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len);
+ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
/* On failure, abort the command and terminate the frame */
@@ -2714,18 +2699,18 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
if (hi == 0 && lo == 0)
@@ -2739,7 +2724,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
-brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
u8 *frame;
u16 len, pad;
@@ -2783,7 +2768,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
hd_info.len = (u16)msglen;
@@ -2827,7 +2812,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
- ret = brcmf_tx_frame(bus, frame, len);
+ ret = brcmf_sdio_tx_frame(bus, frame, len);
} while (ret < 0 && retries++ < TXRETRIES);
@@ -2837,7 +2822,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_dbg(INFO, "idle\n");
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ brcmf_sdio_clkctl(bus, CLK_NONE, true);
@@ -2871,8 +2856,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
@@ -2892,8 +2877,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
/* Read hndrte_shared structure */
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
@@ -2929,22 +2914,22 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain console information from device memory */
addr = sh->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
console_ptr = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
console_size = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
console_index = le32_to_cpu(sh_val);
@@ -2958,8 +2943,8 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain the console data from device */
conbuf[console_size] = '\0';
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
- console_size);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
+ console_size);
@@ -2996,8 +2981,8 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
- error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
- sizeof(struct brcmf_trap_info));
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
+ sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
@@ -3040,14 +3025,14 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
if (sh->assert_file_addr != 0) {
- error = brcmf_sdio_ramrw(bus->sdiodev, false,
- sh->assert_file_addr, (u8 *)file, 80);
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+ sh->assert_file_addr, (u8 *)file, 80);
if (sh->assert_exp_addr != 0) {
- sh->assert_exp_addr, (u8 *)expr, 80);
+ sh->assert_exp_addr, (u8 *)expr, 80);
@@ -3059,7 +3044,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
return simple_read_from_buffer(data, count, &pos, buf, res);
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
int error;
struct sdpcm_shared sh;
@@ -3080,8 +3065,8 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
-static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
+static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
int error = 0;
@@ -3122,7 +3107,7 @@ static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
struct brcmf_sdio *bus = f->private_data;
int res;
- res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+ res = brcmf_sdio_died_dump(bus, data, count, ppos);
if (res > 0)
*ppos += res;
return (ssize_t)res;
@@ -3147,7 +3132,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
@@ -3158,7 +3143,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
-brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
int timeleft;
uint rxlen = 0;
@@ -3171,7 +3156,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
/* Wait until control frame is available */
- timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+ timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
rxlen = bus->rxlen;
@@ -3188,13 +3173,13 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_err("resumed on timeout\n");
- brcmf_sdbrcm_checkdied(bus);
+ brcmf_sdio_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
brcmf_dbg(CTL, "resumed for unknown reason?\n");
if (rxlen)
@@ -3205,7 +3190,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
return rxlen ? (int)rxlen : -ETIMEDOUT;
-static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
+static bool brcmf_sdio_download_state(struct brcmf_sdio *bus, bool enter)
struct chip_info *ci = bus->ci;
@@ -3230,7 +3215,7 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
-static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus)
@@ -3238,7 +3223,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
int address;
int len;
- fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL)
return -ENOENT;
@@ -3252,8 +3237,8 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
while (offset < fw->size) {
len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
fw->size - offset;
- err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
err, len, address);
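Firmware download is a MEMBLOCK-sized copy loop over brcmf_sdiod_ramrw(). A condensed sketch of that loop, assuming the firmware has already been requested and MEMBLOCK is the driver's chunk size:

/* Sketch of the image write loop in brcmf_sdio_download_code_file();
 * request_firmware()/release_firmware() handling is elided.
 */
static int sketch_write_image(struct brcmf_sdio *bus,
			      const struct firmware *fw, u32 address)
{
	uint offset = 0;
	int err, len;

	while (offset < fw->size) {
		len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
		      fw->size - offset;
		err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
					(u8 *)&fw->data[offset], len);
		if (err) {
			brcmf_err("error %d on writing %d membytes at 0x%08x\n",
				  err, len, address);
			return err;
		}
		offset += len;
		address += len;
	}
	return 0;
}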
@@ -3278,8 +3263,8 @@ failure:
* by two NULs.
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
- const struct firmware *nv)
+static int brcmf_sdio_strip_nvram(struct brcmf_sdio *bus,
+ const struct firmware *nv)
char *varbuf;
char *dp;
@@ -3343,44 +3328,48 @@ err:
-static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus)
const struct firmware *nv;
- nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ nv = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
if (nv == NULL)
- ret = brcmf_process_nvram_vars(bus, nv);
+ ret = brcmf_sdio_strip_nvram(bus, nv);
release_firmware(nv);
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
- int bcmerror = -1;
+ int bcmerror = -EFAULT;
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- if (!brcmf_sdbrcm_download_state(bus, true)) {
+ if (!brcmf_sdio_download_state(bus, true)) {
brcmf_err("error placing ARM core in reset\n");
goto err;
- if (brcmf_sdbrcm_download_code_file(bus)) {
+ if (brcmf_sdio_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
- if (brcmf_sdbrcm_download_nvram(bus)) {
+ if (brcmf_sdio_download_nvram(bus)) {
brcmf_err("dongle nvram file download failed\n");
/* Take arm out of reset */
- if (!brcmf_sdbrcm_download_state(bus, false)) {
+ if (!brcmf_sdio_download_state(bus, false)) {
brcmf_err("error getting out of ARM core reset\n");
@@ -3388,10 +3377,12 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
bcmerror = 0;
err:
+ sdio_release_host(bus->sdiodev->func[1]);
-static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
+static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
u32 addr, reg;
@@ -3403,47 +3394,45 @@ static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
/* read PMU chipcontrol register 3*/
addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
return (bool)reg;
-static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
+static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
u8 val;
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
- val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
/* Add CMD14 Support */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+ (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
- SBSDIO_FORCE_HT, &err);
+ SBSDIO_FORCE_HT, &err);
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
@@ -3455,7 +3444,7 @@ static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
/* enable KSO bit */
-static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
+static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
@@ -3466,8 +3455,7 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (bus->ci->c_inf[1].rev < 12)
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
@@ -3476,8 +3464,8 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ val, &err);
brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
@@ -3488,25 +3476,7 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
-static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
- bool ret;
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
- sdio_release_host(bus->sdiodev->func[1]);
-static int brcmf_sdbrcm_bus_preinit(struct device *dev)
+static int brcmf_sdio_bus_preinit(struct device *dev)
@@ -3565,13 +3535,11 @@ done:
-static int brcmf_sdbrcm_bus_init(struct device *dev)
+static int brcmf_sdio_bus_init(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
- unsigned long timeout;
- u8 ready, enable;
int err, ret = 0;
@@ -3579,8 +3547,9 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* try to download image and nvram to the dongle */
if (bus_if->state == BRCMF_BUS_DOWN) {
- if (!(brcmf_sdbrcm_download_firmware(bus)))
- return -1;
+ err = brcmf_sdio_download_firmware(bus);
+ if (err)
+ return err;
if (!bus->sdiodev->bus_if->drvr)
@@ -3588,21 +3557,21 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Start the watchdog timer */
bus->sdcnt.tickcnt = 0;
/* Make sure backplane clock is on, needed to generate F2 interrupt */
if (bus->clkstate != CLK_AVAIL)
goto exit;
@@ -3612,56 +3581,42 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
offsetof(struct sdpcmd_regs, tosbmailboxdata));
- enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+ err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
- timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
- ready = 0;
- while (enable != ready) {
- ready = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_IORx, NULL);
- if (time_after(jiffies, timeout))
- else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
- /* prevent busy waiting if it takes too long */
- msleep_interruptible(20);
- brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
/* If F2 successfully enabled, set core and enable interrupts */
- if (ready == enable) {
+ if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask,
offsetof(struct sdpcmd_regs, hostintmask));
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
/* Disable F2 again */
- enable = SDIO_FUNC_ENABLE_1;
- if (brcmf_sdbrcm_sr_capable(bus)) {
- brcmf_sdbrcm_sr_init(bus);
+ if (brcmf_sdio_sr_capable(bus)) {
+ brcmf_sdio_sr_init(bus);
/* Restore previous clock setting */
- saveclk, &err);
+ saveclk, &err);
if (ret == 0) {
- ret = brcmf_sdio_intr_register(bus->sdiodev);
+ ret = brcmf_sdiod_intr_register(bus->sdiodev);
brcmf_err("intr register failed:%d\n", ret);
/* If we didn't come up, turn off backplane clock */
if (bus_if->state != BRCMF_BUS_DATA)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
exit:
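The hunk above drops the hand-rolled SDIO_CCCR_IOEx write and SDIO_CCCR_IORx polling loop in favour of the MMC core helper, which enables the function and waits for it to become ready itself. Standalone, the usage pattern is roughly (sketch; sdio_enable_func() must be called with the host claimed, which the surrounding init code already does):

	sdio_claim_host(bus->sdiodev->func[1]);
	err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
	sdio_release_host(bus->sdiodev->func[1]);
	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
	if (!err) {
		/* F2 is up: program hostintmask and the RX watermark */
	}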
@@ -3669,10 +3624,8 @@ exit:
-void brcmf_sdbrcm_isr(void *arg)
+void brcmf_sdio_isr(struct brcmf_sdio *bus)
- struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
if (!bus) {
@@ -3702,7 +3655,7 @@ void brcmf_sdbrcm_isr(void *arg)
queue_work(bus->brcmf_wq, &bus->datawork);
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
+static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
@@ -3726,9 +3679,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
u8 devpend;
- devpend = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_INTx,
+ devpend = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_INTx,
intstatus =
devpend & (INTR_STATUS_FUNC1 |
@@ -3758,8 +3711,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count -= bus->console_interval;
- if (brcmf_sdbrcm_readconsole(bus) < 0)
+ if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
@@ -3773,11 +3726,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (bus->activity) {
brcmf_dbg(SDIO, "idle\n");
@@ -3792,12 +3745,12 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
datawork);
while (atomic_read(&bus->dpc_tskcnt)) {
- brcmf_sdbrcm_dpc(bus);
+ brcmf_sdio_dpc(bus);
atomic_dec(&bus->dpc_tskcnt);
-static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
+static void brcmf_sdio_release_malloc(struct brcmf_sdio *bus)
@@ -3806,7 +3759,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
+static bool brcmf_sdio_probe_malloc(struct brcmf_sdio *bus)
@@ -3823,7 +3776,7 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
+brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
u8 clkctl = 0;
@@ -3836,18 +3789,18 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
* Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
- BRCMF_INIT_CLKCTL1, &err);
+ BRCMF_INIT_CLKCTL1, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3855,12 +3808,12 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
brcmf_err("brcmf_sdio_chip_attach failed!\n");
- if (brcmf_sdbrcm_kso_init(bus)) {
+ if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
@@ -3879,33 +3832,33 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, &err);
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdio_regwb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
pmucontrol);
- reg_val = brcmf_sdio_regrl(bus->sdiodev,
- reg_addr,
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+ reg_addr,
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- reg_val,
+ reg_val,
@@ -3935,21 +3888,20 @@ fail:
return false;
-static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
+static bool brcmf_sdio_probe_init(struct brcmf_sdio *bus)
/* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
@@ -3970,7 +3922,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
-brcmf_sdbrcm_watchdog_thread(void *data)
+brcmf_sdio_watchdog_thread(void *data)
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
@@ -3980,7 +3932,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus);
+ brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
@@ -3990,7 +3942,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
-brcmf_sdbrcm_watchdog(unsigned long data)
+brcmf_sdio_watchdog(unsigned long data)
@@ -4003,14 +3955,14 @@ brcmf_sdbrcm_watchdog(unsigned long data)
-static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
+static void brcmf_sdio_release_dongle(struct brcmf_sdio *bus)
if (bus->ci) {
brcmf_sdio_chip_detach(&bus->ci);
if (bus->vars && bus->varsz)
@@ -4021,53 +3973,23 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Disconnected\n");
-/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
- brcmf_dbg(TRACE, "Enter\n");
- if (bus) {
- /* De-register interrupt handler */
- brcmf_sdio_intr_unregister(bus->sdiodev);
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
- if (bus->sdiodev->bus_if->drvr) {
- brcmf_detach(bus->sdiodev->dev);
- brcmf_sdbrcm_release_dongle(bus);
- brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
- brcmf_sdbrcm_release_malloc(bus);
- kfree(bus->hdrbuf);
- kfree(bus);
- brcmf_dbg(TRACE, "Disconnected\n");
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
- .stop = brcmf_sdbrcm_bus_stop,
- .preinit = brcmf_sdbrcm_bus_preinit,
- .init = brcmf_sdbrcm_bus_init,
- .txdata = brcmf_sdbrcm_bus_txdata,
- .txctl = brcmf_sdbrcm_bus_txctl,
- .rxctl = brcmf_sdbrcm_bus_rxctl,
- .gettxq = brcmf_sdbrcm_bus_gettxq,
+ .stop = brcmf_sdio_bus_stop,
+ .preinit = brcmf_sdio_bus_preinit,
+ .init = brcmf_sdio_bus_init,
+ .txdata = brcmf_sdio_bus_txdata,
+ .txctl = brcmf_sdio_bus_txctl,
+ .rxctl = brcmf_sdio_bus_rxctl,
+ .gettxq = brcmf_sdio_bus_gettxq,
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
struct brcmf_sdio *bus;
- /* We make an assumption about address window mappings:
- * regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
@@ -4101,8 +4023,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* attempt to attach to the dongle */
- if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
- brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
+ if (!(brcmf_sdio_probe_attach(bus))) {
+ brcmf_err("brcmf_sdio_probe_attach failed\n");
@@ -4114,11 +4036,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Set up the watchdog timer */
init_timer(&bus->timer);
bus->timer.data = (unsigned long)bus;
- bus->timer.function = brcmf_sdbrcm_watchdog;
+ bus->timer.function = brcmf_sdio_watchdog;
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
- bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_watchdog");
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
@@ -4144,13 +4066,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Allocate buffers */
- if (!(brcmf_sdbrcm_probe_malloc(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
+ if (!(brcmf_sdio_probe_malloc(bus))) {
+ brcmf_err("brcmf_sdio_probe_malloc failed\n");
- if (!(brcmf_sdbrcm_probe_init(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_init failed\n");
+ if (!(brcmf_sdio_probe_init(bus))) {
+ brcmf_err("brcmf_sdio_probe_init failed\n");
@@ -4167,24 +4089,38 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
return bus;
- brcmf_sdbrcm_release(bus);
+ brcmf_sdio_remove(bus);
-void brcmf_sdbrcm_disconnect(void *ptr)
+/* Detach and free everything */
+void brcmf_sdio_remove(struct brcmf_sdio *bus)
- struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
- if (bus)
+ if (bus) {
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
+ brcmf_sdio_release_dongle(bus);
+ brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
+ brcmf_sdio_release_malloc(bus);
+ kfree(bus->hdrbuf);
+ kfree(bus);
-brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid) {
@@ -68,7 +68,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -86,7 +86,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
@@ -155,7 +155,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
sizeof(drvr->proto_buf));
@@ -195,7 +195,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
mutex_unlock(&drvr->proto_block);
@@ -278,7 +278,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
drvr->proto_buf, sizeof(drvr->proto_buf));
@@ -317,7 +317,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
@@ -838,7 +838,7 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
+static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u8 *wlh;
@@ -887,9 +887,7 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
if (fillers)
memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
- brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
- data_offset >> 2, skb);
+ return (u8)(data_offset >> 2);
static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
@@ -897,10 +895,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
int fifo, bool send_immediately)
- struct brcmf_bus *bus;
struct brcmf_skbuff_cb *skcb;
s32 err;
u32 len;
+ u8 data_offset;
+ int ifidx;
/* check delayedQ and suppressQ in one call using bitmap */
if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
@@ -928,13 +927,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
skcb->htod = 0;
skcb->htod_seq = 0;
- bus = fws->drvr->bus_if;
- err = brcmf_fws_hdrpush(fws, skb);
- if (err == 0) {
- brcmf_fws_unlock(fws);
- err = brcmf_bus_txdata(bus, skb);
- brcmf_fws_lock(fws);
+ data_offset = brcmf_fws_hdrpush(fws, skb);
+ ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+ brcmf_fws_unlock(fws);
+ err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+ brcmf_fws_lock(fws);
brcmu_pkt_buf_free_skb(skb);
@@ -1393,7 +1390,7 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
entry->generation = genbit;
ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (ret == 0)
+ if (ret == 0) {
brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
brcmf_skbcb(skb)->htod_seq = seq;
if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
@@ -1404,6 +1401,8 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
skb);
if (ret != 0) {
/* suppress q is full or hdrpull failed, drop this packet */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
@@ -1717,7 +1716,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
-static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
@@ -1735,7 +1734,7 @@ static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
- brcmf_fws_hdrpush(fws, p);
+ return brcmf_fws_hdrpush(fws, p);
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
@@ -1803,20 +1802,21 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct brcmf_fws_mac_descriptor *entry;
- struct brcmf_bus *bus = fws->drvr->bus_if;
int rc;
u8 ifidx;
entry = skcb->mac;
if (IS_ERR(entry))
return PTR_ERR(entry);
- brcmf_fws_precommit_skb(fws, fifo, skb);
+ data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
entry->transit_count++;
if (entry->suppressed)
entry->suppr_transit_count++;
brcmf_fws_unlock(fws);
- rc = brcmf_bus_txdata(bus, skb);
+ rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
@@ -1977,10 +1977,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
&skb, true);
ifidx = brcmf_skb_if_flags_get_field(skb,
INDEX);
- brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
- /* Use bus module to send data frame */
+ /* Use proto layer to send data frame */
- ret = brcmf_bus_txdata(drvr->bus_if, skb);
+ ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_txfinalize(drvr, skb, false);
@@ -39,7 +39,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
if (brcmf_proto_bcdc_attach(drvr))
- if ((proto->hdrpush == NULL) || (proto->hdrpull == NULL) ||
+ if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
(proto->query_dcmd == NULL) || (proto->set_dcmd == NULL)) {
brcmf_err("Not all proto handlers have been installed\n");
@@ -17,14 +17,14 @@
#define BRCMFMAC_PROTO_H
struct brcmf_proto {
- void (*hdrpush)(struct brcmf_pub *drvr, int ifidx, u8 offset,
- struct sk_buff *skb);
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
struct sk_buff *skb);
int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
void *buf, uint len);
int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
uint len);
+ int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *skb);
void *pd;
@@ -32,11 +32,6 @@ struct brcmf_proto {
int brcmf_proto_attach(struct brcmf_pub *drvr);
void brcmf_proto_detach(struct brcmf_pub *drvr);
-static inline void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
- u8 offset, struct sk_buff *skb)
- drvr->proto->hdrpush(drvr, ifidx, offset, skb);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
u8 *ifidx, struct sk_buff *skb)
@@ -52,6 +47,11 @@ static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
+ u8 offset, struct sk_buff *skb)
+ return drvr->proto->txdata(drvr, ifidx, offset, skb);
#endif /* BRCMFMAC_PROTO_H */
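With hdrpush removed from the ops table, a tx caller now passes the interface index and a data offset (in 4-byte words) to the protocol layer, which prepends its own header and forwards the skb to the bus. A sketch of both sides; brcmf_proto_bcdc_txdata and brcmf_proto_bcdc_hdrpush are assumed names for the BCDC implementation, which this diff does not show:

	/* caller side, e.g. fwsignal */
	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
	rc = brcmf_proto_txdata(drvr, ifidx, data_offset, skb);

	/* assumed BCDC handler installed as drvr->proto->txdata */
	static int brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx,
					   u8 offset, struct sk_buff *skb)
	{
		brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, skb);
		return brcmf_bus_txdata(drvr->bus_if, skb);
	}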
@@ -112,9 +112,9 @@ brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh),
return SBCOREREV(regdata);
@@ -140,9 +140,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
if (idx == BRCMF_MAX_CORENUM)
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
@@ -160,13 +160,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
@@ -182,79 +182,79 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
base = ci->c_inf[idx].base;
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
* set target reject and spin until busy is clear
* (preserve core-specific bits)
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
+ CORE_SB(base, sbtmstatelow), NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata | SSB_TMSLOW_REJECT, NULL);
udelay(1);
- SPINWAIT((brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
+ SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL) &
+ SSB_TMSHIGH_BUSY), 100000);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_err("core state still busy\n");
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
if (regdata & SSB_IDLOW_INITIATOR) {
- CORE_SB(base, sbimstate),
+ CORE_SB(base, sbimstate),
regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
- SSB_IMSTATE_BUSY), 100000);
+ SSB_IMSTATE_BUSY), 100000);
/* set reset and reject while enabling the clocks */
regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
udelay(10);
/* clear the initiator reject bit */
regdata &= ~SSB_IMSTATE_REJECT;
/* leave reset and reject asserted */
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
@@ -270,9 +270,9 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
/* if core is already in reset, just return */
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
@@ -281,24 +281,24 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
* extra 10ms is taken into account for firmware load stage
* after 10300us carry on disabling the core anyway
- SPINWAIT(brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
- NULL), 10300);
+ SPINWAIT(brcmf_sdiod_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
+ NULL), 10300);
ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
if (regdata)
brcmf_err("disabling core 0x%x with reset status %x\n",
coreid, regdata);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
+ brcmf_sdiod_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET, NULL);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits, NULL);
+ brcmf_sdiod_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ core_bits, NULL);
usleep_range(10, 20);
@@ -325,47 +325,47 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
* set reset while enabling the clock and
* forcing them on throughout the core
- brcmf_sdio_regwl(sdiodev,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+ brcmf_sdiod_regwl(sdiodev,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
/* clear any serror */
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
if (regdata & SSB_TMSHIGH_SERR)
- 0, NULL);
+ 0, NULL);
- CORE_SB(ci->c_inf[idx].base, sbimstate),
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
/* clear reset and allow it to propagate throughout the core */
- brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
/* leave clock enabled */
- SSB_TMSLOW_CLOCK, NULL);
+ SSB_TMSLOW_CLOCK, NULL);
@@ -384,21 +384,21 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
/* now do initialization sequence */
- core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- core_bits | BCMA_IOCTL_CLK, NULL);
+ core_bits | BCMA_IOCTL_CLK, NULL);
@@ -438,7 +438,7 @@ static inline int brcmf_sdio_chip_cichk(struct chip_info *ci)
static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
+ struct chip_info *ci)
u32 regdata;
@@ -449,10 +449,10 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
* other ways of recognition should be added here.
ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = regs;
- CORE_CC_REG(ci->c_inf[0].base, chipid),
+ ci->c_inf[0].base = SI_ENUM_BASE;
+ CORE_CC_REG(ci->c_inf[0].base, chipid),
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
@@ -607,7 +607,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
brcmf_err("error writing for HT off\n");
@@ -615,8 +615,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -624,8 +624,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
return -EACCES;
- SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
@@ -635,11 +635,11 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
@@ -654,16 +654,16 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
+ ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, capabilities),
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
ci->pmucaps =
- brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
+ brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, pmucapabilities),
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
@@ -681,7 +681,7 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs)
+ struct chip_info **ci_ptr)
struct chip_info *ci;
@@ -697,16 +697,16 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci);
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
*ci_ptr = ci;
@@ -784,12 +784,12 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdio_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdio_regrl(sdiodev, addr, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdio_regwl(sdiodev, addr, cc_data_temp, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
@@ -816,8 +816,8 @@ brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
memset(nvram_ularray, 0xaa, nvram_sz);
/* Read the vars list to temp buffer for comparison */
- err = brcmf_sdio_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
- nvram_sz);
+ err = brcmf_sdiod_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
+ nvram_sz);
brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
err, nvram_sz, nvram_addr);
@@ -850,7 +850,7 @@ static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase;
/* Write the vars list */
- err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
+ err = brcmf_sdiod_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
@@ -874,8 +874,8 @@ static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
nvram_addr, nvram_sz, token);
/* Write the length token to the last word */
- if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
- (u8 *)&token_le, 4))
+ if (brcmf_sdiod_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
+ (u8 *)&token_le, 4))
@@ -891,7 +891,7 @@ brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0);
/* clear length token */
- brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
+ brcmf_sdiod_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
@@ -913,7 +913,7 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
@@ -942,11 +942,11 @@ brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
/* Write reset vector to address 0 */
- brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
- sizeof(ci->rst_vec));
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
+ sizeof(ci->rst_vec));
/* restore ARM */
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0);
@@ -224,7 +224,7 @@ struct sdpcmd_regs {
- struct chip_info **ci_ptr, u32 regs);
+ struct chip_info **ci_ptr);
void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u32 drivestrength);
@@ -164,9 +164,8 @@ struct brcmf_sdio;
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
- u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
u32 sbwad; /* Save backplane window address */
- void *bus;
+ struct brcmf_sdio *bus;
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
@@ -185,22 +184,19 @@ struct brcmf_sdio_dev {
/* Register/deregister interrupt handler. */
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
- int *ret);
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
-int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
- * addr: backplane address (i.e. >= regsva from attach)
* flags: backplane width, address increment, sync/async
* buf: pointer to memory data buffer
* nbytes: number of bytes to transfer to/from buf
@@ -210,17 +206,14 @@ int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
-int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen);
+ struct sk_buff_head *pktq);
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+ struct sk_buff_head *pktq, uint totlen);
/* Flags bits */
@@ -236,43 +229,16 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
- u8 *buf, uint nbytes);
-int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size);
+int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-/* platform specific/high level functions */
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
-/* read or write one byte using cmd52 */
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u8 *byte);
-/* read or write 2/4 bytes using cmd53 */
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u32 *word, uint nbyte);
-/* Watchdog timer interface for pm ops */
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdbrcm_disconnect(void *ptr);
-void brcmf_sdbrcm_isr(void *arg);
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdio_remove(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus);
-void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
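These prototypes are the renamed brcmf_sdiod_* accessors used throughout the hunks above: byte and 32-bit register reads/writes with an optional error out-parameter. A typical read-modify-write, as a sketch only:

	int err = 0;
	u8 clkcsr;

	clkcsr = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err)
		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  clkcsr | SBSDIO_FORCE_HT, &err);
	if (err)
		brcmf_err("error accessing SBSDIO_FUNC1_CHIPCLKCSR\n");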
@@ -1253,6 +1253,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->ops = &brcmf_usb_bus_ops;
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
+ bus->proto_type = BRCMF_PROTO_BCDC;
/* Attach to the common driver interface */
ret = brcmf_attach(dev);
@@ -48,7 +48,7 @@ il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
return p;
-ssize_t
+static ssize_t
il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
@@ -313,7 +313,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
@@ -403,7 +403,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
@@ -55,7 +55,7 @@ il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
@@ -467,7 +467,7 @@ il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
@@ -633,7 +633,7 @@ il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
@@ -31,7 +31,7 @@
+static void
il_clear_traffic_stats(struct il_priv *il)
memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
@@ -322,12 +322,6 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
- /* User space software may expect getting rfkill changes
- * even if interface is down, trans->down will leave the RF
- * kill interrupt enabled
- iwl_trans_stop_hw(priv->trans, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1313,7 +1313,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/* Reset chip to save power until we load uCode during "up". */
+ iwl_trans_stop_device(priv->trans);
priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
priv->eeprom_blob,
@@ -1458,7 +1458,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
dev_kfree_skb(priv->beacon_skb);
- iwl_trans_stop_hw(priv->trans, true);
+ iwl_trans_op_mode_leave(priv->trans);
ieee80211_free_hw(priv->hw);
@@ -389,13 +389,6 @@ struct iwl_lq_sta {
u8 last_bt_traffic;
-static inline u8 num_of_ant(u8 mask)
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
static inline u8 first_antenna(u8 mask)
if (mask & ANT_A)
@@ -368,6 +368,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
/* Total # bytes to be transmitted */
@@ -108,7 +108,7 @@ static const struct iwl_base_params iwl7000_base_params = {
static const struct iwl_ht_params iwl7000_ht_params = {
- .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -129,6 +129,12 @@ enum iwl_led_mode {
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+static inline u8 num_of_ant(u8 mask)
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
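num_of_ant() just counts the set antenna bits, e.g. num_of_ant(ANT_AB) == 2 and num_of_ant(ANT_ABC) == 3; moving it into iwl-config.h lets the NVM parsing code further down size the VHT beamformee STS field and decide on TXSTBC from the antenna mask.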
* @max_ll_items: max number of OTP blocks
@@ -156,12 +162,14 @@ struct iwl_base_params {
+ * @stbc: support Tx STBC and 1*SS Rx STBC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
* @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
+ const bool stbc;
bool use_rts_for_aggregation;
u8 ht40_bands;
@@ -322,6 +322,41 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
+static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
+ struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
+ struct iwl_fw_cipher_scheme *fwcs;
+ struct ieee80211_cipher_scheme *cs;
+ u32 cipher;
+ if (len < sizeof(*l) ||
+ len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
+ for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
+ fwcs = &l->cs[j];
+ cipher = le32_to_cpu(fwcs->cipher);
+ /* we skip schemes with zero cipher suite selector */
+ if (!cipher)
+ cs = &fw->cs[j++];
+ cs->cipher = cipher;
+ cs->iftype = BIT(NL80211_IFTYPE_STATION);
+ cs->hdr_len = fwcs->hdr_len;
+ cs->pn_len = fwcs->pn_len;
+ cs->pn_off = fwcs->pn_off;
+ cs->key_idx_off = fwcs->key_idx_off;
+ cs->key_idx_mask = fwcs->key_idx_mask;
+ cs->key_idx_shift = fwcs->key_idx_shift;
+ cs->mic_len = fwcs->mic_len;
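The entries are parsed straight into mac80211's struct ieee80211_cipher_scheme, so the op mode can presumably advertise them at registration time along these lines (a sketch; the hw->cipher_schemes / hw->n_cipher_schemes wiring is an assumption, not shown in this hunk):

	/* advertise the single optional firmware cipher scheme, if any */
	if (fw->cs[0].cipher) {
		hw->n_cipher_schemes = 1;
		hw->cipher_schemes = &fw->cs[0];
	}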
* Gets uCode section from tlv.
@@ -729,6 +764,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+ case IWL_UCODE_TLV_CSCHEME:
+ if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
+ goto invalid_tlv_len;
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
@@ -751,6 +751,13 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -125,6 +125,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
IWL_UCODE_TLV_NUM_OF_CPU = 27,
+ IWL_UCODE_TLV_CSCHEME = 28,
struct iwl_ucode_tlv {
@@ -92,6 +92,9 @@
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -113,7 +116,9 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
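Driver code gates features on these capability bits; a typical check looks roughly like this (sketch, assuming the usual fw->ucode_capa.flags bitfield):

	bool p2p_uapsd = !!(fw->ucode_capa.flags &
			    IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD);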
/* The default calibrate table size if not specified by firmware file */
@@ -209,6 +214,44 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+#define IWL_UCODE_MAX_CS 1
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+struct iwl_fw_cipher_scheme {
+ __le32 cipher;
+ u8 flags;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+ u8 hw_cipher;
+ * struct iwl_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+struct iwl_fw_cscheme_list {
+ u8 size;
+ struct iwl_fw_cipher_scheme cs[];
* struct iwl_fw - variables associated with the firmware
@@ -224,6 +267,7 @@ enum iwl_fw_phy_cfg {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offfset for runtime ucode.
* @mvm_fw: indicates this is MVM firmware
+ * @cipher_scheme: optional external cipher scheme.
struct iwl_fw {
u32 ucode_ver;
@@ -243,6 +287,8 @@ struct iwl_fw {
u32 phy_config;
bool mvm_fw;
+ struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
@@ -263,13 +263,20 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap)
+ int num_ants = num_of_ant(data->valid_rx_ant);
+ int bf_sts_cap = num_ants - 1;
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ bf_sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (num_ants > 1)
+ vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -283,15 +290,22 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
- if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
+ /* Max rate for Long GI NSS=2 80Mhz is 780Mbps */
+ vht_cap->vht_mcs.rx_highest = cpu_to_le16(780);
+ if (num_ants == 1 ||
+ cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
vht_cap->vht_mcs.rx_mcs_map |=
cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
+ /* Max rate for Long GI NSS=1 80Mhz is 390Mbps */
+ vht_cap->vht_mcs.rx_highest = cpu_to_le16(390);
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+ vht_cap->vht_mcs.tx_highest = vht_cap->vht_mcs.rx_highest;
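The 390/780 Mbps figures follow from VHT MCS 9 at 80 MHz with the long guard interval: 234 data subcarriers x 8 bits (256-QAM) x 5/6 coding / 4.0 us symbol = 390 Mbps per spatial stream, hence 780 Mbps for NSS=2; rx_highest/tx_highest simply advertise those caps.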
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
@@ -155,14 +155,12 @@ void iwl_opmode_deregister(const char *name);
* struct iwl_op_mode - operational mode
+ * @ops - pointer to its own ops
* This holds an implementation of the mac80211 / fw API.
- * @ops - pointer to its own ops
struct iwl_op_mode {
const struct iwl_op_mode_ops *ops;
- const struct iwl_trans *trans;
char op_mode_specific[0] __aligned(sizeof(void *));
@@ -102,6 +102,9 @@
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+/* Device NMI register */
+#define DEVICE_SET_NMI_REG 0x00a01c30
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
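DEVICE_SET_NMI_REG is a periphery register used to force the firmware into an NMI for debugging; it is presumably written through the standard periphery helper, e.g. (sketch):

	/* ask the firmware to take an NMI (debug aid) */
	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);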
@@ -70,6 +70,7 @@
#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
+#include "iwl-op-mode.h"
* DOC: Transport layer - what is it ?
@@ -100,8 +101,7 @@
* start_fw
* 5) Then when finished (or reset):
- * stop_fw (a.k.a. stop device for the moment)
- * stop_hw
+ * stop_device
* 6) Eventually, the free function will be called.
@@ -317,6 +317,24 @@ enum iwl_d3_status {
IWL_D3_STATUS_RESET,
+ * enum iwl_trans_status: transport status flags
+ * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+enum iwl_trans_status {
+ STATUS_SYNC_HCMD_ACTIVE,
+ STATUS_DEVICE_ENABLED,
+ STATUS_TPOWER_PMI,
+ STATUS_INT_ENABLED,
+ STATUS_RFKILL,
+ STATUS_FW_ERROR,
* struct iwl_trans_config - transport configuration
@@ -361,9 +379,7 @@ struct iwl_trans;
* @start_hw: starts the HW- from that point on, the HW can send interrupts
* May sleep
- * @stop_hw: stops the HW- from that point on, the HW will be in low power but
- * will still issue interrupt if the HW RF kill is triggered unless
- * op_mode_leaving is true.
+ * @op_mode_leave: Turn off the HW RF kill indication if on
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
@@ -371,8 +387,11 @@ struct iwl_trans;
* @fw_alive: called when the fw sends alive notification. If the fw provides
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
- * @stop_device:stops the whole device (embedded CPU put to reset)
- * May sleep
+ * @stop_device: stops the whole device (embedded CPU put to reset) and stops
+ * the HW. From that point on, the HW will be in low power but will still
+ * issue interrupt if the HW RF kill is triggered. This callback must do
+ * the right thing and not crash even if start_hw() was called but not
+ * start_fw(). May sleep
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
@@ -418,7 +437,7 @@ struct iwl_trans;
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
+ void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
@@ -479,6 +498,7 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
+ * @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -499,6 +519,7 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
+ unsigned long status;
u32 hw_rev;
@@ -540,15 +561,14 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
return trans->ops->start_hw(trans);
-static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
might_sleep();
- trans->ops->stop_hw(trans, op_mode_leaving);
+ if (trans->ops->op_mode_leave)
+ trans->ops->op_mode_leave(trans);
- if (op_mode_leaving)
- trans->op_mode = NULL;
+ trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
@@ -570,6 +590,7 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+ clear_bit(STATUS_FW_ERROR, &trans->status);
return trans->ops->start_fw(trans, fw, run_in_rfkill);
@@ -601,6 +622,9 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
@@ -640,6 +664,9 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
@@ -760,7 +787,8 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
- trans->ops->set_pmi(trans, state);
+ if (trans->ops->set_pmi)
+ trans->ops->set_pmi(trans, state);
static inline void
@@ -780,6 +808,16 @@ iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
__release(nic_access);
+static inline void iwl_trans_fw_error(struct iwl_trans *trans)
+ if (WARN_ON_ONCE(!trans->op_mode))
+ /* prevent double restarts due to the same erroneous FW */
+ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
+ iwl_op_mode_nic_error(trans->op_mode);
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
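STATUS_FW_ERROR ties these hunks together: a transport reports an error exactly once through iwl_trans_fw_error(), iwl_trans_start_fw() clears the bit, and iwl_trans_send_cmd()/iwl_trans_tx() refuse to run while it is set. The reporting site inside a transport then reduces to something like this (illustrative sketch; the error condition itself is hypothetical):

	/* in the transport's error interrupt path */
	if (hw_reported_fw_error)
		iwl_trans_fw_error(trans);	/* notifies the op_mode only once */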
@@ -1,10 +1,10 @@
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
-iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += led.o tt.o
-iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
@@ -183,15 +183,29 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ * Update SF - Disable if needed. if this fails, SF might still be on
+ * while many macs are bound, which is forbidden - so fail the binding.
+ if (iwl_mvm_sf_update(mvm, vif, false))
return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+ ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+ if (!ret)
+ if (iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
@@ -396,7 +396,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
- BT_VALID_TXRX_MAX_FREQ_0);
+ BT_VALID_TXRX_MAX_FREQ_0 |
+ BT_VALID_SYNC_TO_SCO);
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
@@ -514,7 +515,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (IS_ERR_OR_NULL(sta))
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* nothing to do */
if (mvmsta->bt_reduced_txpower == enable)
@@ -846,7 +847,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
data->num_bss_ifaces++;
@@ -917,11 +918,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
- BT_LOW_TRAFFIC)
+ BT_HIGH_TRAFFIC)
return LINK_QUAL_AGG_TIME_LIMIT_DEF;
lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
@@ -936,7 +937,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
BT_HIGH_TRAFFIC)
@@ -1216,10 +1216,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (len >= sizeof(u32) * 2) {
mvm->d3_test_pme_ptr =
le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- } else if (test) {
- /* in test mode we require the pointer */
- ret = -EIO;
iwl_free_resp(&d3_cfg_cmd);
@@ -1231,10 +1227,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->aux_sta.sta_id = old_aux_sta_id;
mvm_ap_sta->sta_id = old_ap_sta_id;
mvmvif->ap_sta_id = old_ap_sta_id;
- out_noreset:
- kfree(key_data.rsc_tsc);
ieee80211_restart_hw(mvm->hw);
+ out_noreset:
+ kfree(key_data.rsc_tsc);
mutex_unlock(&mvm->mutex);
@@ -1537,10 +1534,16 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
.status = status,
+ u32 disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
if (!status || !vif->bss_conf.bssid)
+ if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
/* find last GTK that we used initially, if any */
gtkdata.find_phase = true;
ieee80211_iter_keys(mvm->hw, vif,
@@ -1805,6 +1808,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
/* has unlocked the mutex, so skip that */
@@ -1861,6 +1868,7 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
mvm->d3_test_active = true;
+ mvm->keep_vif = NULL;
@@ -1871,10 +1879,14 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
u32 pme_asserted;
while (true) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
+ /* read pme_ptr if available */
+ if (mvm->d3_test_pme_ptr) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
if (msleep_interruptible(100))
@@ -1885,6 +1897,10 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
+ /* skip the one we keep connection on */
+ if (_data == vif)
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_connection_loss(vif);
@@ -1911,7 +1927,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, NULL);
+ iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
ieee80211_wake_queues(mvm->hw);
@@ -0,0 +1,546 @@
+/******************************************************************************
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ * BSD LICENSE
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include "mvm.h"
+#include "debugfs.h"
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ enum iwl_dbgfs_pm_mask param, int val)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+ dbgfs_pm->mask |= param;
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ struct ieee80211_hw *hw = mvm->hw;
+ int dtimper = hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ dbgfs_pm->keep_alive_seconds = val;
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+ IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+ dbgfs_pm->disable_power_off = val;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+ IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+ dbgfs_pm->uapsd_misbehaving = val;
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_pm_mask param;
+ int val, ret;
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
+ param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+ POWER_LPRX_RSSI_THRESHOLD_MIN)
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+ } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+ param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ struct ieee80211_vif *vif = file->private_data;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos;
+ pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+ u8 ap_sta_id;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int pos = 0;
+ int i;
+ ap_sta_id = mvmvif->ap_sta_id;
+ pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+ mvmvif->id, mvmvif->color);
+ pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+ vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+ for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+ i, mvmvif->queue_params[i].txop,
+ mvmvif->queue_params[i].cw_min,
+ mvmvif->queue_params[i].cw_max,
+ mvmvif->queue_params[i].aifs,
+ mvmvif->queue_params[i].uapsd);
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ ap_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta;
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ "ap_sta_id %d - reduced Tx power %d\n",
+ ap_sta_id,
+ mvm_sta->bt_reduced_txpower);
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ "idle rx chains %d, active rx chains: %d\n",
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ rcu_read_unlock();
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+ dbgfs_bf->mask |= param;
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+ enum iwl_dbgfs_bf_mask param;
+ int value, ret = 0;
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (value < 0 || value > 1)
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+ cmd.bf_enable_beacon_filter = 0;
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ le32_to_cpu(cmd.bf_debug_flag));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+ le32_to_cpu(cmd.bf_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+ le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ char buf[100];
+ * Check if debugfs directory already exists before creating it.
+ * This may happen when, for example, resetting hw or suspend-resume
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+ mvmvif->mvm = mvm;
+ if (!mvmvif->dbgfs_dir) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
+ dbgfs_dir->d_name.name);
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+ S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ * Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+ snprintf(buf, 100, "../../../%s/%s/%s/%s",
+ dbgfs_dir->d_parent->d_parent->d_name.name,
+ dbgfs_dir->d_parent->d_name.name,
+ dbgfs_dir->d_name.name,
+ mvmvif->dbgfs_dir->d_name.name);
+ mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+ mvm->debugfs_dir, buf);
+ if (!mvmvif->dbgfs_slink)
+ IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
+err:
+ IWL_ERR(mvm, "Can't create debugfs entity\n");
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ debugfs_remove(mvmvif->dbgfs_slink);
+ mvmvif->dbgfs_slink = NULL;
+ debugfs_remove_recursive(mvmvif->dbgfs_dir);
+ mvmvif->dbgfs_dir = NULL;
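The new pm_params and bf_params nodes take "keyword=value" strings (for example "keep_alive=10" or "bf_enable_beacon_filter=0" written from userspace); each recognised keyword sets a bit in the per-vif dbgfs mask so command builders can tell an explicit debugfs override from a default. A hedged sketch of how such an override would typically be consumed (the firmware-command field name is an assumption, the real consumers live in power.c, which is not part of this excerpt):

/* Hypothetical consumer: honour the debugfs value only when its bit
 * was set through iwl_dbgfs_update_pm(). */
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
	cmd->keep_alive_seconds =
		cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);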
@@ -63,30 +63,18 @@
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
-struct iwl_dbgfs_mvm_ctx {
- struct iwl_mvm *mvm;
- struct ieee80211_vif *vif;
-static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
- struct iwl_mvm *mvm = file->private_data;
- char buf[16];
- int buf_size, ret;
u32 scd_q_msk;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
if (sscanf(buf, "%x", &scd_q_msk) != 1)
@@ -99,24 +87,15 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
-static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
- char buf[8];
- int buf_size, sta_id, drain, ret;
+ int sta_id, drain, ret;
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
@@ -194,20 +173,11 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
-static ssize_t iwl_dbgfs_sram_write(struct file *file,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
- char buf[64];
- int buf_size;
u32 offset, len;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
if ((offset & 0x3) || (len & 0x3))
@@ -267,22 +237,14 @@ static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
- char buf[64] = {};
- int val;
+ int ret, val;
if (!mvm->ucode_loaded)
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
if (!strncmp("disable_power_off_d0=", buf, 21)) {
if (sscanf(buf + 21, "%d", &val) != 1)
@@ -302,212 +264,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
return ret ?: count;
-static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
- enum iwl_dbgfs_pm_mask param, int val)
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
- dbgfs_pm->mask |= param;
- switch (param) {
- case MVM_DEBUGFS_PM_KEEP_ALIVE: {
- struct ieee80211_hw *hw = mvm->hw;
- int dtimper = hw->conf.ps_dtim_period ?: 1;
- int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
- if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
- IWL_WARN(mvm,
- "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
- val * MSEC_PER_SEC, 3 * dtimper_msec);
- dbgfs_pm->keep_alive_seconds = val;
- case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
- IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
- val ? "enabled" : "disabled");
- dbgfs_pm->skip_over_dtim = val;
- case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
- IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
- dbgfs_pm->skip_dtim_periods = val;
- case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
- dbgfs_pm->rx_data_timeout = val;
- case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
- dbgfs_pm->tx_data_timeout = val;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- case MVM_DEBUGFS_PM_LPRX_ENA:
- IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
- dbgfs_pm->lprx_ena = val;
- case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
- IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
- dbgfs_pm->lprx_rssi_threshold = val;
- case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
- IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
- dbgfs_pm->snooze_ena = val;
-static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- enum iwl_dbgfs_pm_mask param;
- char buf[32] = {};
- if (!strncmp("keep_alive=", buf, 11)) {
- if (sscanf(buf + 11, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_KEEP_ALIVE;
- } else if (!strncmp("skip_over_dtim=", buf, 15)) {
- if (sscanf(buf + 15, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
- } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
- } else if (!strncmp("rx_data_timeout=", buf, 16)) {
- if (sscanf(buf + 16, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
- } else if (!strncmp("tx_data_timeout=", buf, 16)) {
- param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
- } else if (!strncmp("lprx=", buf, 5)) {
- if (sscanf(buf + 5, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_LPRX_ENA;
- } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
- if (sscanf(buf + 20, "%d", &val) != 1)
- if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
- POWER_LPRX_RSSI_THRESHOLD_MIN)
- param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
- } else if (!strncmp("snooze_enable=", buf, 14)) {
- if (sscanf(buf + 14, "%d", &val) != 1)
- param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
- mutex_lock(&mvm->mutex);
- iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
- char buf[512];
- int bufsz = sizeof(buf);
- int pos;
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
- u8 ap_sta_id;
- struct ieee80211_chanctx_conf *chanctx_conf;
- int pos = 0;
- ap_sta_id = mvmvif->ap_sta_id;
- pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
- mvmvif->id, mvmvif->color);
- pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
- vif->bss_conf.bssid);
- pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
- for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
- i, mvmvif->queue_params[i].txop,
- mvmvif->queue_params[i].cw_min,
- mvmvif->queue_params[i].cw_max,
- mvmvif->queue_params[i].aifs,
- mvmvif->queue_params[i].uapsd);
- if (vif->type == NL80211_IFTYPE_STATION &&
- ap_sta_id != IWL_MVM_STATION_COUNT) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvm_sta;
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- mvm_sta = (void *)sta->drv_priv;
- "ap_sta_id %d - reduced Tx power %d\n",
- ap_sta_id, mvm_sta->bt_reduced_txpower);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf) {
- "idle rx chains %d, active rx chains: %d\n",
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
- rcu_read_unlock();
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
@@ -783,11 +539,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
#undef PRINT_STAT_LE32
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
@@ -804,6 +558,14 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+ iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
static ssize_t
iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
char __user *user_buf,
@@ -828,21 +590,11 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
-iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
u8 scan_rx_ant;
- /* get the argument from the user and check if it is valid */
if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
if (scan_rx_ant > ANT_ABC)
@@ -850,228 +602,17 @@ iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
- /* change the rx antennas for scan command */
mvm->scan_rx_ant = scan_rx_ant;
-static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
- enum iwl_dbgfs_bf_mask param, int value)
- struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
- dbgfs_bf->mask |= param;
- case MVM_DEBUGFS_BF_ENERGY_DELTA:
- dbgfs_bf->bf_energy_delta = value;
- case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
- dbgfs_bf->bf_roaming_energy_delta = value;
- case MVM_DEBUGFS_BF_ROAMING_STATE:
- dbgfs_bf->bf_roaming_state = value;
- case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
- dbgfs_bf->bf_temp_threshold = value;
- case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
- dbgfs_bf->bf_temp_fast_filter = value;
- case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
- dbgfs_bf->bf_temp_slow_filter = value;
- case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
- dbgfs_bf->bf_enable_beacon_filter = value;
- case MVM_DEBUGFS_BF_DEBUG_FLAG:
- dbgfs_bf->bf_debug_flag = value;
- case MVM_DEBUGFS_BF_ESCAPE_TIMER:
- dbgfs_bf->bf_escape_timer = value;
- case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
- dbgfs_bf->ba_enable_beacon_abort = value;
- case MVM_DEBUGFS_BA_ESCAPE_TIMER:
- dbgfs_bf->ba_escape_timer = value;
-static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
- enum iwl_dbgfs_bf_mask param;
- char buf[256];
- int value;
- if (!strncmp("bf_energy_delta=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- if (value < IWL_BF_ENERGY_DELTA_MIN ||
- value > IWL_BF_ENERGY_DELTA_MAX)
- param = MVM_DEBUGFS_BF_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
- if (sscanf(buf+24, "%d", &value) != 1)
- if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
- value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
- param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_state=", buf, 17)) {
- if (sscanf(buf+17, "%d", &value) != 1)
- if (value < IWL_BF_ROAMING_STATE_MIN ||
- value > IWL_BF_ROAMING_STATE_MAX)
- param = MVM_DEBUGFS_BF_ROAMING_STATE;
- } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
- if (sscanf(buf+18, "%d", &value) != 1)
- if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
- value > IWL_BF_TEMP_THRESHOLD_MAX)
- param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
- } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
- if (sscanf(buf+20, "%d", &value) != 1)
- if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
- value > IWL_BF_TEMP_FAST_FILTER_MAX)
- param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
- } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
- if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
- value > IWL_BF_TEMP_SLOW_FILTER_MAX)
- param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
- } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
- if (value < 0 || value > 1)
- param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
- } else if (!strncmp("bf_debug_flag=", buf, 14)) {
- if (sscanf(buf+14, "%d", &value) != 1)
- param = MVM_DEBUGFS_BF_DEBUG_FLAG;
- } else if (!strncmp("bf_escape_timer=", buf, 16)) {
- if (value < IWL_BF_ESCAPE_TIMER_MIN ||
- value > IWL_BF_ESCAPE_TIMER_MAX)
- param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
- } else if (!strncmp("ba_escape_timer=", buf, 16)) {
- if (value < IWL_BA_ESCAPE_TIMER_MIN ||
- value > IWL_BA_ESCAPE_TIMER_MAX)
- param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
- } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
- if (sscanf(buf+23, "%d", &value) != 1)
- param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
- iwl_dbgfs_update_bf(vif, param, value);
- if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
-static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
- const size_t bufsz = sizeof(buf);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter =
- cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
- .ba_enable_beacon_abort =
- cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
- };
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
- cmd.bf_enable_beacon_filter = cpu_to_le32(1);
- cmd.bf_enable_beacon_filter = 0;
- pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_roaming_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
- le32_to_cpu(cmd.bf_roaming_state));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
- le32_to_cpu(cmd.bf_temp_threshold));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_fast_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_slow_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
- le32_to_cpu(cmd.bf_enable_beacon_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
- le32_to_cpu(cmd.bf_debug_flag));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
- le32_to_cpu(cmd.bf_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
- le32_to_cpu(cmd.ba_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
- le32_to_cpu(cmd.ba_enable_beacon_abort));
#ifdef CONFIG_PM_SLEEP
-static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
- char buf[8] = {};
int store;
if (sscanf(buf, "%d", &store) != 1)
@@ -1124,61 +665,33 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
-#define MVM_DEBUGFS_READ_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \
- .write = iwl_dbgfs_##name##_write, \
-#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
-#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, vif, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
/* Device wide debugfs entries */
-MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
-/* Interface specific debugfs entries */
-MVM_DEBUGFS_READ_FILE_OPS(mac_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
char buf[100];
@@ -1196,6 +709,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
@@ -1206,6 +720,19 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+ if (!debugfs_create_blob("nvm_hw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_hw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_sw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_sw_blob))
+ if (!debugfs_create_blob("nvm_calib", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_calib_blob))
+ if (!debugfs_create_blob("nvm_prod", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_prod_blob))
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits, which removes the target).
@@ -1221,72 +748,3 @@ err:
IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- struct dentry *dbgfs_dir = vif->debugfs_dir;
- char buf[100];
- * Check if debugfs directory already exist before creating it.
- * This may happen when, for example, resetting hw or suspend-resume
- if (!dbgfs_dir || mvmvif->dbgfs_dir)
- mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_data = mvm;
- if (!mvmvif->dbgfs_dir) {
- IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
- dbgfs_dir->d_name.name);
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
- MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
- S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvmvif == mvm->bf_allowed_vif)
- MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
- S_IRUSR | S_IWUSR);
- * Create symlink for convenience pointing to interface specific
- * debugfs entries for the driver. For example, under
- * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
- * find
- * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
- snprintf(buf, 100, "../../../%s/%s/%s/%s",
- dbgfs_dir->d_parent->d_parent->d_name.name,
- dbgfs_dir->d_parent->d_name.name,
- dbgfs_dir->d_name.name,
- mvmvif->dbgfs_dir->d_name.name);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
- if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
-err:
- IWL_ERR(mvm, "Can't create debugfs entity\n");
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- debugfs_remove(mvmvif->dbgfs_slink);
- mvmvif->dbgfs_slink = NULL;
- debugfs_remove_recursive(mvmvif->dbgfs_dir);
- mvmvif->dbgfs_dir = NULL;
@@ -0,0 +1,101 @@
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+} \
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+ .write = _iwl_dbgfs_##name##_write, \
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
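The new debugfs.h factors out the copy_from_user() boilerplate: the generated _iwl_dbgfs_##name##_write() copies and NUL-terminates the user buffer into a fixed-size stack buffer, then calls the per-file handler with a plain char *. A hypothetical handler built on these macros (the name "example" and its semantics are illustrative only):

static ssize_t iwl_dbgfs_example_write(struct iwl_mvm *mvm, char *buf,
				       size_t count, loff_t *ppos)
{
	u32 val;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;

	/* ... act on val, typically under mvm->mutex ... */
	return count;
}

/* Generates the file_operations with a 16-byte stack buffer. */
MVM_DEBUGFS_WRITE_FILE_OPS(example, 16);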
@@ -127,6 +127,7 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+ BT_VALID_SYNC_TO_SCO = BIT(18),
@@ -85,6 +85,8 @@
* PBW Snoozing enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
@@ -94,6 +96,7 @@ enum iwl_power_flags {
POWER_FLAGS_BT_SCO_ENA = BIT(8),
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
#define IWL_POWER_VEC_SIZE 5
@@ -228,6 +231,19 @@ struct iwl_mac_power_cmd {
u8 reserved;
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * the associated AP is identified as improperly implementing the uAPSD protocol.
+ * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+struct iwl_uapsd_misbehaving_ap_notif {
+ __le32 sta_id;
+ u8 mac_id;
+ u8 reserved[3];
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
@@ -281,8 +281,31 @@ enum {
/* # entries in rate scale table to support Tx retries */
#define LQ_MAX_RETRY_NUM 16
-/* Link quality command flags, only this one is available */
-#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0)
+/* Link quality command flags bit fields */
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS 0
+#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS 1
+#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+#define LQ_FLAG_RTS_BW_SIG_POS 4
+#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW than requested in rates
+#define LQ_FLAG_DYNAMIC_BW_POS 6
+#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
* struct iwl_lq_cmd - link quality command
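With LQ_FLAG_SET_STA_TLC_RTS_MSK replaced by real bit fields, composing and decoding the flags byte looks roughly like this (the values are arbitrary and only illustrate the masks defined above):

/* Enable RTS, tag the command with color 3, allow dynamic BW. */
u8 flags = LQ_FLAG_USE_RTS_MSK |
	   ((3 << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK) |
	   LQ_FLAG_RTS_BW_SIG_DYNAMIC |
	   LQ_FLAG_DYNAMIC_BW_MSK;

/* Recover the color when matching a response to its LQ command. */
u8 color = (flags & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS;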
@@ -530,14 +530,13 @@ struct iwl_scan_offload_schedule {
* iwl_scan_offload_flags
- * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
- * ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
* IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
* on A band.
enum iwl_scan_offload_flags {
- IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0),
+ IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
@@ -138,7 +138,14 @@ enum iwl_sta_flags {
* enum iwl_sta_key_flag - key flags for the ADD_STA host command
- * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
* @STA_KEY_FLG_KEYID_MSK: the index of the key
@@ -152,6 +159,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_WEP = (1 << 0),
STA_KEY_FLG_CCM = (2 << 0),
STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_EXT = (4 << 0),
STA_KEY_FLG_CMAC = (6 << 0),
STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
STA_KEY_FLG_EN_MSK = (7 << 0),
@@ -132,6 +132,7 @@ enum iwl_tx_flags {
#define TX_CMD_SEC_WEP 0x01
#define TX_CMD_SEC_CCM 0x02
#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_EXT 0x04
#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
@@ -141,6 +141,7 @@ enum {
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
+ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
@@ -183,6 +184,7 @@ enum {
BT_PROFILE_NOTIFICATION = 0xce,
BT_COEX_CI = 0x5d,
+ REPLY_SF_CFG_CMD = 0xd1,
REPLY_BEACON_FILTERING_CMD = 0xd2,
REPLY_DEBUG_CMD = 0xf0,
@@ -1052,6 +1054,7 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
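The STA_KEY_FLG_EXT, TX_CMD_SEC_EXT and RX_MPDU_RES_STATUS_SEC_EXT_ENC values added above all back the single optional firmware cipher scheme that mac80211 learns about through hw->cipher_schemes later in this series. A hedged sketch of the Tx-side selection (keyconf and tx_cmd are assumed locals; the real switch lives in the key and Tx path code, not in this excerpt):

/* Sketch: mark a frame as protected by the fw-provided cipher scheme. */
if (mvm->hw->n_cipher_schemes &&
    keyconf->cipher == mvm->hw->cipher_schemes[0].cipher)
	tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;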
@@ -1131,6 +1134,7 @@ struct iwl_set_calib_default_cmd {
} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
#define MAX_PORT_ID_NUM 2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
* struct iwl_mcast_filter_cmd - configure multicast filter.
@@ -1363,4 +1367,65 @@ struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
struct mvm_statistics_general general;
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+enum iwl_sf_state {
+ SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+ SF_FULL_ON,
+ SF_UNINIT,
+ SF_INIT_OFF,
+ SF_HW_NUM_STATES
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+ SF_SCENARIO_SINGLE_UNICAST,
+ SF_SCENARIO_AGG_UNICAST,
+ SF_SCENARIO_MULTICAST,
+ SF_SCENARIO_BA_RESP,
+ SF_SCENARIO_TX_RESP,
+ SF_NUM_SCENARIO
+#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+struct iwl_sf_cfg_cmd {
+ enum iwl_sf_state state;
+ __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+ __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+ __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
#endif /* __fw_api_h__ */
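The Smart Fifo command carries one watermark per transient state and an aging/idle timer pair per scenario. A sketch of filling a couple of entries, under the assumption that index 0 holds the aging timer, index 1 the idle timer, and that watermark[] is indexed by transient state (the authoritative tables live in sf.c, which is not part of this excerpt):

struct iwl_sf_cfg_cmd sf_cmd = { .state = SF_FULL_ON };

sf_cmd.watermark[SF_FULL_ON] = cpu_to_le32(SF_W_MARK_LEGACY);
sf_cmd.full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][0] =
		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER);
sf_cmd.full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][1] =
		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER);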
@@ -241,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_complete)
+ if (WARN_ON_ONCE(mvm->init_ucode_complete))
iwl_init_notification_wait(&mvm->notif_wait,
@@ -287,7 +287,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
IWL_DEBUG_RF_KILL(mvm,
"jump over all phy activities due to RF kill\n");
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+ ret = 1;
/* Send TX valid antennas before triggering calibrations */
@@ -319,9 +320,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
error:
- if (!iwlmvm_mod_params.init_dbg) {
- iwl_trans_stop_device(mvm->trans);
- } else if (!mvm->nvm_data) {
+ if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -370,11 +369,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = -ERFKILL;
goto error;
- /* should stop & start HW since that INIT image just loaded */
- iwl_trans_stop_hw(mvm->trans, false);
- ret = iwl_trans_start_hw(mvm->trans);
+ if (!iwlmvm_mod_params.init_dbg) {
+ * should stop and start HW since that INIT
+ * image just loaded
+ iwl_trans_stop_device(mvm->trans);
+ ret = iwl_trans_start_hw(mvm->trans);
if (iwlmvm_mod_params.init_dbg)
@@ -386,6 +390,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ ret = iwl_mvm_sf_update(mvm, NULL, false);
+ IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
@@ -488,6 +488,40 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
*ofdm_rates = ofdm;
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+ struct iwl_mac_ctx_cmd *cmd)
+ /* for both sta and ap, ht_operation_mode holds the protection_mode */
+ u8 protection_mode = vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ /* The fw does not distinguish between ht and fat */
+ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+ IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+ * See section 9.23.3.1 of IEEE 802.11-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ IWL_ERR(mvm, "Illegal protection mode %d\n",
+ protection_mode);
static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct iwl_mac_ctx_cmd *cmd,
@@ -495,6 +529,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *chanctx;
+ bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION);
u8 cck_ack_rates, ofdm_ack_rates;
@@ -573,16 +609,13 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->protection_flags |=
cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
- * I think that we should enable these 2 flags regardless the HT PROT
- * fields in the HT IE, but I am not sure. Someone knows whom to ask?...
- if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ vif->bss_conf.use_cts_prot,
+ vif->bss_conf.ht_operation_mode);
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
- MAC_PROT_FLG_FAT_PROT);
+ if (ht_enabled)
+ iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
@@ -256,10 +256,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
- NL80211_FEATURE_P2P_GO_OPPPS;
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs && mvm->fw->cs->cipher) {
+ mvm->hw->n_cipher_schemes = 1;
+ mvm->hw->cipher_schemes = mvm->fw->cs;
if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
@@ -398,7 +405,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -470,7 +476,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->roc_done_wk);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -487,17 +492,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
-static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
- struct iwl_mvm *mvm = data;
- ret = iwl_mvm_power_disable(mvm, vif);
- IWL_ERR(mvm, "failed to disable power management\n");
static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
@@ -520,6 +514,20 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s8 tx_power)
+ /* FW is in charge of regulatory enforcement */
+ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+ .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+ .pwr_restriction = cpu_to_le16(tx_power),
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ sizeof(reduce_txpwr_cmd),
+ &reduce_txpwr_cmd);
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -540,26 +548,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * Note: the method below does not count the new interface being added
- * at this moment.
+ /* Counting number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
* The AP binding flow can be done only after the beacon
@@ -590,11 +581,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
- * Update power state on the new interface. Admittedly, based on
- * mac80211 logics this power update will disable power management
- iwl_mvm_power_update_mode(mvm, vif);
+ iwl_mvm_power_disable(mvm, vif);
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -655,9 +642,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_release:
mvm->vif_count--;
+ /* TODO: remove this when legacy PM will be discarded */
ieee80211_iterate_active_interfaces(
iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
@@ -743,21 +733,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = NULL;
- * Check if only one additional interface remains after removing
- * current one. Update power mode on the remaining interface.
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
- IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count == 1) {
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -766,23 +748,91 @@ out_release:
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- s8 tx_power)
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
- /* FW is in charge of regulatory enforcement */
- struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
- .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
- .pwr_restriction = cpu_to_le16(tx_power),
+struct iwl_mvm_mc_iter_data {
+ struct iwl_mvm *mvm;
+ int port_id;
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ struct iwl_mvm_mc_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ int ret, len;
+ /* if we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ cmd->port_id = data->port_id++;
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+ struct iwl_mvm_mc_iter_data iter_data = {
+ .mvm = mvm,
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
- sizeof(reduce_txpwr_cmd),
- &reduce_txpwr_cmd);
+ lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+ iwl_mvm_mc_iface_iterator, &iter_data);
-static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count = netdev_hw_addr_list_count(mc_list);
+ bool pass_all = false;
+ int len;
+ if (addr_count > MAX_MCAST_FILTERING_ADDRESSES) {
+ pass_all = true;
+ addr_count = 0;
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ if (pass_all) {
+ cmd->pass_all = 1;
+ return (u64)(unsigned long)cmd;
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr, ETH_ALEN);
+ cmd->count++;
static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
@@ -790,21 +840,22 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
- *total_flags = 0;
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
-static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
- struct iwl_mcast_filter_cmd mcast_filter_cmd = {
- .pass_all = 1,
- memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ /* replace previous configuration */
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = cmd;
- return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
- sizeof(mcast_filter_cmd),
- &mcast_filter_cmd);
+ iwl_mvm_recalc_multicast(mvm);
+out:
+ *total_flags = 0;
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
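The multicast rework splits the job between two callbacks: iwl_mvm_prepare_multicast() runs in atomic context (hence the GFP_ATOMIC allocation) and only builds the iwl_mcast_filter_cmd, handing it back to mac80211 as the u64 multicast cookie; iwl_mvm_configure_filter() then swaps it into mvm->mcast_filter_cmd under the mutex, and iwl_mvm_recalc_multicast() sends one copy per associated station interface, each with its own port_id. The pair is wired into mac80211 roughly like this (the surrounding ops table is defined elsewhere in the driver):

static const struct ieee80211_ops example_mvm_ops = {
	/* ... other callbacks ... */
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
};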
@@ -827,7 +878,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
- iwl_mvm_configure_mcast_filter(mvm, vif);
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
@@ -849,7 +899,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_protect_session(mvm, vif, dur, dur,
5 * dur);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ * If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
+ WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+ "Failed to update SF upon disassociation\n");
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -861,6 +921,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -881,7 +943,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
+ } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
+ BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
IWL_ERR(mvm, "failed to update power mode\n");
@@ -990,6 +1053,22 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
struct ieee80211_bss_conf *bss_conf,
u32 changes)
+ enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH;
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ if (changes & ht_change) {
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
@@ -1080,7 +1159,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
switch (cmd) {
case STA_NOTIFY_SLEEP:
@@ -1102,6 +1181,28 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+ * This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-ENOENT));
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
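iwl_mvm_sta_pre_rcu_remove() above poisons the fw_id_to_mac_id[] slot with ERR_PTR(-ENOENT) before mac80211 performs its RCU synchronisation, so lookups by firmware station id fail immediately instead of racing with the removal. The sketch below models only that poisoning in plain C, without RCU; the macros are simplified stand-ins for the kernel's ERR_PTR()/IS_ERR().

#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() encoding of a
 * small negative errno inside a pointer value - illustration only. */
#define ENOENT		2
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

#define MAX_STA 8

struct sta { int id; };

static struct sta *fw_id_to_mac_id[MAX_STA];

/* Lookup by firmware station id; an error-poisoned slot reads as "gone". */
static struct sta *sta_by_fw_id(int fw_id)
{
	struct sta *s = fw_id_to_mac_id[fw_id];

	return IS_ERR(s) ? NULL : s;
}

int main(void)
{
	struct sta a = { .id = 3 };

	fw_id_to_mac_id[3] = &a;

	/* pre-remove step: poison the slot so lookups stop finding the
	 * station even before the entry is finally torn down */
	if (fw_id_to_mac_id[3] == &a)
		fw_id_to_mac_id[3] = ERR_PTR(-ENOENT);

	printf("lookup after poison: %p\n", (void *)sta_by_fw_id(3));
	return 0;
}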
@@ -1149,7 +1250,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
if (ret == 0)
iwl_mvm_rs_rate_init(mvm, sta,
- mvmvif->phy_ctxt->channel->band);
+ mvmvif->phy_ctxt->channel->band,
+ true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
@@ -1187,6 +1289,17 @@ static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u32 changed)
+ changed & IEEE80211_RC_NSS_CHANGED)
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -1309,7 +1422,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ /* currently FW supports only one optional cipher scheme */
+ if (hw->n_cipher_schemes &&
+ hw->cipher_schemes->cipher == key->cipher)
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
@@ -1515,7 +1633,7 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -1559,7 +1677,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
iwl_mvm_bt_coex_vif_change(mvm);
@@ -1602,7 +1720,13 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
- * Setting the quota at this stage is only required for monitor
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ mvm->bound_vif_cnt++;
+ iwl_mvm_power_update_binding(mvm, vif, true);
+ /* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
* will handle quota settings.
@@ -1617,6 +1741,8 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
@@ -1650,6 +1776,9 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
@@ -1744,14 +1873,17 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.add_interface = iwl_mvm_mac_add_interface,
.remove_interface = iwl_mvm_mac_remove_interface,
.config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
.configure_filter = iwl_mvm_configure_filter,
.bss_info_changed = iwl_mvm_bss_info_changed,
.hw_scan = iwl_mvm_mac_hw_scan,
.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
@@ -163,6 +163,8 @@ struct iwl_mvm_power_ops {
struct ieee80211_vif *vif);
int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ void (*power_update_binding)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool assign);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
@@ -181,6 +183,7 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+ MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
struct iwl_dbgfs_pm {
@@ -193,6 +196,7 @@ struct iwl_dbgfs_pm {
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
+ bool uapsd_misbehaving;
int mask;
@@ -269,8 +273,8 @@ struct iwl_mvm_vif_bf_data {
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
* @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the requests of of differents parts of the driver, regard
- the desired smps mode.
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ * combined on update to yield the overall request to mac80211.
struct iwl_mvm_vif {
u16 id;
@@ -323,14 +327,19 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_slink;
- void *dbgfs_data;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+ /* FW identified misbehaving AP */
+ u8 uapsd_misbehaving_bssid[ETH_ALEN];
+ bool pm_prevented;
static inline struct iwl_mvm_vif *
@@ -479,6 +488,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* rx chain antennas set through debugfs for the scan command */
@@ -489,11 +499,19 @@ struct iwl_mvm {
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
+ /* last smart fifo state that was successfully sent to firmware */
+ enum iwl_sf_state sf_state;
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
bool disable_power_off;
bool disable_power_off_d3;
+ struct debugfs_blob_wrapper nvm_hw_blob;
+ struct debugfs_blob_wrapper nvm_sw_blob;
+ struct debugfs_blob_wrapper nvm_calib_blob;
+ struct debugfs_blob_wrapper nvm_prod_blob;
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -507,12 +525,6 @@ struct iwl_mvm {
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
- * This counter of created interfaces is referenced only in conjunction
- * with FW limitation related to power management. Currently PM is
- * supported only on a single interface.
- * IMPORTANT: this variable counts all interfaces except P2P device.
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@@ -531,6 +543,7 @@ struct iwl_mvm {
bool store_d3_resume_sram;
void *d3_resume_sram;
u32 d3_test_pme_ptr;
+ struct ieee80211_vif *keep_vif;
@@ -554,6 +567,11 @@ struct iwl_mvm {
u8 aux_queue;
u8 first_agg_queue;
u8 last_agg_queue;
+ u8 bound_vif_cnt;
+ /* Indicate if device power save is allowed */
+ bool ps_prevented;
/* Extract MVM priv from op_mode and _hw */
@@ -750,8 +768,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- u8 flags, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
/* power management */
static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
@@ -773,6 +790,19 @@ static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ bool assign)
+ if (mvm->pm_ops->power_update_binding)
+ mvm->pm_ops->power_update_binding(mvm, vif, assign);
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
@@ -864,4 +894,8 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool added_vif);
#endif /* __IWL_MVM_H__ */
@@ -443,6 +443,29 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
+ switch (section) {
+ case NVM_SECTION_TYPE_HW:
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ case NVM_SECTION_TYPE_SW:
+ mvm->nvm_sw_blob.data = temp;
+ mvm->nvm_sw_blob.size = ret;
+ case NVM_SECTION_TYPE_CALIBRATION:
+ mvm->nvm_calib_blob.data = temp;
+ mvm->nvm_calib_blob.size = ret;
+ case NVM_SECTION_TYPE_PRODUCTION:
+ mvm->nvm_prod_blob.data = temp;
+ mvm->nvm_prod_blob.size = ret;
+ WARN(1, "section: %d", section);
kfree(nvm_buffer);
@@ -236,6 +236,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
false),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+ RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
#undef RX_HANDLER
#define CMD(x) [x] = #x
@@ -311,6 +313,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
+ CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
#undef CMD
@@ -341,7 +344,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
- op_mode->trans = trans;
mvm = IWL_OP_MODE_GET_MVM(op_mode);
mvm->dev = trans->dev;
@@ -359,6 +361,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
+ mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
@@ -424,7 +427,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* there is no need to unnecessarily power up the NIC at driver load
if (iwlwifi_mod_params.nvm_file) {
- iwl_nvm_init(mvm);
+ err = iwl_nvm_init(mvm);
+ goto out_free;
err = iwl_trans_start_hw(mvm->trans);
@@ -432,16 +437,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
err = iwl_run_init_mvm_ucode(mvm, true);
+ iwl_trans_stop_device(trans);
/* returns 0 if successful, 1 if success but in rfkill */
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
scan_size = sizeof(struct iwl_scan_cmd) +
@@ -474,7 +476,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
if (!iwlwifi_mod_params.nvm_file)
- iwl_trans_stop_hw(trans, true);
+ iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
@@ -491,12 +493,14 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
+ mvm->mcast_filter_cmd = NULL;
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
- iwl_trans_stop_hw(mvm->trans, true);
+ iwl_trans_op_mode_leave(mvm->trans);
mvm->phy_db = NULL;
@@ -186,6 +186,92 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+ struct iwl_mac_power_cmd *cmd)
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ cmd->uapsd_ac_flags |= BIT(ac);
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd *cmd)
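iwl_mvm_power_configure_uapsd() above walks the ACs from VO down to BK, flags each U-APSD-enabled AC, picks the QNDP TID from the highest-priority AC without admission control (VO->6, VI->5, BE->0, BK->1), and only allows snooze when all four ACs are U-APSD. A compilable model of just that selection logic, using illustrative names rather than the driver's structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* AC ordering as in mac80211: VO is highest priority, BK lowest. */
enum { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

struct ac_params {
	bool uapsd;	/* AC is U-APSD enabled */
	bool acm;	/* admission control mandatory on this AC */
};

struct uapsd_cfg {
	uint8_t ac_flags;	/* one bit per U-APSD enabled AC */
	uint8_t qndp_tid;	/* TID used for QoS null data polls */
	bool snooze;		/* snooze only if every AC is U-APSD */
};

/* Per-AC TID used for the QNDP frame; mirrors the switch in the patch. */
static const uint8_t qndp_tid_for_ac[AC_NUM] = {
	[AC_VO] = 6, [AC_VI] = 5, [AC_BE] = 0, [AC_BK] = 1,
};

static struct uapsd_cfg build_uapsd_cfg(const struct ac_params q[AC_NUM])
{
	struct uapsd_cfg cfg = { 0 };
	bool tid_found = false;

	for (int ac = AC_VO; ac <= AC_BK; ac++) {
		if (!q[ac].uapsd)
			continue;
		cfg.ac_flags |= 1u << ac;
		/* QNDP TID - highest-priority AC with no admission control */
		if (!tid_found && !q[ac].acm) {
			cfg.qndp_tid = qndp_tid_for_ac[ac];
			tid_found = true;
		}
	}
	cfg.snooze = cfg.ac_flags == ((1u << AC_VO) | (1u << AC_VI) |
				      (1u << AC_BE) | (1u << AC_BK));
	return cfg;
}

int main(void)
{
	struct ac_params q[AC_NUM] = {
		[AC_VO] = { .uapsd = true, .acm = true },
		[AC_VI] = { .uapsd = true, .acm = false },
		[AC_BE] = { .uapsd = true, .acm = false },
		[AC_BK] = { .uapsd = true, .acm = false },
	};
	struct uapsd_cfg cfg = build_uapsd_cfg(q);

	/* VO is skipped for the TID because of admission control, so VI wins */
	printf("ac_flags=0x%x qndp_tid=%u snooze=%d\n",
	       (unsigned)cfg.ac_flags, (unsigned)cfg.qndp_tid, cfg.snooze);
	return 0;
}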
@@ -198,8 +284,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_ac_numbers ac;
- bool tid_found = false;
+ bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -217,7 +302,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+ mvm->ps_prevented)
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -227,7 +313,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.ps)
+ if (!vif->bss_conf.ps || mvmvif->pm_prevented)
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -269,81 +355,24 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
- for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
- if (!mvmvif->queue_params[ac].uapsd)
- continue;
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
- cmd->uapsd_ac_flags |= BIT(ac);
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ allow_uapsd = false;
- /* QNDP TID - the highest TID with no admission control */
- if (!tid_found && !mvmvif->queue_params[ac].acm) {
- tid_found = true;
- switch (ac) {
- case IEEE80211_AC_VO:
- cmd->qndp_tid = 6;
- case IEEE80211_AC_VI:
- cmd->qndp_tid = 5;
- case IEEE80211_AC_BE:
- cmd->qndp_tid = 0;
- case IEEE80211_AC_BK:
- cmd->qndp_tid = 1;
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
- BIT(IEEE80211_AC_VI) |
- BIT(IEEE80211_AC_BE) |
- BIT(IEEE80211_AC_BK))) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
- cmd->snooze_interval =
- cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
- cmd->snooze_window =
- (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
- cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
- cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
- cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ * Avoid using uAPSD if P2P client is associated to GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT)
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
- cmd->heavy_tx_thld_percentage =
- IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
- cmd->heavy_rx_thld_percentage =
- IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+ if (allow_uapsd)
+ iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
@@ -381,6 +410,13 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags &=
cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+ u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+ if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+ cmd->flags |= cpu_to_le16(flag);
+ cmd->flags &= cpu_to_le16(flag);
@@ -391,18 +427,11 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION)
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- if (mvm->vif_count > 1)
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
@@ -446,7 +475,7 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
@@ -455,7 +484,8 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ force_disable)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
@@ -472,6 +502,78 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
&cmd);
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+ return _iwl_mvm_power_update_device(mvm, false);
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+ memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+ u8 *ap_sta_id = _data;
+ /* The ap_sta_id is not expected to change during current association
+ * so no explicit protection is needed
+ if (mvmvif->ap_sta_id == *ap_sta_id)
+ memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN);
+ struct iwl_device_cmd *cmd)
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+ ieee80211_iterate_active_interfaces_atomic(
+ iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
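The notification handler above receives the offending firmware station id and, through the interface iterator, records the AP's BSSID in uapsd_misbehaving_bssid; iwl_mvm_power_build_cmd() then withholds U-APSD while still associated to that BSSID. A small self-contained model of the two halves (field and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct vif {
	int ap_sta_id;				/* station id of the associated AP */
	unsigned char bssid[ETH_ALEN];
	unsigned char uapsd_misbehaving_bssid[ETH_ALEN];
};

/* On a misbehaving-AP notification, remember the offending BSSID on every
 * vif currently associated to that station id. */
static void note_misbehaving_ap(struct vif *vifs, int n, int ap_sta_id)
{
	for (int i = 0; i < n; i++)
		if (vifs[i].ap_sta_id == ap_sta_id)
			memcpy(vifs[i].uapsd_misbehaving_bssid,
			       vifs[i].bssid, ETH_ALEN);
}

/* When building the power command, uAPSD is withheld for that BSSID. */
static bool uapsd_allowed(const struct vif *vif)
{
	return memcmp(vif->uapsd_misbehaving_bssid, vif->bssid, ETH_ALEN) != 0;
}

int main(void)
{
	struct vif vifs[1] = {
		{ .ap_sta_id = 7,
		  .bssid = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 } },
	};

	printf("before: uapsd_allowed=%d\n", uapsd_allowed(&vifs[0]));
	note_misbehaving_ap(vifs, 1, 7);
	printf("after:  uapsd_allowed=%d\n", uapsd_allowed(&vifs[0]));
	return 0;
}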
+static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
+ struct iwl_mvm *mvm = _data;
+ mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+ ret = iwl_mvm_power_mac_update_mode(mvm, vif);
+ WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ int ret = _iwl_mvm_power_update_device(mvm, assign);
+ mvm->ps_prevented = assign;
+ WARN_ONCE(ret, "Failed to update power device state\n");
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_binding_iterator,
+ mvm);
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
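_iwl_mvm_power_update_binding() above treats a monitor interface specially, toggling device-level power save through the device power command, and otherwise re-runs the binding iterator so every vif recomputes pm_prevented from bound_vif_cnt, since the firmware currently handles power management on a single bound MAC only. A simplified model of that decision, omitting the firmware commands:

#include <stdbool.h>
#include <stdio.h>

#define MAX_VIFS 4

struct vif { bool monitor; bool pm_prevented; };

struct mvm_state {
	struct vif vifs[MAX_VIFS];
	int n_vifs;
	int bound_vif_cnt;
	bool ps_prevented;	/* device-level power save disabled */
};

/* A monitor interface being (un)bound gates device power save; for any other
 * interface, every vif recomputes pm_prevented from the bound-vif count. */
static void power_update_binding(struct mvm_state *mvm, struct vif *vif,
				 bool assign)
{
	if (vif->monitor) {
		mvm->ps_prevented = assign;
		return;
	}
	for (int i = 0; i < mvm->n_vifs; i++)
		mvm->vifs[i].pm_prevented = mvm->bound_vif_cnt > 1;
}

int main(void)
{
	struct mvm_state mvm = { .n_vifs = 2 };

	mvm.bound_vif_cnt = 2;
	power_update_binding(&mvm, &mvm.vifs[0], true);
	printf("pm_prevented=%d ps_prevented=%d\n",
	       mvm.vifs[0].pm_prevented, mvm.ps_prevented);
	return 0;
}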
@@ -494,70 +596,58 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
le16_to_cpu(cmd.keep_alive_seconds));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- cmd.skip_dtim_periods);
- if (!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
- "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- "lprx_rssi_threshold = %d\n",
- cmd.lprx_rssi_threshold);
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- pos +=
- scnprintf(buf+pos, bufsz-pos,
- "rx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.rx_data_timeout_uapsd));
- "tx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.tx_data_timeout_uapsd));
- pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
- cmd.qndp_tid);
- "uapsd_ac_flags = 0x%x\n",
- cmd.uapsd_ac_flags);
- "uapsd_max_sp = %d\n",
- cmd.uapsd_max_sp);
- "heavy_tx_thld_packets = %d\n",
- cmd.heavy_tx_thld_packets);
- "heavy_rx_thld_packets = %d\n",
- cmd.heavy_rx_thld_packets);
- "heavy_tx_thld_percentage = %d\n",
- cmd.heavy_tx_thld_percentage);
- "heavy_rx_thld_percentage = %d\n",
- cmd.heavy_rx_thld_percentage);
- scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- "snooze_interval = %d\n",
- cmd.snooze_interval);
- "snooze_window = %d\n",
- cmd.snooze_window);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+ return pos;
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+ cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+ 1 : 0);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+ cmd.snooze_window);
return pos;
@@ -654,6 +744,7 @@ const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
.power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
+ .power_update_binding = _iwl_mvm_power_update_binding,
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
@@ -217,8 +217,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- cmd.quotas[idx].max_duration =
- cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
idx++;
@@ -42,33 +42,37 @@
#define RS_NAME "iwl-mvm-rs"
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define RS_LEGACY_RETRIES_PER_RATE 1
+#define RS_HT_VHT_RETRIES_PER_RATE 2
+#define RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define RS_INITIAL_MIMO_NUM_RATES 3
+#define RS_INITIAL_SISO_NUM_RATES 3
+#define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_SISO_NUM_RATES 3
+#define RS_SECONDARY_SISO_RETRIES 1
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
static u8 rs_ht_to_legacy[] = {
- [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
- [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
- [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
- [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
- [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
- [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
- [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
static const u8 ant_toggle_lookup[] = {
@@ -126,6 +130,196 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
+enum rs_action {
+ RS_ACTION_STAY = 0,
+ RS_ACTION_DOWNSCALE = -1,
+ RS_ACTION_UPSCALE = 1,
+enum rs_column_mode {
+ RS_INVALID = 0,
+ RS_LEGACY,
+ RS_SISO,
+ RS_MIMO2,
+#define MAX_NEXT_COLUMNS 5
+#define MAX_COLUMN_CHECKS 3
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl);
+struct rs_tx_column {
+ enum rs_column_mode mode;
+ u8 ant;
+ bool sgi;
+ enum rs_column next_columns[MAX_NEXT_COLUMNS];
+ allow_column_func_t checks[MAX_COLUMN_CHECKS];
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+ if (!sta->ht_cap.ht_supported)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct rs_rate *rate = &tbl->rate;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ if (is_ht20(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ if (is_ht40(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ if (is_ht80(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+static const struct rs_tx_column rs_tx_columns[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ [RS_COLUMN_LEGACY_ANT_B] = {
+ .ant = ANT_B,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ [RS_COLUMN_SISO_ANT_A] = {
+ .mode = RS_SISO,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ .checks = {
+ rs_siso_allow,
+ [RS_COLUMN_SISO_ANT_B] = {
+ RS_COLUMN_SISO_ANT_B_SGI,
+ [RS_COLUMN_SISO_ANT_A_SGI] = {
+ .sgi = true,
+ RS_COLUMN_MIMO2_SGI,
+ rs_sgi_allow,
+ [RS_COLUMN_SISO_ANT_B_SGI] = {
+ [RS_COLUMN_MIMO2] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ rs_mimo_allow,
+ [RS_COLUMN_MIMO2_SGI] = {
static inline u8 rs_extract_rate(u32 rate_n_flags)
/* also works for HT because bits 7:6 are zero there */
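rs_tx_columns[] above turns the old hard-coded rate-scale "actions" into a table-driven search: each column lists candidate next columns plus optional gating checks (MIMO needs HT support, two TX chains and coex approval; SGI needs the matching HT/VHT capability), and rs_get_next_column(), shown further down, walks that list while skipping columns already visited in the current search cycle. The condensed, self-contained model below keeps only the transition graph, a visited bitmap and two illustrative checks; the real column set and checks differ.

#include <stdbool.h>
#include <stdio.h>

enum rs_column_id {
	COL_LEGACY_ANT_A, COL_LEGACY_ANT_B,
	COL_SISO_ANT_A, COL_SISO_ANT_B,
	COL_MIMO2,
	COL_INVALID,
	COL_COUNT = COL_INVALID,
};

#define MAX_NEXT_COLUMNS 3
#define MAX_COLUMN_CHECKS 2

struct sta_caps { bool ht; int tx_chains; };

typedef bool (*allow_fn)(const struct sta_caps *sta);

struct tx_column {
	enum rs_column_id next[MAX_NEXT_COLUMNS];
	allow_fn checks[MAX_COLUMN_CHECKS];
};

static bool allow_ht(const struct sta_caps *sta)   { return sta->ht; }
static bool allow_mimo(const struct sta_caps *sta) { return sta->tx_chains >= 2; }

/* Cut-down analogue of rs_tx_columns[]: only the transition graph and checks. */
static const struct tx_column columns[COL_COUNT] = {
	[COL_LEGACY_ANT_A] = {
		.next = { COL_LEGACY_ANT_B, COL_SISO_ANT_A, COL_MIMO2 },
	},
	[COL_LEGACY_ANT_B] = {
		.next = { COL_LEGACY_ANT_A, COL_SISO_ANT_B, COL_MIMO2 },
	},
	[COL_SISO_ANT_A] = {
		.next = { COL_SISO_ANT_B, COL_MIMO2, COL_INVALID },
		.checks = { allow_ht },
	},
	[COL_SISO_ANT_B] = {
		.next = { COL_SISO_ANT_A, COL_MIMO2, COL_INVALID },
		.checks = { allow_ht },
	},
	[COL_MIMO2] = {
		.next = { COL_SISO_ANT_A, COL_INVALID, COL_INVALID },
		.checks = { allow_ht, allow_mimo },
	},
};

/* Walk the current column's candidate list, skipping columns that were
 * already visited in this search cycle or whose checks fail. */
static enum rs_column_id next_column(enum rs_column_id cur,
				     unsigned int visited,
				     const struct sta_caps *sta)
{
	for (int i = 0; i < MAX_NEXT_COLUMNS; i++) {
		enum rs_column_id id = columns[cur].next[i];

		if (id == COL_INVALID || (visited & (1u << id)))
			continue;

		bool ok = true;
		for (int j = 0; j < MAX_COLUMN_CHECKS && columns[id].checks[j]; j++)
			ok = ok && columns[id].checks[j](sta);
		if (ok)
			return id;
	}
	return COL_INVALID;
}

int main(void)
{
	struct sta_caps sta = { .ht = true, .tx_chains = 1 };
	unsigned int visited = (1u << COL_LEGACY_ANT_A) | (1u << COL_LEGACY_ANT_B);

	/* both legacy columns already visited, so SISO_ANT_A (HT) is chosen */
	printf("next column: %d\n",
	       next_column(COL_LEGACY_ANT_A, visited, &sta));
	return 0;
}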
@@ -170,21 +364,12 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct iwl_lq_sta *lq_sta);
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags);
- u32 *rate_n_flags)
-{}
* The following tables contain the expected throughput metrics for all rates
@@ -264,6 +449,52 @@ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
#define MCS_INDEX_PER_STREAM (8)
+static const char *rs_pretty_ant(u8 ant)
+ static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+ [ANT_C] = "C",
+ [ANT_AC] = "AC",
+ [ANT_BC] = "BC",
+ [ANT_ABC] = "ABC",
+ if (ant > ANT_ABC)
+ return "UNKNOWN";
+ return ant_name[ant];
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+ static const char * const lq_types[] = {
+ [LQ_NONE] = "NONE",
+ [LQ_LEGACY_A] = "LEGACY_A",
+ [LQ_LEGACY_G] = "LEGACY_G",
+ [LQ_HT_SISO] = "HT SISO",
+ [LQ_HT_MIMO2] = "HT MIMO",
+ [LQ_VHT_SISO] = "VHT SISO",
+ [LQ_VHT_MIMO2] = "VHT MIMO",
+ if (type < LQ_NONE || type >= LQ_MAX)
+ return lq_types[type];
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+ const char *prefix)
+ IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n",
+ prefix, rs_pretty_lq_type(rate->type),
+ rate->index, rs_pretty_ant(rate->ant),
+ rate->bw, rate->sgi);
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->data = 0;
@@ -271,7 +502,6 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->success_ratio = IWL_INVALID_VALUE;
window->counter = 0;
window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -279,30 +509,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
- * Program the device to use fixed rate for frame transmit
- * This is for debugging/testing only
- * once the device start use fixed rate, we need to reload the module
- * to being back the normal operation.
-static void rs_program_fix_rate(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta)
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
- if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
@@ -428,192 +634,170 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- /* Tag this window as having been updated */
- window->stamp = jiffies;
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
-/* FIXME:RS:remove this function and put the flags statically in the table */
-static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl, int index)
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+ struct rs_rate *rate)
- u32 rate_n_flags = 0;
+ u32 ucode_rate = 0;
+ int index = rate->index;
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
RATE_MCS_ANT_ABC_MSK);
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags |= iwl_rates[index].plcp;
+ if (is_legacy(rate)) {
+ ucode_rate |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
- return rate_n_flags;
+ ucode_rate |= RATE_MCS_CCK_MSK;
+ return ucode_rate;
- if (is_ht(tbl->lq_type)) {
+ if (is_ht(rate)) {
if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
index = IWL_LAST_HT_RATE;
- rate_n_flags |= RATE_MCS_HT_MSK;
+ ucode_rate |= RATE_MCS_HT_MSK;
- if (is_ht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_siso;
- else if (is_ht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
+ if (is_ht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
WARN_ON_ONCE(1);
- } else if (is_vht(tbl->lq_type)) {
+ } else if (is_vht(rate)) {
if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
index = IWL_LAST_VHT_RATE;
- rate_n_flags |= RATE_MCS_VHT_MSK;
- if (is_vht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_siso;
- else if (is_vht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ ucode_rate |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
- IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
- rate_n_flags |= tbl->bw;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
+ ucode_rate |= rate->bw;
+ if (rate->sgi)
+ ucode_rate |= RATE_MCS_SGI_MSK;
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
-static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+ enum ieee80211_band band,
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+ u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+ u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
u8 nss;
- memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
- *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ memset(rate, 0, sizeof(struct rs_rate));
+ rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
+ if (rate->index == IWL_RATE_INVALID) {
+ rate->index = -1;
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->bw = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
+ rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
/* Legacy */
- if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
- !(rate_n_flags & RATE_MCS_VHT_MSK)) {
+ if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+ !(ucode_rate & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_LEGACY_A;
+ rate->type = LQ_LEGACY_A;
- tbl->lq_type = LQ_LEGACY_G;
+ rate->type = LQ_LEGACY_G;
/* HT or VHT */
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
+ if (ucode_rate & RATE_MCS_SGI_MSK)
+ rate->sgi = true;
- tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+ rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ if (ucode_rate & RATE_MCS_HT_MSK) {
+ nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
RATE_HT_MCS_NSS_POS) + 1;
if (nss == 1) {
- tbl->lq_type = LQ_HT_SISO;
+ rate->type = LQ_HT_SISO;
WARN_ON_ONCE(num_of_ant != 1);
} else if (nss == 2) {
- tbl->lq_type = LQ_HT_MIMO2;
+ rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ } else if (ucode_rate & RATE_MCS_VHT_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
- tbl->lq_type = LQ_VHT_SISO;
+ rate->type = LQ_VHT_SISO;
- tbl->lq_type = LQ_VHT_MIMO2;
+ rate->type = LQ_VHT_MIMO2;
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(tbl->lq_type));
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(rate));
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
-static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
u8 new_ant_type;
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ if (!rate->ant || rate->ant > ANT_ABC)
- if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+ if (!rs_is_valid_ant(valid_ant, rate->ant))
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
+ new_ant_type = ant_toggle_lookup[rate->ant];
- while ((new_ant_type != tbl->ant_type) &&
+ while ((new_ant_type != rate->ant) &&
!rs_is_valid_ant(valid_ant, new_ant_type))
new_ant_type = ant_toggle_lookup[new_ant_type];
- if (new_ant_type == tbl->ant_type)
+ if (new_ant_type == rate->ant)
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ rate->ant = new_ant_type;
return 1;
- * rs_get_supported_rates - get the available rates
- * if management frame or broadcast frame only return
- * basic available rates.
static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
- if (is_legacy(rate_type))
+ if (is_legacy(rate))
return lq_sta->active_legacy_rate;
- else if (is_siso(rate_type))
+ else if (is_siso(rate))
return lq_sta->active_siso_rate;
- else if (is_mimo2(rate_type))
+ else if (is_mimo2(rate))
return lq_sta->active_mimo2_rate;
@@ -628,7 +812,7 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* 802.11A or ht walks to the next literal adjacent rate in
* the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
u32 mask;
@@ -676,73 +860,80 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
return (high << 8) | low;
-static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- u8 scale_index, u8 ht_possible)
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
- s32 low;
- u16 rate_mask;
+ return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+ u8 low;
u16 high_low;
- u8 switch_to_legacy = 0;
+ u16 rate_mask;
struct iwl_mvm *mvm = lq_sta->drv;
- /* check if we need to switch from HT to legacy rates.
- * assumption is that mandatory rates (1Mbps or 6Mbps)
- * are always supported (spec demand) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
+ high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+ rate->type);
+ low = high_low & 0xff;
- if (num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ /* Bottom rate of column reached */
+ if (low == IWL_RATE_INVALID)
- tbl->is_SGI = 0;
+ rate->index = low;
- rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+ struct iwl_mvm *mvm = lq_sta->drv;
- /* Mask with station rate restriction */
- /* supp_rates has no CCK bits in A mode */
+ /* No column to downgrade from Legacy */
+ } else if (is_siso(rate)) {
+ /* Downgrade to Legacy if we were in SISO */
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ rate->index > IWL_RATE_MCS_9_INDEX);
+ rate->index = rs_ht_to_legacy[rate->index];
+ /* Downgrade to SISO with same MCS if in MIMO */
+ rate->type = is_vht_mimo2(rate) ?
+ LQ_VHT_SISO : LQ_HT_SISO;
- high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
- if (low == IWL_RATE_INVALID)
+ if (num_of_ant(rate->ant) > 1)
+ rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
+ /* Relevant in both switching to SISO or Legacy */
+ rate->sgi = false;
+ if (!rs_rate_supported(lq_sta, rate))
+ rs_get_lower_rate_in_column(lq_sta, rate);
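rs_get_lower_rate_down_column() above encodes the downgrade order: MIMO2 falls back to SISO at the same MCS, SISO falls back to a legacy rate through the rs_ht_to_legacy[] remap; SGI and extra antennas are shed in either case, the legacy fallback also forces 20 MHz, and only then does the rate step further down inside the column if unsupported. A compact model of that ladder (the MCS-to-legacy indices are illustrative, not the driver's IWL_RATE_* values):

#include <stdbool.h>
#include <stdio.h>

enum rate_type { TYPE_LEGACY, TYPE_SISO, TYPE_MIMO2 };

struct rate {
	enum rate_type type;
	int index;	/* MCS index for SISO/MIMO2, legacy index otherwise */
	bool sgi;
	int nss;	/* spatial streams / antennas in use */
};

/* MCS index -> legacy OFDM rate index, in the spirit of rs_ht_to_legacy[]:
 * MCS0->6M, MCS1->9M, ... MCS7..9 all map to 54M. Indices are illustrative. */
static const int mcs_to_legacy_idx[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 7, 7 };

/* Downgrade one "column": MIMO2 drops to SISO at the same MCS, SISO drops to
 * a legacy rate of comparable airtime; SGI and extra antennas are shed. */
static bool downgrade_column(struct rate *r)
{
	switch (r->type) {
	case TYPE_LEGACY:
		return false;			/* nothing below legacy */
	case TYPE_SISO:
		r->type = TYPE_LEGACY;
		r->index = mcs_to_legacy_idx[r->index];
		break;
	case TYPE_MIMO2:
		r->type = TYPE_SISO;		/* keep the same MCS index */
		break;
	}
	r->sgi = false;
	r->nss = 1;
	return true;
}

int main(void)
{
	struct rate r = { .type = TYPE_MIMO2, .index = 5, .sgi = true, .nss = 2 };

	while (downgrade_column(&r))
		printf("type=%d index=%d sgi=%d nss=%d\n",
		       r.type, r.index, r.sgi, r.nss);
	return 0;
}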
- * Simple function to compare two rate scale table types
-static bool table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
+/* Simple function to compare two rate scale table types */
+static inline bool rs_rate_match(struct rs_rate *a,
+ struct rs_rate *b)
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
+ return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -766,7 +957,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
int legacy_success;
int retries;
- int rs_index, mac_index, i;
+ int mac_index, i;
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_lq_cmd *table;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -774,13 +965,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
+ u32 ucode_rate;
+ struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- IWL_DEBUG_RATE_LIMIT(mvm,
- "get frame ack response, update rate scale window\n");
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
@@ -808,10 +996,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* to a new "search" mode (which might become the new "active" mode).
table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
if (info->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
+ rate.index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
/* For HT packets, map MCS to PLCP */
@@ -834,19 +1022,19 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
- (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
- (tbl_type.ant_type != info->status.antenna) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
+ (rate.ant != info->status.antenna) ||
+ (!!(ucode_rate & RATE_MCS_HT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
- (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+ (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
+ (rate.index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
+ mac_index, rate.index, ucode_rate);
* Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -855,7 +1043,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
/* Regardless, ignore this status info for outdated rate */
@@ -864,28 +1055,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
/* Figure out if rate scale algorithm is in active or search table */
- if (table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ if (rs_rate_match(&rate,
+ &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (table_type_matches(
- &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ } else if (rs_rate_match(&rate,
+ &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type,
- tmp_tbl->is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
- IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type,
- tbl_type.is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &rate, "ACTUAL");
* no matching table found, let's by-pass the data collection
* and continue to perform rate scale to find the rate table
@@ -902,15 +1088,14 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* first index into rate scale table.
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
- &rs_index);
- rs_collect_tx_data(curr_tbl, rs_index,
+ rs_collect_tx_data(curr_tbl, rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
lq_sta->total_success += info->status.ampdu_ack_len;
lq_sta->total_failed += (info->status.ampdu_len -
@@ -927,31 +1112,31 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band,
- &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[i]);
* Only collect stats if retried rate is in the same RS
* table as active/search.
- if (table_type_matches(&tbl_type, curr_tbl))
+ if (rs_rate_match(&rate, &curr_tbl->rate))
tmp_tbl = curr_tbl;
- else if (table_type_matches(&tbl_type, other_tbl))
+ else if (rs_rate_match(&rate, &other_tbl->rate))
tmp_tbl = other_tbl;
- rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ rs_collect_tx_data(tmp_tbl, rate.index, 1,
i < retries ? 0 : legacy_success);
lq_sta->total_success += legacy_success;
lq_sta->total_failed += retries + (1 - legacy_success);
/* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
+ lq_sta->last_rate_n_flags = ucode_rate;
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -969,8 +1154,8 @@ done:
static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
struct iwl_lq_sta *lq_sta)
- IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+ lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
if (is_legacy) {
lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
@@ -984,37 +1169,31 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
+ lq_sta->visited_columns = 0;
- * Find correct throughput table for given mode of modulation
-static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column,
+ u32 bw)
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
- !(is_vht(tbl->lq_type)))) {
- tbl->expected_tpt = expected_tpt_legacy;
+ if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+ column->mode != RS_SISO &&
+ column->mode != RS_MIMO2))
+ return expected_tpt_legacy;
/* Legacy rates have only one table */
+ if (column->mode == RS_LEGACY)
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
* (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type)) {
- switch (tbl->bw) {
+ if (column->mode == RS_SISO) {
+ switch (bw) {
case RATE_MCS_CHAN_WIDTH_20:
ht_tbl_pointer = expected_tpt_siso_20MHz;
@@ -1027,8 +1206,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- } else if (is_mimo2(tbl->lq_type)) {
+ } else if (column->mode == RS_MIMO2) {
@@ -1045,14 +1224,23 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
+ if (!column->sgi && !lq_sta->is_agg) /* Normal */
+ return ht_tbl_pointer[0];
+ else if (column->sgi && !lq_sta->is_agg) /* SGI */
+ return ht_tbl_pointer[1];
+ else if (!column->sgi && lq_sta->is_agg) /* AGG */
+ return ht_tbl_pointer[2];
else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
+ return ht_tbl_pointer[3];
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+ tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
@@ -1089,7 +1277,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
while (1) {
high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
+ tbl->rate.type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
@@ -1110,7 +1298,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
* "active" throughput (under perfect conditions).
if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
+ ((active_sr > RS_SR_FORCE_DECREASE) &&
(active_sr <= IWL_RATE_HIGH_TH) &&
(tpt_tbl[rate] <= active_tpt))) ||
((active_sr >= IWL_RATE_SCALE_SWITCH) &&
@@ -1157,533 +1345,373 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
-/* Move to the next action and wrap around to the first action in case
- * we're at the last action. Assumes actions start at 0.
-static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
- u8 last_action)
- BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
- tbl->action = (tbl->action + 1) % (last_action + 1);
-static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta *sta)
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
- tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ return RATE_MCS_CHAN_WIDTH_80;
else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
- tbl->bw = RATE_MCS_CHAN_WIDTH_40;
- tbl->bw = RATE_MCS_CHAN_WIDTH_20;
-static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- if (is_ht20(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- return true;
- if (is_ht40(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- if (is_ht80(tbl) && (vht_cap->cap &
- IEEE80211_VHT_CAP_SHORT_GI_80))
+ return RATE_MCS_CHAN_WIDTH_40;
+ return RATE_MCS_CHAN_WIDTH_20;
- * Set up search table for MIMO2
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
-static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
- s32 rate;
- if (!sta->ht_cap.ht_supported)
+ struct iwl_scale_tbl_info *tbl;
+ int active_tbl;
+ int flush_interval_passed = 0;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ mvm = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
- /* Need both Tx chains/antennas to support MIMO */
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ tbl = &(lq_sta->lq_info[active_tbl]);
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
+ /* If we've been disallowing search, see if we should now allow it */
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ RS_STAY_IN_COLUMN_TIMEOUT));
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
- tbl->action = 0;
- rate_mask = lq_sta->active_mimo2_rate;
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) &&
+ (lq_sta->flush_timer) && (flush_interval_passed))) {
+ "LQ: stay is expired %d %d %d\n",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
- rs_set_bw_from_sta(tbl, sta);
- rs_set_expected_tpt_table(lq_sta, tbl);
+ /* Allow search for new mode */
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+ "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+ /* mark the current column as visited */
+ lq_sta->visited_columns = BIT(tbl->column);
+ * Else if we've used this modulation mode for enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
+ "LQ: stay in table clear win\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(
+ &(tbl->win[i]));
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
+ IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
+ rs_rate_scale_clear_window(&(tbl->win[i]));
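For orientation, a minimal self-contained sketch of the stay-vs-search test described in the comments above: successes and failures accumulate while a column is in use, and a new search cycle is only allowed once a failure limit, a success limit, or a time limit is hit. All names and thresholds below are illustrative stand-ins, not the driver's symbols.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative limits, not the driver's values. */
#define STAY_FAIL_LIMIT     60
#define STAY_SUCCESS_LIMIT  150
#define STAY_TIMEOUT_MS     5000

struct stay_state {
	unsigned int total_failed;
	unsigned int total_success;
	uint64_t     column_start_ms;	/* 0 means "timer not armed" */
	bool         search_in_progress;
};

/* Return true when enough evidence has piled up (lots of failures, lots
 * of successes, or simply a long time spent in the same column) to make
 * probing a different modulation/antenna configuration worthwhile. */
static bool should_start_search(const struct stay_state *s,
				uint64_t now_ms, bool force)
{
	bool expired = s->column_start_ms &&
		       now_ms - s->column_start_ms > STAY_TIMEOUT_MS;

	if (force)
		return true;
	if (s->total_failed > STAY_FAIL_LIMIT)
		return true;
	if (s->total_success > STAY_SUCCESS_LIMIT)
		return true;
	return !s->search_in_progress && expired;
}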
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
- tbl->current_rate);
- * Set up search table for SISO
+ * setup rate table in uCode
-static int rs_switch_to_siso(struct iwl_mvm *mvm,
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+ u8 tid = IWL_MAX_TID_COUNT;
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
- rate_mask = lq_sta->active_siso_rate;
+ if (unlikely(tid > IWL_MAX_TID_COUNT))
+ tid = IWL_MAX_TID_COUNT;
+ return tid;
- IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- IWL_DEBUG_RATE(mvm,
- "can not switch with index %d rate mask %x\n",
- * Try to switch to new modulation mode from legacy
-static int rs_move_legacy_other(struct iwl_mvm *mvm,
- int index)
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- u8 update_search_tbl_counter = 0;
+ int i, j, n;
+ enum rs_column next_col_id;
+ const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+ const struct rs_tx_column *next_col;
+ allow_column_func_t allow_func;
+ u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
+ s32 *expected_tpt_tbl;
+ s32 tpt, max_expected_tpt;
+ for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+ next_col_id = curr_col->next_columns[i];
+ if (next_col_id == RS_COLUMN_INVALID)
+ if (lq_sta->visited_columns & BIT(next_col_id)) {
+ IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+ next_col_id);
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
+ next_col = &rs_tx_columns[next_col_id];
- if (tx_chains_num <= 1)
+ if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+ "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+ next_col_id, valid_ants, next_col->ant);
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+ allow_func = next_col->checks[j];
+ if (allow_func && !allow_func(mvm, sta, tbl))
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
- /* Set up search table to try SISO */
- search_tbl->is_SGI = 0;
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- case IWL_LEGACY_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
+ if (j != MAX_COLUMN_CHECKS) {
+ "Skip column %d: not allowed (check %d failed)\n",
+ next_col_id, j);
- /* Set up search table to try MIMO */
- search_tbl->ant_type = ANT_AB;
+ tpt = lq_sta->last_tpt / 100;
+ expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+ tbl->rate.bw);
+ if (WARN_ON_ONCE(!expected_tpt_tbl))
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
+ max_expected_tpt = 0;
+ for (n = 0; n < IWL_RATE_COUNT; n++)
+ if (expected_tpt_tbl[n] > max_expected_tpt)
+ max_expected_tpt = expected_tpt_tbl[n];
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- WARN_ON_ONCE(1);
+ if (tpt >= max_expected_tpt) {
+ "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
- rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
- if (tbl->action == start_action)
- search_tbl->lq_type = LQ_NONE;
- lq_sta->search_better_tbl = 1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
+ if (i == MAX_NEXT_COLUMNS)
+ return RS_COLUMN_INVALID;
+ IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
+ return next_col_id;
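A rough standalone sketch of the pruning that the column search above performs: candidates already visited in this search cycle (tracked as a bitmap) are skipped, as are columns whose antenna requirements the chip cannot meet and columns whose best theoretical throughput cannot beat the throughput already measured. The types and the -1-terminated candidate list are invented for illustration.

#define N_RATES         15
#define COLUMN_INVALID  (-1)

struct column_def {
	unsigned int ant_mask;		/* antennas this column requires */
	const int   *expected_tpt;	/* per-rate theoretical throughput */
	const int   *next_columns;	/* successors, COLUMN_INVALID terminated */
};

static int pick_next_column(const struct column_def *cols, int curr,
			    unsigned int visited, unsigned int valid_ants,
			    int measured_tpt)
{
	const int *cand;

	for (cand = cols[curr].next_columns; *cand != COLUMN_INVALID; cand++) {
		const struct column_def *next = &cols[*cand];
		int best = 0, i;

		if (visited & (1u << *cand))
			continue;	/* already probed in this cycle */
		if ((next->ant_mask & valid_ants) != next->ant_mask)
			continue;	/* chip lacks these antennas */

		for (i = 0; i < N_RATES; i++)
			if (next->expected_tpt[i] > best)
				best = next->expected_tpt[i];
		if (best <= measured_tpt)
			continue;	/* cannot beat what we already get */

		return *cand;
	}
	return COLUMN_INVALID;
}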
- * Try to switch to new modulation mode from SISO
-static int rs_move_siso_to_other(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, int index)
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+ enum rs_column col_id)
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct rs_rate *rate = &search_tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[col_id];
+ const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u16 rate_mask = 0;
+ u32 rate_idx = 0;
- if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
- !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
- tbl->action = IWL_SISO_SWITCH_ANTENNA;
+ memcpy(search_tbl, tbl, sz);
- case IWL_SISO_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
+ rate->sgi = column->sgi;
+ rate->ant = column->ant;
- if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
- BT_MBOX_MSG(&mvm->last_bt_notif, 3,
- TRAFFIC_LOAD) == 0)
- case IWL_SISO_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
+ if (column->mode == RS_LEGACY) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = lq_sta->active_legacy_rate;
+ } else if (column->mode == RS_SISO) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ rate_mask = lq_sta->active_siso_rate;
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ rate_mask = lq_sta->active_mimo2_rate;
+ WARN_ON_ONCE("Bad column mode");
+ rate->bw = rs_bw_from_sta_bw(sta);
+ search_tbl->column = col_id;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (!ret)
- case IWL_SISO_SWITCH_GI:
- if (!rs_sgi_allowed(tbl, sta))
+ /* Get the best matching rate if we're changing modes. e.g.
+ * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+ if (curr_column->mode != column->mode) {
+ rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+ rate_mask, rate->index);
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
+ if ((rate_idx == IWL_RATE_INVALID) ||
+ !(BIT(rate_idx) & rate_mask)) {
+ "can not switch with index %d"
+ " rate mask %x\n",
+ rate_idx, rate_mask);
- search_tbl->is_SGI = !tbl->is_SGI;
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl, index);
- rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
+ rate->index = rate_idx;
- out:
- * Try to switch to new modulation mode from MIMO2
-static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
- /* Set up new search table for SISO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
- search_tbl->ant_type = ANT_B;
- case IWL_MIMO2_SWITCH_GI:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
- /* Set up new search table for MIMO2 */
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
+ IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+ col_id, rate->index);
+ lq_sta->visited_columns |= BIT(col_id);
+ rate->type = LQ_NONE;
+ return -1;
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
-static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ s32 sr, int low, int high,
+ int current_tpt,
+ int low_tpt, int high_tpt)
- struct iwl_scale_tbl_info *tbl;
- int active_tbl;
- int flush_interval_passed = 0;
- mvm = lq_sta->drv;
- active_tbl = lq_sta->active_tbl;
+ enum rs_action action = RS_ACTION_STAY;
- tbl = &(lq_sta->lq_info[active_tbl]);
- /* If we've been disallowing search, see if we should now allow it */
- /* Elapsed time using current modulation mode */
- if (lq_sta->flush_timer)
- flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
- * Check if we should allow search for new modulation mode.
- * If many frames have failed or succeeded, or we've used
- * this same modulation for a long time, allow search, and
- * reset history stats that keep track of whether we should
- * allow a new search. Also (below) reset all bitmaps and
- * stats in active history.
- if (force_search ||
- (lq_sta->total_failed > lq_sta->max_failure_limit) ||
- (lq_sta->total_success > lq_sta->max_success_limit) ||
- ((!lq_sta->search_better_tbl) &&
- (lq_sta->flush_timer) && (flush_interval_passed))) {
+ /* Too many failures, decrease rate */
+ if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
+ "decrease rate because of low SR\n");
+ action = RS_ACTION_DOWNSCALE;
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
- "LQ: stay is expired %d %d %d\n",
- lq_sta->total_failed,
- lq_sta->total_success,
- flush_interval_passed);
+ "Good SR and no high rate measurement. "
+ "Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ } else if (low != IWL_RATE_INVALID) {
+ "Remain in current rate\n");
+ action = RS_ACTION_STAY;
- /* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = 0;
- * Else if we've used this modulation mode enough repetitions
- * (regardless of elapsed time or success/failure), reset
- * history bitmaps and rate-specific stats for all rates in
- * active table.
- lq_sta->table_count++;
- if (lq_sta->table_count >=
- lq_sta->table_count_limit) {
- lq_sta->table_count = 0;
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it!
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt)) {
+ "Both high and low are worse. "
+ "Maintain rate\n");
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance.
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
- "LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(
- &(tbl->win[i]));
+ "Higher rate is better and good "
+ "SR. Increate rate\n");
+ "Higher rate isn't better OR "
+ "no good SR. Maintain rate\n");
- /* If transitioning to allow "search", reset all history
- * bitmaps and stats in active table (this will become the new
- * "search" table). */
- if (!lq_sta->stay_in_tbl) {
- rs_rate_scale_clear_window(&(tbl->win[i]));
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ "Lower rate is better. "
+ "Decrease rate\n");
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ "Lower rate isn't better and "
+ "good SR. Increase rate\n");
- * setup rate table in uCode
-static void rs_update_rate_tbl(struct iwl_mvm *mvm,
- u32 rate;
- /* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, rate);
-static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
- u8 tid = IWL_MAX_TID_COUNT;
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it.
+ if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low])))) {
+ "Sanity check failed. Maintain rate\n");
- if (unlikely(tid > IWL_MAX_TID_COUNT))
- tid = IWL_MAX_TID_COUNT;
- return tid;
+ return action;
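The branches of rs_get_rate_action() reduce to comparing the current success ratio against the measured throughput of the two adjacent rates. A simplified, self-contained restatement follows; the thresholds are invented and the final sanity check of the real function is omitted.

#include <stdbool.h>

enum toy_action { TOY_STAY, TOY_DOWNSCALE, TOY_UPSCALE };

#define TPT_UNKNOWN         (-1)
#define SR_FORCE_DECREASE   15	/* percent, illustrative */
#define SR_INCREASE_TH      64	/* percent, illustrative */

static enum toy_action pick_action(int sr, int cur_tpt,
				   int low_tpt, int high_tpt,
				   bool has_lower, bool has_higher)
{
	/* Too many failures: always step down. */
	if (sr <= SR_FORCE_DECREASE || cur_tpt == 0)
		return TOY_DOWNSCALE;

	/* Neither neighbour measured yet: probe upwards if SR allows. */
	if (low_tpt == TPT_UNKNOWN && high_tpt == TPT_UNKNOWN)
		return (has_higher && sr >= SR_INCREASE_TH) ?
			TOY_UPSCALE : TOY_STAY;

	/* Both neighbours measured and both worse: we're at the peak. */
	if (low_tpt != TPT_UNKNOWN && high_tpt != TPT_UNKNOWN &&
	    low_tpt < cur_tpt && high_tpt < cur_tpt)
		return TOY_STAY;

	/* Higher neighbour measured better and SR is healthy: go up. */
	if (high_tpt != TPT_UNKNOWN && high_tpt > cur_tpt &&
	    sr >= SR_INCREASE_TH)
		return TOY_UPSCALE;

	/* Lower neighbour measured better: fall back to it. */
	if (has_lower && low_tpt != TPT_UNKNOWN && low_tpt > cur_tpt)
		return TOY_DOWNSCALE;

	return TOY_STAY;
}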
@@ -1705,20 +1733,19 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
u32 fail_count;
- s8 scale_action = 0;
+ enum rs_action scale_action = RS_ACTION_STAY;
u16 rate_mask;
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
u8 active_tbl = 0;
u8 done_search = 0;
s32 sr;
u8 tid = IWL_MAX_TID_COUNT;
+ u8 prev_agg = lq_sta->is_agg;
struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data;
- IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+ struct rs_rate *rate;
/* Send management frames and NO_ACK data using lowest rate. */
/* TODO: this could probably be improved.. */
@@ -1726,8 +1753,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
info->flags & IEEE80211_TX_CTL_NO_ACK)
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -1751,45 +1776,29 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
+ if (prev_agg != lq_sta->is_agg) {
+ "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+ prev_agg, lq_sta->is_agg);
+ rs_set_expected_tpt_table(lq_sta, tbl);
/* current tx rate */
index = lq_sta->last_txrate_idx;
- IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
/* rates available for this association, and for modulation mode */
- rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
- IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
- /* mask with station rate restriction */
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
- rate_scale_index_msk = rate_mask;
- if (!rate_scale_index_msk)
- if (!((1 << index) & rate_scale_index_msk)) {
+ if (!(BIT(index) & rate_mask)) {
IWL_ERR(mvm, "Current Rate is not valid\n");
if (lq_sta->search_better_tbl) {
/* revert to active table if search table is not valid*/
lq_sta->search_better_tbl = 0;
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+ rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
@@ -1806,6 +1815,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
index = lq_sta->max_rate_idx;
update_lq = 1;
window = &(tbl->win[index]);
+ "Forcing user max rate %d\n",
+ index);
goto lq_update;
@@ -1822,8 +1834,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
- "LQ: still below TH. succ=%d total=%d for index %d\n",
- window->success_counter, window->counter, index);
+ "(%s: %d): Test Window: succ %d total %d\n",
+ rs_pretty_lq_type(rate->type),
+ index, window->success_counter, window->counter);
/* Can't calculate this yet; not enough history */
@@ -1838,8 +1851,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* actual average throughput */
if (window->average_tpt != ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(mvm,
- "expected_tpt should have been calculated by now\n");
window->average_tpt = ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128);
@@ -1851,34 +1862,33 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* continuing to use the setup that we've been trying. */
if (window->average_tpt > lq_sta->last_tpt) {
- "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "SWITCHING TO NEW TABLE SR: %d "
+ "cur-tpt %d old-tpt %d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
/* Swap tables; "search" becomes "active" */
lq_sta->active_tbl = active_tbl;
current_tpt = window->average_tpt;
/* Else poor success; go back to mode in "active" table */
- "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "GOING BACK TO THE OLD TABLE: SR %d "
/* Nullify "search" table */
/* Revert to "active" table */
active_tbl = lq_sta->active_tbl;
/* Revert to "active" rate and throughput info */
+ index = tbl->rate.index;
current_tpt = lq_sta->last_tpt;
/* Need to set up a new rate table in uCode */
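A compact sketch of the accept/revert step above: once the probe window on the "search" configuration holds enough samples, the candidate either replaces the active table or is discarded and the previous configuration stays in force. Structure names are invented.

#include <stdbool.h>

struct toy_tbl { int avg_tpt; };

struct toy_lq {
	struct toy_tbl tbl[2];
	int  active;		/* index of the currently active table */
	int  last_tpt;		/* throughput of the active configuration */
	bool searching;
};

/* 'cand' is the index of the search table whose probe just completed. */
static void finish_probe(struct toy_lq *lq, int cand)
{
	if (lq->tbl[cand].avg_tpt > lq->last_tpt) {
		/* The candidate wins: "search" becomes "active". */
		lq->active = cand;
		lq->last_tpt = lq->tbl[cand].avg_tpt;
	}
	/* A losing candidate is simply abandoned; either way the probe
	 * is over and normal scaling resumes on the active table. */
	lq->searching = false;
}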
@@ -1894,8 +1904,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
/* (Else) not in search of better modulation mode, try for better
* starting rate, while staying in this mode. */
- high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
+ high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
@@ -1913,118 +1922,58 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (high != IWL_RATE_INVALID)
high_tpt = tbl->win[high].average_tpt;
- scale_action = 0;
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- "decrease rate because of low tpt\n");
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
+ "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+ rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+ low, high, low_tpt, high_tpt);
- if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
- if (lq_sta->last_bt_traffic >
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
- * don't set scale_action, don't want to scale up if
- * the rate scale doesn't otherwise think that is a
- * good idea.
- } else if (lq_sta->last_bt_traffic <=
- lq_sta->last_bt_traffic =
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+ scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+ current_tpt, low_tpt, high_tpt);
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
- /* search for a new modulation */
+ /* Force a search in case BT doesn't like us being in MIMO */
+ if (is_mimo(rate) &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+ "BT Coex forbids MIMO. Search for new config\n");
rs_stay_in_table(lq_sta, true);
switch (scale_action) {
+ case RS_ACTION_DOWNSCALE:
/* Decrease starting rate, update uCode's rate table */
if (low != IWL_RATE_INVALID) {
index = low;
+ "At the bottom rate. Can't decrease\n");
+ case RS_ACTION_UPSCALE:
/* Increase starting rate, update uCode's rate table */
if (high != IWL_RATE_INVALID) {
index = high;
+ "At the top rate. Can't increase\n");
+ case RS_ACTION_STAY:
/* No change */
- "choose rate scale index %d action %d low %d high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
lq_update:
/* Replace uCode's rate table for the destination station. */
- if (update_lq)
+ if (update_lq) {
+ tbl->rate.index = index;
rs_stay_in_table(lq_sta, false);
@@ -2035,20 +1984,29 @@ lq_update:
* 3) Allowing a new search
if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
+ lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+ && window->counter) {
+ enum rs_column next_column;
/* Save current throughput to compare with "search" throughput*/
lq_sta->last_tpt = current_tpt;
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- rs_move_legacy_other(mvm, lq_sta, sta, index);
- else if (is_siso(tbl->lq_type))
- rs_move_siso_to_other(mvm, lq_sta, sta, index);
- else if (is_mimo2(tbl->lq_type))
- rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
+ "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+ update_lq, done_search, lq_sta->rs_state,
+ window->counter);
+ next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+ if (next_column != RS_COLUMN_INVALID) {
+ int ret = rs_switch_to_column(mvm, lq_sta, sta,
+ next_column);
+ lq_sta->search_better_tbl = 1;
+ "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
/* If new "search" mode was selected, set up in uCode table */
@@ -2058,36 +2016,31 @@ lq_update:
rs_rate_scale_clear_window(&(tbl->win[i]));
/* Use new "search" start rate */
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
+ rs_dump_rate(mvm, &tbl->rate,
+ "Switch to SEARCH TABLE:");
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
done_search = 1;
- if (done_search && !lq_sta->stay_in_tbl) {
+ if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
/* If the "active" (non-search) mode was legacy,
* and we've tried switching antennas,
* but we haven't been able to try HT modes (not available),
* stay with best antenna legacy modulation for a while
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
- lq_sta->action_counter > tbl1->max_search) {
+ if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
rs_set_stay_in_table(mvm, 1, lq_sta);
/* If we're in an HT mode, and all 3 mode switch actions
* have been tried and compared, stay in this best modulation
* mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
@@ -2105,7 +2058,6 @@ lq_update:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
@@ -2126,12 +2078,12 @@ out:
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band)
+ bool init)
struct iwl_scale_tbl_info *tbl;
- int rate_idx;
u8 valid_tx_ant;
@@ -2148,27 +2100,30 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if ((i < 0) || (i >= IWL_RATE_COUNT))
i = 0;
- rate = iwl_rates[i].plcp;
- tbl->ant_type = first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
+ rate->index = i;
+ rate->ant = first_antenna(valid_tx_ant);
+ if (band == IEEE80211_BAND_5GHZ)
- rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
- if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+ WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+ if (rate->ant == ANT_A)
+ tbl->column = RS_COLUMN_LEGACY_ANT_A;
+ tbl->column = RS_COLUMN_LEGACY_ANT_B;
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
- tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
+ rs_fill_lq_cmd(NULL, NULL, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
@@ -2182,8 +2137,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct iwl_lq_sta *lq_sta = mvm_sta;
- IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
@@ -2242,11 +2195,59 @@ static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
return -1;
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_lq_sta *lq_sta)
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ /* Disable MCS9 as a workaround */
+ if (i == IWL_RATE_MCS_9_INDEX)
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ lq_sta->active_siso_rate |= BIT(i);
+ if (sta->rx_nss < 2)
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ lq_sta->active_mimo2_rate |= BIT(i);
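The helper factored out above builds the per-modulation rate bitmaps from what the peer advertises. Sketched below is the general shape of that loop for the SISO case, including the exclusion of VHT MCS9 on a 20 MHz channel; the bit layout and constants are illustrative only.

#include <stdbool.h>

#define MCS0_BIT_OFFSET  4	/* illustrative position of MCS0 in the bitmap */
#define MCS9             9

static unsigned int build_vht_siso_mask(int highest_mcs, bool is_20mhz)
{
	unsigned int mask = 0;
	int mcs;

	for (mcs = 0; mcs <= highest_mcs; mcs++) {
		/* VHT MCS9 is not defined for a 20 MHz channel at NSS 1/2,
		 * so that bit stays clear. */
		if (mcs == MCS9 && is_20mhz)
			continue;
		mask |= 1u << (MCS0_BIT_OFFSET + mcs);
	}
	return mask;
}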
* Called after adding a new station to initialize rate scaling
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band, bool init)
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -2259,6 +2260,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
lq_sta = &sta_priv->lq_sta;
+ memset(lq_sta, 0, sizeof(*lq_sta));
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = sta_priv->sta_id;
@@ -2268,7 +2271,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2308,27 +2310,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_vht = false;
- int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
- if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
- for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
- if (i == IWL_RATE_9M_INDEX)
- lq_sta->active_siso_rate |= BIT(i);
- highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
- lq_sta->active_mimo2_rate |= BIT(i);
- /* TODO: avoid MCS9 in 20Mhz which isn't valid for 11ac */
+ rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
@@ -2341,15 +2323,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- lq_sta->lq.dual_stream_ant_msk =
- iwl_fw_valid_tx_ant(mvm->fw) &
- ~first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (!lq_sta->lq.dual_stream_ant_msk) {
- lq_sta->lq.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) == 2) {
- iwl_fw_valid_tx_ant(mvm->fw);
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2364,121 +2338,185 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->dbg_fixed_rate = 0;
- rs_initialize_lq(mvm, sta, lq_sta, band);
+ rs_initialize_lq(mvm, sta, lq_sta, band, init);
- struct iwl_lq_sta *lq_sta, u32 new_rate)
+static void rs_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed)
+ u8 tid;
+ struct iwl_op_mode *op_mode =
+ (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ /* Stop any ongoing aggregations as rs starts off assuming no agg */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_stop_tx_ba_session(sta, tid);
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+ struct iwl_lq_cmd *lq_cmd,
+ u32 ucode_rate)
+ int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+ __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+ for (i = 0; i < num_rates; i++)
+ lq_cmd->rs_table[i] = ucode_rate_le32;
+ rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+ if (is_mimo(&rate))
+ lq_cmd->mimo_delim = num_rates - 1;
+ lq_cmd->mimo_delim = 0;
+#endif /* CONFIG_MAC80211_DEBUGFS */
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+ struct rs_rate *rate,
+ __le32 *rs_table, int *rs_table_index,
+ int num_rates, int num_retries,
+ u8 valid_tx_ant, bool toggle_ant)
- int index = 0;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
+ __le32 ucode_rate;
+ bool bottom_reached = false;
+ int prev_rate_idx = rate->index;
+ int end = LINK_QUAL_MAX_RETRY_NUM;
+ int index = *rs_table_index;
+ for (i = 0; i < num_rates && index < end; i++) {
+ ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
+ for (j = 0; j < num_retries && index < end; j++, index++)
+ rs_table[index] = ucode_rate;
+ if (toggle_ant)
+ rs_toggle_antenna(valid_tx_ant, rate);
+ prev_rate_idx = rate->index;
+ bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+ if (bottom_reached && !is_legacy(rate))
+ if (!bottom_reached)
+ rate->index = prev_rate_idx;
+ *rs_table_index = index;
+/* Building the rate table is non-trivial. When we're in MIMO2/VHT/80Mhz/SGI
+ * column the rate table should look like this:
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+ const struct rs_rate *initial_rate)
+ int num_rates, num_retries, index = 0;
u8 valid_tx_ant = 0;
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ bool toggle_ant = false;
- /* Override starting rate (index 0) if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ memcpy(&rate, initial_rate, sizeof(struct rs_rate));
- /* Interpret new_rate (rate_n_flags) */
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
+ if (mvm)
+ valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
+ if (is_siso(&rate)) {
+ num_rates = RS_INITIAL_SISO_NUM_RATES;
+ num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+ } else if (is_mimo(&rate)) {
+ num_rates = RS_INITIAL_MIMO_NUM_RATES;
- repeat_rate = min(IWL_HT_NUMBER_TRY,
- LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+ num_rates = RS_INITIAL_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ toggle_ant = true;
- lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+ rs_get_lower_rate_down_column(lq_sta, &rate);
- if (num_of_ant(tbl_type.ant_type) == 1)
- lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
- else if (num_of_ant(tbl_type.ant_type) == 2)
- lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
- /* otherwise we don't modify the existing value */
+ num_rates = RS_SECONDARY_SISO_NUM_RATES;
+ num_retries = RS_SECONDARY_SISO_RETRIES;
+ } else if (is_legacy(&rate)) {
+ num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+ WARN_ON_ONCE(1);
- index++;
- repeat_rate--;
- if (mvm)
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (mvm &&
- rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- /* Override next rate if needed for debug purposes */
- /* Fill next table entry */
- lq_cmd->rs_table[index] =
- cpu_to_le32(new_rate);
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
- &rate_idx);
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->mimo_delim = index;
- /* Get next rate */
- new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
- use_ht_possible);
- /* How many times should we repeat the next rate? */
- repeat_rate = IWL_HT_NUMBER_TRY;
- /* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_LEGACY_A
- * or LQ_LEGACY_G.
- use_ht_possible = 0;
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ u8 ant = initial_rate->ant;
+ if (lq_sta->dbg_fixed_rate) {
+ rs_build_rates_table_from_fixed(mvm, lq_cmd,
+ lq_sta->band,
+ lq_sta->dbg_fixed_rate);
+ ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS;
+ } else
+ rs_build_rates_table(mvm, lq_sta, initial_rate);
+ if (num_of_ant(ant) == 1)
+ lq_cmd->single_stream_ant_msk = ant;
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
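The rs_table layout shown in the example comment earlier in this hunk can be reproduced with a simple two-phase fill: a couple of tries per rate while stepping down inside the starting HT/VHT column, then single-try legacy rates all the way to the bottom. A toy version with plain integer rate indices follows (array size and retry counts are illustrative).

#define RETRY_SLOTS 16

/* Fill entries at table[slot..], repeating each rate index 'tries' times
 * and stepping down after each group; returns the next free slot. */
static int fill_down(int rate_idx, int nrates, int tries,
		     int *table, int slot)
{
	int i, t;

	for (i = 0; i < nrates && slot < RETRY_SLOTS; i++) {
		for (t = 0; t < tries && slot < RETRY_SLOTS; t++)
			table[slot++] = rate_idx;
		if (rate_idx == 0)
			break;
		rate_idx--;	/* next lower rate within the same column */
	}
	return slot;
}

/* Two-phase fill mirroring the layout above: a few retries in the initial
 * column, then a legacy fallback column down to the lowest rate. */
static void build_retry_table(int initial_idx, int legacy_idx,
			      int table[RETRY_SLOTS])
{
	int slot = 0;

	slot = fill_down(initial_idx, 3, 2, table, slot);
	slot = fill_down(legacy_idx, RETRY_SLOTS, 1, table, slot);

	/* Pad any remaining slots with the lowest rate reached so far. */
	while (slot > 0 && slot < RETRY_SLOTS) {
		table[slot] = table[slot - 1];
		slot++;
	}
}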
@@ -2512,31 +2550,81 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
+static int rs_pretty_print_rate(char *buf, const u32 rate)
- u8 valid_tx_ant;
- u8 ant_sel_tx;
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
- lq_sta->dbg_fixed_rate = 0;
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ char *type, *bw;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+ if (!(rate & RATE_MCS_HT_MSK) &&
+ !(rate & RATE_MCS_VHT_MSK)) {
+ int index = iwl_hwrate_to_plcp_idx(rate);
+ return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant), iwl_rate_mcs[index].mbps);
+ if (rate & RATE_MCS_VHT_MSK) {
+ type = "VHT";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HT_MSK) {
+ type = "HT";
+ mcs = rate & RATE_HT_MCS_INDEX_MSK;
+ type = "Unknown"; /* shouldn't happen */
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ bw = "20Mhz";
+ case RATE_MCS_CHAN_WIDTH_40:
+ bw = "40Mhz";
+ case RATE_MCS_CHAN_WIDTH_80:
+ bw = "80Mhz";
+ case RATE_MCS_CHAN_WIDTH_160:
+ bw = "160Mhz";
+ bw = "BAD BW";
+ return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "",
+ (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only.
+ * Once the device starts using a fixed rate, the module must be
+ * reloaded to bring back normal operation.
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+ rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
+ lq_sta->band, &rate);
+ rs_fill_lq_cmd(NULL, NULL, lq_sta, &rate);
+ iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
@@ -2572,15 +2660,14 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
char *buff;
int desc = 0;
int i = 0;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_mvm *mvm;
mvm = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
+ buff = kmalloc(2048, GFP_KERNEL);
if (!buff)
@@ -2595,23 +2682,23 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" :
- is_vht(tbl->lq_type) ? "VHT" : "HT");
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
+ if (!is_legacy(rate)) {
desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ (is_siso(rate)) ? "SISO" : "MIMO2");
- (is_ht20(tbl)) ? "20MHz" :
- (is_ht40(tbl)) ? "40MHz" :
- (is_ht80(tbl)) ? "80Mhz" : "BAD BW");
+ (is_ht20(rate)) ? "20MHz" :
+ (is_ht40(rate)) ? "40MHz" :
+ (is_ht80(rate)) ? "80Mhz" : "BAD BW");
desc += sprintf(buff+desc, " %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
+ (rate->sgi) ? "SGI" : "NGI",
(lq_sta->is_agg) ? "AGG on" : "");
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
desc += sprintf(buff+desc,
- "general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
lq_sta->lq.flags,
lq_sta->lq.mimo_delim,
lq_sta->lq.single_stream_ant_msk,
@@ -2631,19 +2718,10 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.initial_rate_index[3]);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i]));
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i]),
- iwl_rate_mcs[index].mbps);
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- iwl_rate_mcs[index].mbps,
- iwl_rate_mcs[index].mcs);
+ u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+ desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff+desc, r);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -2665,6 +2743,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
buff = kmalloc(1024, GFP_KERNEL);
@@ -2673,16 +2752,17 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
for (i = 0; i < LQ_SIZE; i++) {
tbl = &(lq_sta->lq_info[i]);
"%s type=%d SGI=%d BW=%s DUP=0\n"
- "rate=0x%X\n",
+ "index=%d\n",
lq_sta->active_tbl == i ? "*" : "x",
- tbl->lq_type,
- tbl->is_SGI,
- is_ht20(tbl) ? "20Mhz" :
- is_ht40(tbl) ? "40Mhz" :
- is_ht80(tbl) ? "80Mhz" : "ERR",
+ rate->type,
+ rate->sgi,
+ is_ht20(rate) ? "20Mhz" :
+ is_ht40(rate) ? "40Mhz" :
+ is_ht80(rate) ? "80Mhz" : "ERR",
+ rate->index);
for (j = 0; j < IWL_RATE_COUNT; j++) {
"counter=%d success=%d %%=%d\n",
@@ -2746,6 +2826,7 @@ static struct rate_control_ops rs_mvm_ops = {
.free = rs_free,
.alloc_sta = rs_alloc_sta,
.free_sta = rs_free_sta,
+ .rate_update = rs_rate_update,
.add_sta_debugfs = rs_add_debugfs,
.remove_sta_debugfs = rs_remove_debugfs,
@@ -2778,13 +2859,13 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
if (enable) {
if (mvmsta->tx_protection == 0)
- lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags |= LQ_FLAG_USE_RTS_MSK;
mvmsta->tx_protection++;
mvmsta->tx_protection--;
- lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
- return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+ return iwl_mvm_send_lq_cmd(mvm, lq, false);