@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	struct iwl_mvm_mc_iter_data *data = _data;
 	struct iwl_mvm *mvm = data->mvm;
 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+	struct iwl_host_cmd hcmd = {
+		.id = MCAST_FILTER_CMD,
+		.flags = CMD_ASYNC,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
 	int ret, len;
 
 	/* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+	hcmd.len[0] = len;
+	hcmd.data[0] = cmd;
+
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
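
The two hunks above stop building the multicast filter through iwl_mvm_send_cmd_pdu(), which copies the payload into the host-command buffer, and instead send a struct iwl_host_cmd marked IWL_HCMD_DFL_NOCOPY, so the potentially large address list is handed to the transport in place. This is safe here because the buffer, mvm->mcast_filter_cmd, persists in driver state rather than on the stack. The payload length is still rounded up to a 4-byte boundary. Below is a minimal userspace sketch of that length computation; the struct is a simplified stand-in, not the real iwl_mcast_filter_cmd layout.

#include <stdio.h>

/* Userspace restatement of the kernel macros used above. */
#define ETH_ALEN 6
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Simplified stand-in for the fixed part of the filter command;
 * the real command ends in a flexible array of MAC addresses. */
struct mcast_filter_hdr {
	unsigned char filter_own;
	unsigned char port_id;
	unsigned char count;	/* number of addresses that follow */
	unsigned char pass_all;
	unsigned char bssid[ETH_ALEN];
	unsigned char reserved[2];
};

int main(void)
{
	/* As in the patch: header plus count addresses, padded to 4 bytes. */
	for (unsigned int count = 0; count <= 4; count++) {
		unsigned int len = roundup(sizeof(struct mcast_filter_hdr) +
					   count * ETH_ALEN, 4);
		printf("count=%u -> len=%u bytes\n", count, len);
	}
	return 0;
}
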
@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
 	if (!cmd)
 		goto out;
 
+	if (changed_flags & FIF_ALLMULTI)
+		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+	if (cmd->pass_all)
+		cmd->count = 0;
+
 	iwl_mvm_recalc_multicast(mvm);
 out:
 	mutex_unlock(&mvm->mutex);
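
The hunk above makes iwl_mvm_configure_filter() honor FIF_ALLMULTI: when mac80211 toggles the flag, pass_all mirrors the requested state, and a pass-all filter drops its address list (count = 0), since individual entries would be redundant. A userspace sketch of the accept decision this encodes follows; the helper is hypothetical and only illustrates the semantics, it is not driver API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct mcast_filter {
	bool pass_all;		/* accept every multicast frame */
	int count;		/* valid entries in addr_list */
	unsigned char addr_list[8][ETH_ALEN];
};

/* Hypothetical model of the match the firmware performs. */
static bool mcast_filter_accepts(const struct mcast_filter *f,
				 const unsigned char da[ETH_ALEN])
{
	if (f->pass_all)
		return true;	/* count is forced to 0; the list is moot */
	for (int i = 0; i < f->count; i++)
		if (!memcmp(f->addr_list[i], da, ETH_ALEN))
			return true;
	return false;
}

int main(void)
{
	const unsigned char da[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0, 0, 1 };
	struct mcast_filter f = { .pass_all = false, .count = 0 };

	printf("empty list: %d\n", mcast_filter_accepts(&f, da));	/* 0 */
	f.pass_all = true;
	printf("pass_all:   %d\n", mcast_filter_accepts(&f, da));	/* 1 */
	return 0;
}
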
@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
 			 * queues, so we should never get a second deferred
 			 * frame for the RA/TID.
 			 */
-			iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
 			ieee80211_free_txskb(mvm->hw, skb);
 		}
 	}
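
iwl_mvm_start_mac_queues() expects a bitmap of mac80211 queues, not a queue index; passing the bare hw_queue number meant, for example, that queue 3 was interpreted as the mask 0b011 and woke queues 0 and 1 instead. Wrapping the index in BIT() fixes that. The standalone sketch below contrasts the two interpretations using the kernel's BIT() macro, restated for userspace.

#include <stdio.h>

/* The kernel's BIT() macro, restated for userspace. */
#define BIT(n) (1UL << (n))

static void start_queues(unsigned long qmask)
{
	for (int q = 0; q < 8; q++)
		if (qmask & BIT(q))
			printf("  waking queue %d\n", q);
}

int main(void)
{
	int hw_queue = 3;

	puts("buggy: passing the index as if it were a mask");
	start_queues(hw_queue);		/* 0b011: wakes queues 0 and 1 */

	puts("fixed: passing BIT(index)");
	start_queues(BIT(hw_queue));	/* 0b1000: wakes queue 3 only */
	return 0;
}
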
@@ -3975,6 +3989,43 @@ out_unlock:
 	return ret;
 }
 
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+	if (drop) {
+		if (iwl_mvm_has_new_tx_api(mvm))
+			/* TODO new tx api */
+			WARN_ONCE(1,
+				  "Need to implement flush TX queue\n");
+		else
+			iwl_mvm_flush_tx_path(mvm,
+					      iwl_mvm_flushable_queues(mvm) & queues,
+					      0);
+	} else {
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			struct ieee80211_sta *sta;
+			int i;
+
+			mutex_lock(&mvm->mutex);
+
+			for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+				sta = rcu_dereference_protected(
+						mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+				if (IS_ERR_OR_NULL(sta))
+					continue;
+
+				iwl_mvm_wait_sta_queues_empty(mvm,
+						iwl_mvm_sta_from_mac80211(sta));
+			}
+
+			mutex_unlock(&mvm->mutex);
+		} else {
+			iwl_trans_wait_tx_queues_empty(mvm->trans,
+						       queues);
+		}
+	}
+}
+
 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif, u32 queues, bool drop)
 {
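
In the wait path (drop == false) with the new TX API, iwl_mvm_flush_no_vif() walks the fw_id_to_mac_id station table under mvm->mutex: rcu_dereference_protected() with lockdep_is_held() asserts that the mutex, not an RCU read-side critical section, legitimizes the access, and IS_ERR_OR_NULL() skips slots that are empty or hold an ERR_PTR sentinel. The userspace sketch below restates the ERR_PTR/IS_ERR_OR_NULL convention those checks rely on; MAX_ERRNO matches the kernel's value, the rest is illustrative.

#include <stdio.h>

#define MAX_ERRNO 4095

/* Kernel-style pointer/errno multiplexing: the top page of the
 * address space encodes -errno values as "poisoned" pointers. */
static inline void *ERR_PTR(long error) { return (void *)error; }

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int station;
	void *slots[3] = { NULL, ERR_PTR(-16 /* -EBUSY */), &station };

	for (int i = 0; i < 3; i++) {
		if (IS_ERR_OR_NULL(slots[i])) {	/* empty or error slot */
			printf("slot %d: skipped\n", i);
			continue;
		}
		printf("slot %d: valid station %p\n", i, slots[i]);
	}
	return 0;
}
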
@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	int i;
 	u32 msk = 0;
 
-	if (!vif || vif->type != NL80211_IFTYPE_STATION)
+	if (!vif) {
+		iwl_mvm_flush_no_vif(mvm, queues, drop);
+		return;
+	}
+
+	if (vif->type != NL80211_IFTYPE_STATION)
 		return;
 
 	/* Make sure we're done with the deferred traffic before flushing */
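
With this last hunk a NULL vif is no longer ignored: the flush is routed to the new iwl_mvm_flush_no_vif() helper, preserving mac80211's contract for the flush op, where drop == true means discard pending frames and drop == false means wait for the hardware queues to drain. The toy model below illustrates that contract only; it is not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of mac80211's flush semantics:
 * drop=true  -> discard whatever is still queued,
 * drop=false -> block until the queues drain on their own. */
struct queue { int pending; };

static void flush(struct queue *q, bool drop)
{
	if (drop) {
		printf("dropping %d pending frames\n", q->pending);
		q->pending = 0;
	} else {
		while (q->pending)	/* stand-in for waiting on the hw */
			q->pending--;
		puts("queues drained");
	}
}

int main(void)
{
	struct queue q = { .pending = 5 };

	flush(&q, false);	/* wait path */
	q.pending = 5;
	flush(&q, true);	/* drop path */
	return 0;
}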