status.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
#include "wme.h"
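
/*
 * ieee80211_tx_status_irqsafe() queues the TX status frame for the
 * status tasklet instead of processing it inline, so drivers may
 * report TX status from interrupt context.  Frames that did not set
 * IEEE80211_TX_CTL_REQ_TX_STATUS go on the "unreliable" queue and are
 * dropped first once the combined queue length exceeds
 * IEEE80211_IRQSAFE_QUEUE_LIMIT.
 *
 * A hypothetical driver TX-complete interrupt handler might report
 * status roughly like this (sketch only, local names are made up):
 *
 *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *	ieee80211_tx_info_clear_status(info);
 *	if (hw_reported_ack)
 *		info->flags |= IEEE80211_TX_STAT_ACK;
 *	ieee80211_tx_status_irqsafe(hw, skb);
 */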
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tmp;

	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		ieee80211_free_txskb(hw, skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}

	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
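
/*
 * ieee80211_handle_filtered_frame() deals with frames the hardware
 * filtered because the destination station is asleep: the control
 * info is reset, and the frame is either buffered for delivery when
 * the station wakes up, retried once in software, or dropped.
 */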
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int ac;

	/*
	 * This skb 'survived' a round-trip through the driver, and
	 * hopefully the driver didn't mangle it too badly. However,
	 * we can definitely not rely on the control information
	 * being correct. Clear it so we don't get junk there, and
	 * indicate that it needs new processing, but must not be
	 * modified/encrypted again.
	 */
	memset(&info->control, 0, sizeof(info->control));

	info->control.jiffies = jiffies;
	info->control.vif = &sta->sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
		       IEEE80211_TX_INTFL_RETRANSMISSION;
	info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;

	sta->tx_filtered_count++;

	/*
	 * Clear more-data bit on filtered frames, it might be set
	 * but later frames might time out so it might have to be
	 * clear again ... It's all rather unlikely (this frame
	 * should time out first, right?) but let's not confuse
	 * peers unnecessarily.
	 */
	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		int tid = *p & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Clear EOSP if set, this could happen e.g.
		 * if an absence period (us being a P2P GO)
		 * shortens the SP.
		 */
		if (*p & IEEE80211_QOS_CTL_EOSP)
			*p &= ~IEEE80211_QOS_CTL_EOSP;
		ac = ieee802_1d_to_ac[tid & 7];
	} else {
		ac = IEEE80211_AC_BE;
	}

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up for the next time.
	 */
	set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list, if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *	functions and
	 *  (b) always process RX events before TX status events if ordering
	 *	can be unknown, for example with different interrupt status
	 *	bits.
	 *  (c) if PS mode transitions are manual (i.e. the flag
	 *	%IEEE80211_HW_AP_LINK_PS is set), always process PS state
	 *	changes before calling TX status events if ordering can be
	 *	unknown.
	 */
	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
		skb_queue_tail(&sta->tx_filtered[ac], skb);
		sta_info_recalc_tim(sta);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
		/* Software retry the packet once */
		info->flags |= IEEE80211_TX_INTFL_RETRIED;
		ieee80211_add_pending_skb(local, skb);
		return;
	}

	ps_dbg_ratelimited(sta->sdata,
			   "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
			   skb_queue_len(&sta->tx_filtered[ac]),
			   !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
	ieee80211_free_txskb(&local->hw, skb);
}
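
/*
 * Resend a previously failed BlockAckReq for this TID once another
 * frame to the same peer has been acknowledged.
 */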
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx || !tid_tx->bar_pending)
		return;

	tid_tx->bar_pending = false;
	ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
}
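
/*
 * ieee80211_frame_acked() handles bookkeeping for frames the peer
 * acknowledged: refresh the last-RX timestamp (when the hardware
 * reports ACK status), resend any pending BAR for the frame's TID,
 * and apply SMPS action-frame results to the interface or station.
 */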
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *) skb->data;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
		sta->last_rx = jiffies;

	if (ieee80211_is_data_qos(mgmt->frame_control)) {
		struct ieee80211_hdr *hdr = (void *) skb->data;
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		u16 tid = qc[0] & 0xf;

		ieee80211_check_pending_bar(sta, hdr->addr1, tid);
	}

	if (ieee80211_is_action(mgmt->frame_control) &&
	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
	    ieee80211_sdata_running(sdata)) {
		enum ieee80211_smps_mode smps_mode;

		switch (mgmt->u.action.u.ht_smps.smps_control) {
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			smps_mode = IEEE80211_SMPS_DYNAMIC;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DISABLED:
		default: /* shouldn't happen since we don't send that */
			smps_mode = IEEE80211_SMPS_OFF;
			break;
		}

		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/*
			 * This update looks racy, but isn't -- if we come
			 * here we've definitely got a station that we're
			 * talking to, and on a managed interface that can
			 * only be the AP. And the only other place updating
			 * this variable in managed mode is before association.
			 */
			sdata->smps_mode = smps_mode;
			ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
		} else if (sdata->vif.type == NL80211_IFTYPE_AP ||
			   sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
			sta->known_smps_mode = smps_mode;
		}
	}
}
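
/*
 * Record the starting sequence number of a BlockAckReq that was not
 * acknowledged, so ieee80211_check_pending_bar() can resend it after
 * the next successful transmission on the same TID.
 */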
static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx)
		return;

	tid_tx->failed_bar_ssn = ssn;
	tid_tx->bar_pending = true;
}
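
/*
 * Compute how much headroom the radiotap TX status header built by
 * ieee80211_add_tx_radiotap_header() will need for this frame; the
 * two functions must agree on which fields are present.
 */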
static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
{
	int len = sizeof(struct ieee80211_radiotap_header);

	/* IEEE80211_RADIOTAP_RATE rate */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
					     IEEE80211_TX_RC_VHT_MCS)))
		len += 2;

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	len += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	len += 1;

	/* IEEE80211_RADIOTAP_MCS
	 * IEEE80211_RADIOTAP_VHT */
	if (info->status.rates[0].idx >= 0) {
		if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
			len += 3;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
			len = ALIGN(len, 2) + 12;
	}

	return len;
}
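
/*
 * Prepend a radiotap header describing the TX status (rate, TX flags,
 * retry count, and MCS/VHT details where applicable) so the frame can
 * be handed to monitor interfaces.  The caller must have verified that
 * ieee80211_tx_radiotap_len() bytes of headroom are available.
 */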
static void
ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
				 struct ieee80211_supported_band *sband,
				 struct sk_buff *skb, int retry_count,
				 int rtap_len, int shift)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 txflags;

	rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);

	memset(rthdr, 0, rtap_len);
	rthdr->it_len = cpu_to_le16(rtap_len);
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
	pos = (unsigned char *)(rthdr + 1);

	/*
	 * XXX: Once radiotap gets the bitmap reset thing the vendor
	 *	extensions proposal contains, we can actually report
	 *	the whole set of tries we did.
	 */

	/* IEEE80211_RADIOTAP_RATE */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
					     IEEE80211_TX_RC_VHT_MCS))) {
		u16 rate;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		rate = sband->bitrates[info->status.rates[0].idx].bitrate;
		*pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
		/* padding for tx flags */
		pos += 2;
	}

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	txflags = 0;
	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;

	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		txflags |= IEEE80211_RADIOTAP_F_TX_RTS;

	put_unaligned_le16(txflags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	/* for now report the total retry_count */
	*pos = retry_count;
	pos++;

	if (info->status.rates[0].idx < 0)
		return;

	/* IEEE80211_RADIOTAP_MCS
	 * IEEE80211_RADIOTAP_VHT */
	if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
			 IEEE80211_RADIOTAP_MCS_HAVE_GI |
			 IEEE80211_RADIOTAP_MCS_HAVE_BW;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
			pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		pos[2] = info->status.rates[0].idx;
		pos += 3;
	} else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
		u16 known = local->hw.radiotap_vht_details &
			(IEEE80211_RADIOTAP_VHT_KNOWN_GI |
			 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);

		/* required alignment from rthdr */
		pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);

		/* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
		put_unaligned_le16(known, pos);
		pos += 2;

		/* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
		if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		pos++;

		/* u8 bandwidth */
		if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*pos = 1;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*pos = 4;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*pos = 11;
		else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
			*pos = 0;
		pos++;

		/* u8 mcs_nss[4] */
		*pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
			ieee80211_rate_get_vht_nss(&info->status.rates[0]);
		pos += 4;

		/* u8 coding */
		pos++;
		/* u8 group_id */
		pos++;
		/* u16 partial_aid */
		pos += 2;
	}
}

/*
 * Handles the TX status for TDLS teardown frames.
 * If the frame wasn't ACKed by the peer, it will be re-sent through the AP.
 */
static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
					struct ieee80211_sub_if_data *sdata,
					struct sk_buff *skb, u32 flags)
{
	struct sk_buff *teardown_skb;
	struct sk_buff *orig_teardown_skb;
	bool is_teardown = false;

	/* Get the teardown data we need and free the lock */
	spin_lock(&sdata->u.mgd.teardown_lock);
	teardown_skb = sdata->u.mgd.teardown_skb;
	orig_teardown_skb = sdata->u.mgd.orig_teardown_skb;
	if ((skb == orig_teardown_skb) && teardown_skb) {
		sdata->u.mgd.teardown_skb = NULL;
		sdata->u.mgd.orig_teardown_skb = NULL;
		is_teardown = true;
	}
	spin_unlock(&sdata->u.mgd.teardown_lock);

	if (is_teardown) {
		/* This mechanism relies on being able to get ACKs */
		WARN_ON(!(local->hw.flags &
			  IEEE80211_HW_REPORTS_TX_ACK_STATUS));

		/* Check if peer has ACKed */
		if (flags & IEEE80211_TX_STAT_ACK) {
			dev_kfree_skb_any(teardown_skb);
		} else {
			tdls_dbg(sdata,
				 "TDLS Resending teardown through AP\n");

			ieee80211_subif_start_xmit(teardown_skb, skb->dev);
		}
	}
}
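
/*
 * Report status for an skb that has been handed back by the driver:
 * complete nl80211/cfg80211 TX status for management and off-channel
 * frames, handle TDLS teardown retransmission, and complete any
 * pending socket wifi-ack request via skb_complete_wifi_ack().
 */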
static void ieee80211_report_used_skb(struct ieee80211_local *local,
				      struct sk_buff *skb, bool dropped)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	bool acked = info->flags & IEEE80211_TX_STAT_ACK;

	if (dropped)
		acked = false;

	if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
			   IEEE80211_TX_INTFL_MLME_CONN_TX)) {
		struct ieee80211_sub_if_data *sdata = NULL;
		struct ieee80211_sub_if_data *iter_sdata;
		u64 cookie = (unsigned long)skb;

		rcu_read_lock();

		if (skb->dev) {
			list_for_each_entry_rcu(iter_sdata, &local->interfaces,
						list) {
				if (!iter_sdata->dev)
					continue;

				if (skb->dev == iter_sdata->dev) {
					sdata = iter_sdata;
					break;
				}
			}
		} else {
			sdata = rcu_dereference(local->p2p_sdata);
		}

		if (!sdata) {
			skb->dev = NULL;
		} else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
			unsigned int hdr_size =
				ieee80211_hdrlen(hdr->frame_control);

			/* Check to see if packet is a TDLS teardown packet */
			if (ieee80211_is_data(hdr->frame_control) &&
			    (ieee80211_get_tdls_action(skb, hdr_size) ==
			     WLAN_TDLS_TEARDOWN))
				ieee80211_tdls_td_tx_handle(local, sdata, skb,
							    info->flags);
			else
				ieee80211_mgd_conn_tx_status(sdata,
							     hdr->frame_control,
							     acked);
		} else if (ieee80211_is_nullfunc(hdr->frame_control) ||
			   ieee80211_is_qos_nullfunc(hdr->frame_control)) {
			cfg80211_probe_status(sdata->dev, hdr->addr1,
					      cookie, acked, GFP_ATOMIC);
		} else {
			cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
						skb->len, acked, GFP_ATOMIC);
		}

		rcu_read_unlock();
	}

	if (unlikely(info->ack_frame_id)) {
		struct sk_buff *ack_skb;
		unsigned long flags;

		spin_lock_irqsave(&local->ack_status_lock, flags);
		ack_skb = idr_find(&local->ack_status_frames,
				   info->ack_frame_id);
		if (ack_skb)
			idr_remove(&local->ack_status_frames,
				   info->ack_frame_id);
		spin_unlock_irqrestore(&local->ack_status_lock, flags);

		if (ack_skb) {
			if (!dropped) {
				/* consumes ack_skb */
				skb_complete_wifi_ack(ack_skb, acked);
			} else {
				dev_kfree_skb_any(ack_skb);
			}
		}
	}
}

/*
 * Measure Tx frame completion and removal time for Tx latency statistics
 * calculation. A single Tx frame latency should be measured from when it
 * enters the kernel until we receive the Tx complete confirmation
 * indication and remove the skb.
 */
static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
					    struct sk_buff *skb,
					    struct sta_info *sta,
					    struct ieee80211_hdr *hdr)
{
	u32 msrmnt;
	u16 tid;
	u8 *qc;
	int i, bin_range_count;
	u32 *bin_ranges;
	__le16 fc;
	struct ieee80211_tx_latency_stat *tx_lat;
	struct ieee80211_tx_latency_bin_ranges *tx_latency;
	ktime_t skb_arv = skb->tstamp;

	tx_latency = rcu_dereference(local->tx_latency);

	/* assert Tx latency stats are enabled & frame arrived when enabled */
	if (!tx_latency || !ktime_to_ns(skb_arv))
		return;

	fc = hdr->frame_control;

	if (!ieee80211_is_data(fc)) /* make sure it is a data frame */
		return;

	/* get frame tid */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
	} else {
		tid = 0;
	}

	tx_lat = &sta->tx_lat[tid];

	/* Calculate the latency */
	msrmnt = ktime_to_ms(ktime_sub(ktime_get(), skb_arv));

	if (tx_lat->max < msrmnt) /* update stats */
		tx_lat->max = msrmnt;
	tx_lat->counter++;
	tx_lat->sum += msrmnt;

	if (!tx_lat->bins) /* bins not activated */
		return;

	/* count how many Tx frames transmitted with the appropriate latency */
	bin_range_count = tx_latency->n_ranges;
	bin_ranges = tx_latency->ranges;

	for (i = 0; i < bin_range_count; i++) {
		if (msrmnt <= bin_ranges[i]) {
			tx_lat->bins[i]++;
			break;
		}
	}
	if (i == bin_range_count) /* msrmnt is bigger than the biggest range */
		tx_lat->bins[i]++;
}

/*
 * Use a static threshold for now, best value to be determined
 * by testing ...
 * Should it depend on:
 *  - on # of retransmissions
 *  - current throughput (higher value for higher tpt)?
 */
#define STA_LOST_PKT_THRESHOLD		50
#define STA_LOST_TDLS_PKT_THRESHOLD	10
#define STA_LOST_TDLS_PKT_TIME		(10*HZ) /* 10secs since last ACK */
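
/*
 * Count consecutive lost frames for this station and, once the
 * relevant threshold is crossed (STA_LOST_PKT_THRESHOLD, or the TDLS
 * variants above for TDLS peers), raise a cfg80211 CQM packet-loss
 * event.
 */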
static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	sta->lost_packets++;
	if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD)
		return;

	/*
	 * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
	 * of the last packets were lost, and that no ACK was received in the
	 * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
	 * mechanism.
	 */
	if (sta->sta.tdls &&
	    (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
	     time_before(jiffies,
			 sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME)))
		return;

	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
				    sta->lost_packets, GFP_ATOMIC);
	sta->lost_packets = 0;
}
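
/*
 * ieee80211_tx_status() is the main TX status entry point for drivers
 * (the irqsafe variant above defers to it via the status tasklet).  It
 * looks up the destination station, handles power-save filtered frames
 * and failed BlockAckReqs, feeds rate control and mesh metrics, updates
 * the dot11 counters, reports the frame via ieee80211_report_used_skb(),
 * and finally clones the frame to any monitor interfaces with a TX
 * radiotap header, or frees it.
 */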
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *skb2;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	__le16 fc;
	struct ieee80211_supported_band *sband;
	struct ieee80211_sub_if_data *sdata;
	struct net_device *prev_dev = NULL;
	struct sta_info *sta, *tmp;
	int retry_count = -1, i;
	int rates_idx = -1;
	bool send_to_cooked;
	bool acked;
	struct ieee80211_bar *bar;
	int rtap_len;
	int shift = 0;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
		    !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
			/* just the first aggr frame carry status info */
			info->status.rates[i].idx = -1;
			info->status.rates[i].count = 0;
			break;
		} else if (info->status.rates[i].idx < 0) {
			break;
		} else if (i >= hw->max_report_rates) {
			/* the HW cannot have attempted that rate */
			info->status.rates[i].idx = -1;
			info->status.rates[i].count = 0;
			break;
		}

		retry_count += info->status.rates[i].count;
	}
	rates_idx = i - 1;

	if (retry_count < 0)
		retry_count = 0;

	rcu_read_lock();

	sband = local->hw.wiphy->bands[info->band];
	fc = hdr->frame_control;

	for_each_sta_info(local, hdr->addr1, sta, tmp) {
		/* skip wrong virtual interface */
		if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
			continue;

		shift = ieee80211_vif_get_shift(&sta->sdata->vif);

		if (info->flags & IEEE80211_TX_STATUS_EOSP)
			clear_sta_flag(sta, WLAN_STA_SP);

		acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
			/*
			 * The STA is in power save mode, so assume
			 * that this TX packet failed because of that.
			 */
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		}

		/* mesh Peer Service Period support */
		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
		    ieee80211_is_data_qos(fc))
			ieee80211_mpsp_trigger_process(
					ieee80211_get_qos_ctl(hdr),
					sta, true, acked);

		if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
		    (ieee80211_is_data(hdr->frame_control)) &&
		    (rates_idx != -1))
			sta->last_tx_rate = info->status.rates[rates_idx];

		if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
		    (ieee80211_is_data_qos(fc))) {
			u16 tid, ssn;
			u8 *qc;

			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;
			ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
						& IEEE80211_SCTL_SEQ);
			ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
					   tid, ssn);
		}

		if (!acked && ieee80211_is_back_req(fc)) {
			u16 tid, control;

			/*
			 * BAR failed, store the last SSN and retry sending
			 * the BAR when the next unicast transmission on the
			 * same TID succeeds.
			 */
			bar = (struct ieee80211_bar *) skb->data;
			control = le16_to_cpu(bar->control);
			if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
				u16 ssn = le16_to_cpu(bar->start_seq_num);

				tid = (control &
				       IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
				      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;

				ieee80211_set_bar_pending(sta, tid, ssn);
			}
		}

		if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		} else {
			if (!acked)
				sta->tx_retry_failed++;
			sta->tx_retry_count += retry_count;
		}

		rate_control_tx_status(local, sband, sta, skb);
		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
			ieee80211s_update_metric(local, sta, skb);

		if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
			ieee80211_frame_acked(sta, skb);

		if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
		    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
			ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
						acked, info->status.tx_time);

		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
			if (info->flags & IEEE80211_TX_STAT_ACK) {
				if (sta->lost_packets)
					sta->lost_packets = 0;

				/* Track when last TDLS packet was ACKed */
				if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
					sta->last_tdls_pkt_time = jiffies;
			} else {
				ieee80211_lost_packet(sta, skb);
			}
		}

		if (acked)
			sta->last_ack_signal = info->status.ack_signal;

		/*
		 * Measure frame removal for tx latency
		 * statistics calculation
		 */
		ieee80211_tx_latency_end_msrmnt(local, skb, sta, hdr);
	}

	rcu_read_unlock();

	ieee80211_led_tx(local);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */
	if (info->flags & IEEE80211_TX_STAT_ACK) {
		if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
			local->dot11TransmittedFrameCount++;
			if (is_multicast_ether_addr(hdr->addr1))
				local->dot11MulticastTransmittedFrameCount++;
			if (retry_count > 0)
				local->dot11RetryCount++;
			if (retry_count > 1)
				local->dot11MultipleRetryCount++;
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    ieee80211_is_data(fc) ||
		    ieee80211_is_mgmt(fc))
			local->dot11TransmittedFragmentCount++;
	} else {
		if (ieee80211_is_first_frag(hdr->seq_ctrl))
			local->dot11FailedCount++;
	}

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
	    (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    local->ps_sdata && !(local->scanning)) {
		if (info->flags & IEEE80211_TX_STAT_ACK) {
			local->ps_sdata->u.mgd.flags |=
					IEEE80211_STA_NULLFUNC_ACKED;
		} else
			mod_timer(&local->dynamic_ps_timer, jiffies +
					msecs_to_jiffies(10));
	}

	ieee80211_report_used_skb(local, skb, false);

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/* Need to make a copy before skb->cb gets cleared */
	send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
			 !(ieee80211_is_data(fc));

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
		dev_kfree_skb(skb);
		return;
	}

	/* send frame to monitor interfaces now */
	rtap_len = ieee80211_tx_radiotap_len(info);
	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
		pr_err("ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}
	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
					 rtap_len, shift);

	/* XXX: is this sufficient for BPF? */
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
			if (!ieee80211_sdata_running(sdata))
				continue;

			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
			    !send_to_cooked)
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);
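
/*
 * ieee80211_report_low_ack() lets drivers that track ACK failures
 * themselves (e.g. in firmware) raise the same cfg80211 CQM
 * packet-loss event that ieee80211_lost_packet() generates for
 * mac80211-tracked losses.
 */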
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
				    num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);
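
/*
 * ieee80211_free_txskb() should be used (rather than dev_kfree_skb())
 * for any TX skb that mac80211 handed to the driver but that was never
 * transmitted, so that nl80211/cfg80211 status listeners still get a
 * "not acked" report via ieee80211_report_used_skb().
 */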
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	ieee80211_report_used_skb(local, skb, true);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);
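
/*
 * Convenience helper to flush a whole queue of undelivered TX frames
 * through ieee80211_free_txskb(), e.g. when hardware queues are torn
 * down.
 */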
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
			      struct sk_buff_head *skbs)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skbs)))
		ieee80211_free_txskb(hw, skb);
}