rt2x00queue.c

/*
 * Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
 * Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 * Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
 * <http://rt2x00.serialmonkey.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Module: rt2x00lib
 * Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;
        unsigned int frame_size;
        unsigned int head_size = 0;
        unsigned int tail_size = 0;

        /*
         * The frame size includes the descriptor size, because the
         * hardware receives the frame directly into the skbuffer.
         */
        frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

        /*
         * The payload should be aligned to a 4-byte boundary,
         * this means we need at least 3 bytes for moving the frame
         * into the correct offset.
         */
        head_size = 4;

        /*
         * For IV/EIV/ICV assembly we must make sure there are
         * at least 8 bytes available in the headroom for IV/EIV
         * and 8 bytes for ICV data in the tailroom.
         */
        if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
                head_size += 8;
                tail_size += 8;
        }

        /*
         * Allocate skbuffer.
         */
        skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
        if (!skb)
                return NULL;

        /*
         * Make sure we have the requested number of bytes
         * available in the head and tail.
         */
        skb_reserve(skb, head_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
                dma_addr_t skb_dma;

                skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
                                         DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
                        dev_kfree_skb_any(skb);
                        return NULL;
                }

                skbdesc->skb_dma = skb_dma;
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}
int rt2x00queue_map_txskb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
                return -ENOMEM;

        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
        if (!entry->skb)
                return;

        rt2x00queue_unmap_skb(entry);
        dev_kfree_skb_any(entry->skb);
        entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, 0);

        if (!align)
                return;
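        /*
         * skb->data is 'align' bytes past a 4-byte boundary, so moving it
         * back by 'align' bytes (into the headroom reserved at allocation
         * time) makes it aligned. Copy the frame down to the aligned
         * address and trim the stale bytes off the tail.
         */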
        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        unsigned int payload_length = skb->len - header_length;
        unsigned int header_align = ALIGN_SIZE(skb, 0);
        unsigned int payload_align = ALIGN_SIZE(skb, header_length);
        unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
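        /*
         * l2pad is the number of pad bytes that end up between the 802.11
         * header and the payload, so that the payload starts on a 4-byte
         * boundary regardless of the header length.
         */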
        /*
         * Adjust the header alignment if the payload needs to be moved more
         * than the header.
         */
        if (payload_align > header_align)
                header_align += 4;

        /* There is nothing to do if no alignment is needed */
        if (!header_align)
                return;

        /* Reserve the amount of space needed in front of the frame */
        skb_push(skb, header_align);

        /*
         * Move the header.
         */
        memmove(skb->data, skb->data + header_align, header_length);

        /* Move the payload, if present and if required */
        if (payload_length && payload_align)
                memmove(skb->data + header_length + l2pad,
                        skb->data + header_length + l2pad + payload_align,
                        payload_length);

        /* Trim the skb to the correct size */
        skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        /*
         * L2 padding is only present if the skb contains more than just the
         * IEEE 802.11 header.
         */
        unsigned int l2pad = (skb->len > header_length) ?
                             L2PAD_SIZE(header_length) : 0;

        if (!l2pad)
                return;

        memmove(skb->data + l2pad, skb->data, header_length);
        skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
                                                 struct sk_buff *skb,
                                                 struct txentry_desc *txdesc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        u16 seqno;

        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;

        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

        if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
                /*
                 * rt2800 has a H/W (or F/W) bug: the device incorrectly
                 * increases the seqno on retransmitted data (non-QoS)
                 * frames. To work around the problem let's generate the
                 * seqno in software if QoS is disabled.
                 */
                if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
                        __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                else
                        /* H/W will generate sequence number */
                        return;
        }

        /*
         * The hardware is not able to insert a sequence number. Assign a
         * software generated one here.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
         *
         * A secondary problem exists for drivers that cannot toggle
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                seqno = atomic_add_return(0x10, &intf->seqno);
        else
                seqno = atomic_read(&intf->seqno);
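        /*
         * The sequence number occupies bits 4-15 of the sequence control
         * field, which is why the counter above is incremented in steps
         * of 0x10. Keep only the fragment number, then merge in the new
         * sequence number.
         */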
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(seqno);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
                                                  struct sk_buff *skb,
                                                  struct txentry_desc *txdesc,
                                                  const struct rt2x00_rate *hwrate)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                txdesc->u.plcp.ifs = IFS_BACKOFF;
        else
                txdesc->u.plcp.ifs = IFS_SIFS;

        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
        data_length = skb->len + 4;
        data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        txdesc->u.plcp.signal = hwrate->plcp;
        txdesc->u.plcp.service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
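                /*
                 * For OFDM rates the PLCP LENGTH field is the 12-bit frame
                 * length in bytes, split below into a 6-bit high and a
                 * 6-bit low part.
                 */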
                txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
                txdesc->u.plcp.length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = GET_DURATION_RES(data_length, hwrate->bitrate);
                duration = GET_DURATION(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension bit
                         * in the SERVICE field, which disambiguates the
                         * rounded-up duration at 11 Mbps.
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->u.plcp.service |= 0x80;
                }

                txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
                txdesc->u.plcp.length_low = duration & 0xff;

                /*
                 * When short preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->u.plcp.signal |= 0x08;
        }
}
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                                                struct sk_buff *skb,
                                                struct txentry_desc *txdesc,
                                                struct ieee80211_sta *sta,
                                                const struct rt2x00_rate *hwrate)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;

        if (sta) {
                txdesc->u.ht.mpdu_density =
                    sta->ht_cap.ampdu_density;

                sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
        }

        /*
         * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
         * MCS rate to be used.
         */
        if (txrate->flags & IEEE80211_TX_RC_MCS) {
                txdesc->u.ht.mcs = txrate->idx;

                /*
                 * MIMO PS should be set to 1 for STAs using dynamic SM PS
                 * when using more than one tx stream (>MCS7).
                 */
                if (sta && txdesc->u.ht.mcs > 7 &&
                    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
        } else {
                txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
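                /*
                 * For legacy rates, bit 3 of the MCS value appears to
                 * request the short-preamble variant of the rate.
                 */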
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->u.ht.mcs |= 0x08;
        }

        if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
                if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
                        txdesc->u.ht.txop = TXOP_SIFS;
                else
                        txdesc->u.ht.txop = TXOP_BACKOFF;

                /* Leave all other settings at zero. */
                return;
        }

        txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */

        /*
         * Only one STBC stream is supported for now.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_STBC)
                txdesc->u.ht.stbc = 1;

        /*
         * This frame is eligible for an AMPDU, however, don't aggregate
         * frames that are intended to probe a specific tx rate.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
            !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

        /*
         * Set 40 MHz mode if necessary (for legacy rates this will
         * duplicate the frame to both channels).
         */
        if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
            txrate->flags & IEEE80211_TX_RC_DUP_DATA)
                __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

        if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
                __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

        /*
         * Determine IFS values
         * - Use TXOP_BACKOFF for management frames except beacons
         * - Use TXOP_SIFS for fragment bursts
         * - Use TXOP_HTTXOP for everything else
         *
         * Note: rt2800 devices won't use CTS protection (if used)
         * for frames not transmitted with TXOP_HTTXOP
         */
        if (ieee80211_is_mgmt(hdr->frame_control) &&
            !ieee80211_is_beacon(hdr->frame_control))
                txdesc->u.ht.txop = TXOP_BACKOFF;
        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
                txdesc->u.ht.txop = TXOP_SIFS;
        else
                txdesc->u.ht.txop = TXOP_HTTXOP;
}
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                             struct sk_buff *skb,
                                             struct txentry_desc *txdesc,
                                             struct ieee80211_sta *sta)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_rate *rate;
        const struct rt2x00_rate *hwrate = NULL;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Header and frame information.
         */
        txdesc->length = skb->len;
        txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame.
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
        if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending.
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Check if more frames (!= fragments) are pending.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
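        /*
         * Mark the first fragment. When an RTS frame precedes the data
         * frame, the RTS is the frame that opens the exchange, so the
         * data frame itself is presumably not flagged as the first one.
         */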
        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
            !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

        /*
         * Determine rate modulation.
         */
        if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
                txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
        else if (txrate->flags & IEEE80211_TX_RC_MCS)
                txdesc->rate_mode = RATE_MODE_HT_MIX;
        else {
                rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
                hwrate = rt2x00_get_rate(rate->hw_value);
                if (hwrate->flags & DEV_RATE_OFDM)
                        txdesc->rate_mode = RATE_MODE_OFDM;
                else
                        txdesc->rate_mode = RATE_MODE_CCK;
        }

        /*
         * Apply TX descriptor handling by components.
         */
        rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
        rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
                                                    sta, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
                                                      hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        /*
         * This should not happen, we already checked the entry
         * was ours. When the hardware disagrees there has been
         * a queue corruption!
         */
        if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
                     rt2x00dev->ops->lib->get_entry_state(entry))) {
                rt2x00_err(rt2x00dev,
                           "Corrupt queue %d, accessing entry which is not ours\n"
                           "Please file bug report to %s\n",
                           entry->queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the requested extra tx headroom in front of the skb.
         */
        skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
        memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

        /*
         * Call the driver's write_tx_data function, if it exists.
         */
        if (rt2x00dev->ops->lib->write_tx_data)
                rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

        /*
         * Map the skb to DMA.
         */
        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
            rt2x00queue_map_txskb(entry))
                return -ENOMEM;

        return 0;
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                            struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;

        queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

        /*
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
                                      struct txentry_desc *txdesc)
{
        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 2) Rule 1 can be broken when the number of available entries
         *    in the queue drops below a certain threshold.
         */
        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                queue->rt2x00dev->ops->lib->kick_queue(queue);
}
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_bar *bar = (void *) (entry->skb->data +
                                    rt2x00dev->extra_tx_headroom);
        struct rt2x00_bar_list_entry *bar_entry;

        if (likely(!ieee80211_is_back_req(bar->frame_control)))
                return;

        bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

        /*
         * If the alloc fails we still send the BAR out but just don't track
         * it in our bar list. As a result we will report it back to mac80211
         * as failed.
         */
        if (!bar_entry)
                return;

        bar_entry->entry = entry;
        bar_entry->block_acked = 0;

        /*
         * Copy the relevant parts of the 802.11 BAR into our check list
         * such that we can use RCU for less overhead in the RX path, since
         * sending BARs and processing the corresponding BlockAck should be
         * the exception.
         */
        memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
        memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
        bar_entry->control = bar->control;
        bar_entry->start_seq_num = bar->start_seq_num;

        /*
         * Insert BAR into our BAR check list.
         */
        spin_lock_bh(&rt2x00dev->bar_list_lock);
        list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
        spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               struct ieee80211_sta *sta, bool local)
{
        struct ieee80211_tx_info *tx_info;
        struct queue_entry *entry;
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;
        u8 rate_idx, rate_flags;
        int ret = 0;

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

        /*
         * All information is retrieved from the skb->cb array,
         * now we should claim ownership of the driver part of that
         * array, preserving the bitrate index and flags.
         */
        tx_info = IEEE80211_SKB_CB(skb);
        rate_idx = tx_info->control.rates[0].idx;
        rate_flags = tx_info->control.rates[0].flags;
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->tx_rate_idx = rate_idx;
        skbdesc->tx_rate_flags = rate_flags;

        if (local)
                skbdesc->flags |= SKBDESC_NOT_MAC80211;

        /*
         * When hardware encryption is supported, and this frame
         * is to be encrypted, we should strip the IV/EIV data from
         * the frame so we can provide it to the driver separately.
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
                if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
                else
                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
        }

        /*
         * When DMA allocation is required we should guarantee to the
         * driver that the DMA is aligned to a 4-byte boundary.
         * However some drivers require L2 padding to pad the payload
         * rather than the header. This could be a requirement for
         * PCI and USB devices, while header alignment is only valid
         * for PCI devices.
         */
        if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
                rt2x00queue_insert_l2pad(skb, txdesc.header_length);
        else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
                rt2x00queue_align_frame(skb);

        /*
         * This function must be called with bh disabled.
         */
        spin_lock(&queue->tx_lock);

        if (unlikely(rt2x00queue_full(queue))) {
                rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
                           queue->qid);
                ret = -ENOBUFS;
                goto out;
        }

        entry = rt2x00queue_get_entry(queue, Q_INDEX);

        if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
                                      &entry->flags))) {
                rt2x00_err(queue->rt2x00dev,
                           "Arrived at non-free entry in the non-full queue %d\n"
                           "Please file bug report to %s\n",
                           queue->qid, DRV_PROJECT);
                ret = -EINVAL;
                goto out;
        }

        skbdesc->entry = entry;
        entry->skb = skb;

        /*
         * It could be possible that the queue was corrupted and this
         * call failed. Since we always return NETDEV_TX_OK to mac80211,
         * this frame will simply be dropped.
         */
        if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                entry->skb = NULL;
                ret = -EIO;
                goto out;
        }

        /*
         * Put BlockAckReqs into our check list for driver BA processing.
         */
        rt2x00queue_bar_check(entry);
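        /*
         * Publish the frame: mark the entry as pending, advance the
         * Q_INDEX producer index, hand the descriptor to the driver and
         * finally kick the queue when this ends a burst (or the queue is
         * almost full).
         */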
        set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(entry, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
        rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
        spin_unlock(&queue->tx_lock);
        return ret;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
                             struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        mutex_lock(&intf->beacon_skb_mutex);

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        /*
         * Clear beacon (single bssid devices don't need to clear the beacon
         * since the beacon queue will get stopped anyway).
         */
        if (rt2x00dev->ops->lib->clear_beacon)
                rt2x00dev->ops->lib->clear_beacon(intf->beacon);

        mutex_unlock(&intf->beacon_skb_mutex);

        return 0;
}
int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
                                     struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
        struct txentry_desc txdesc;

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
        if (!intf->beacon->skb)
                return -ENOMEM;

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

        /*
         * Fill in skb descriptor.
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = intf->beacon;

        /*
         * Send beacon to hardware.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

        return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
                              struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        int ret;

        mutex_lock(&intf->beacon_skb_mutex);
        ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
        mutex_unlock(&intf->beacon_skb_mutex);

        return ret;
}
bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
                                void *data,
                                bool (*fn)(struct queue_entry *entry,
                                           void *data))
{
        unsigned long irqflags;
        unsigned int index_start;
        unsigned int index_end;
        unsigned int i;

        if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev,
                           "Entry requested from invalid index range (%d - %d)\n",
                           start, end);
                return true;
        }

        /*
         * Only protect the range we are going to loop over;
         * if during our loop an extra entry is set to pending
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
        spin_lock_irqsave(&queue->index_lock, irqflags);
        index_start = queue->index[start];
        index_end = queue->index[end];
        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        /*
         * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
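        /*
         * The ring may have wrapped between index_start and index_end;
         * in that case iterate in two segments: from index_start up to
         * the queue limit, then from 0 up to index_end.
         */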
        if (index_start < index_end) {
                for (i = index_start; i < index_end; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }
        } else {
                for (i = index_start; i < queue->limit; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }

                for (i = 0; i < index_end; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
                           index);
                return NULL;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);
        entry = &queue->entries[queue->index[index]];
        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
        struct data_queue *queue = entry->queue;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev,
                           "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);
        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        entry->last_action = jiffies;
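        /*
         * Q_INDEX is the producer index, so the queue grows when it is
         * advanced; Q_INDEX_DONE is the consumer index, so the queue
         * shrinks and the total number of completed frames increases.
         */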
        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to disable the queue
                 * inside mac80211.
                 */
                ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        default:
                break;
        }
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            test_and_set_bit(QUEUE_PAUSED, &queue->flags))
                return;

        rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
                return;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to enable the queue
                 * inside mac80211.
                 */
                ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        case QID_RX:
                /*
                 * For RX we need to kick the queue now in order to
                 * receive frames.
                 */
                queue->rt2x00dev->ops->lib->kick_queue(queue);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        set_bit(QUEUE_PAUSED, &queue->flags);

        queue->rt2x00dev->ops->lib->start_queue(queue);

        rt2x00queue_unpause_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        rt2x00queue_pause_queue_nocheck(queue);

        queue->rt2x00dev->ops->lib->stop_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
        bool tx_queue =
            (queue->qid == QID_AC_VO) ||
            (queue->qid == QID_AC_VI) ||
            (queue->qid == QID_AC_BE) ||
            (queue->qid == QID_AC_BK);

        /*
         * If we are not supposed to drop any pending
         * frames, we must force a start (=kick) of the
         * queue to make sure the hardware will
         * start transmitting.
         */
        if (!drop && tx_queue)
                queue->rt2x00dev->ops->lib->kick_queue(queue);

        /*
         * Check if the driver supports flushing, if that is the case we can
         * defer the flushing to the driver. Otherwise we must use the
         * alternative which just waits for the queue to become empty.
         */
        if (likely(queue->rt2x00dev->ops->lib->flush_queue))
                queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

        /*
         * The queue flush has failed...
         */
        if (unlikely(!rt2x00queue_empty(queue)))
                rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
                            queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_start_queue will call ieee80211_wake_queue
         * for each queue after it has been properly initialized.
         */
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_start_queue(queue);

        rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_stop_queue will call ieee80211_stop_queue
         * as well, but since we are completely shutting down
         * everything now, it is much safer to stop all TX queues
         * at once, and use rt2x00queue_stop_queue for cleaning up.
         */
        ieee80211_stop_queues(rt2x00dev->hw);

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_stop_queue(queue);

        rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
        struct data_queue *queue;

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);

        rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->count = 0;
        queue->length = 0;

        for (i = 0; i < Q_INDEX_MAX; i++)
                queue->index[i] = 0;

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
        }
}
static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + queue->priv_size;
        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
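        /*
         * The allocation above holds the array of queue entries followed
         * by one contiguous region of per-entry driver private data; the
         * macro below computes the address of the private area belonging
         * to entry __index inside that region.
         */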
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        (((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)))

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), queue->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                rt2x00queue_free_skb(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
        if (status)
                goto exit;

        if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
                status = rt2x00queue_alloc_entries(rt2x00dev->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        mutex_init(&queue->status_lock);
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->index_lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->txop = 0;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;

        rt2x00dev->ops->queue_init(queue);
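        /*
         * The threshold is roughly 10% of the queue size; once fewer
         * entries than this are free, the queue is kicked even in the
         * middle of a burst (see rt2x00queue_kick_tx_queue()).
         */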
        queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                rt2x00_err(rt2x00dev, "Queue allocation failed\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers.
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
        rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_VO + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_VO;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

        return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}