tx-gen2.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}
/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
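	/*
	 * Byte-count entry layout, as encoded here: bits 0-11 carry the
	 * frame length in dwords (hence the len > 0xFFF check above) and
	 * bits 12-15 carry num_fetch_chunks. E.g. a 100-dword frame that
	 * fits in a single 64-byte chunk is stored as 0x0064.
	 */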
	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
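	/*
	 * Worst case per subframe: up to 3 bytes of alignment padding, an
	 * 802.3-style subframe header and the SNAP/IP/TCP headers that get
	 * duplicated into every subframe; the IV is needed only once.
	 */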
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, so it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
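		/*
		 * Compute the padding needed so that the *next* subframe
		 * starts on a 4-byte boundary; a subframe is the 802.3-style
		 * header plus the SNAP/IP/TCP headers and its payload.
		 */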
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}
	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	bool amsdu;
	int i, len, tb1_len, tb2_len, hdr_len;
	void *tb1_addr;

	memset(tfd, 0, sizeof(*tfd));

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
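	/*
	 * For A-MSDU frames the copy into the first TB buffer is deferred:
	 * building the A-MSDU below updates the length in the TX command,
	 * so the buffer is copied only once that is done (see further down).
	 */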
	if (!amsdu)
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */
	if (amsdu)
		tb1_len = len;
	else
		tb1_len = ALIGN(len, 4);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (amsdu) {
		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
					      tb1_len + IWL_FIRST_TB_SIZE,
					      hdr_len, dev_cmd))
			goto out_err;

		/*
		 * building the A-MSDU might have changed this data, so memcpy
		 * it now
		 */
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);
		return tfd;
	}

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int idx;
	void *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;
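	/*
	 * If the skb carries more fragments than one TFD can describe,
	 * linearize it so the whole frame can still be mapped.
	 */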
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;
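	/*
	 * The sequence field packs the queue id and the TFD index, which
	 * lets the TX response be matched back to this queue entry later.
	 */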
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
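	/*
	 * Stop the queue when it is getting close to full;
	 * iwl_wake_queue() restarts it once space has been freed.
	 */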
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);

	memset(tfd, 0, sizeof(*tfd));

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);
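	/*
	 * First pass over the command chunks: work out how many bytes must
	 * be copied into the command buffer itself (copy_size) as opposed
	 * to being mapped separately (NOCOPY/DUP), and the total command
	 * size (cmd_size).
	 */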
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
					  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}
/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq_id: index of the transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers, including the "txq" descriptor structure itself.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;
	u32 wr_ptr;
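	/*
	 * Allocate the queue resources on the host first, then send the
	 * configuration command to the firmware; its response carries the
	 * hardware queue id and initial write pointer that were assigned.
	 */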
	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}
	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;
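	/*
	 * Only the command queue is set up here; data queues on gen2
	 * devices are allocated on demand through
	 * iwl_trans_pcie_dyn_txq_alloc().
	 */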
	iwl_pcie_set_tx_cmd_queue_size(trans);

	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue) {
			IWL_ERR(trans, "Not enough memory for command queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue,
					 trans_pcie->tx_cmd_queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, cmd_queue,
				trans_pcie->tx_cmd_queue_size, true);
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}