rx.c

  1. /******************************************************************************
  2. *
  3. * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  4. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  5. * Copyright(c) 2016 Intel Deutschland GmbH
  6. *
  7. * Portions of this file are derived from the ipw3945 project, as well
  8. * as portions of the ieee80211 subsystem header files.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of version 2 of the GNU General Public License as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful, but WITHOUT
  15. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  17. * more details.
  18. *
  19. * You should have received a copy of the GNU General Public License along with
  20. * this program; if not, write to the Free Software Foundation, Inc.,
  21. * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  22. *
  23. * The full GNU General Public License is included in this distribution in the
  24. * file called LICENSE.
  25. *
  26. * Contact Information:
  27. * Intel Linux Wireless <linuxwifi@intel.com>
  28. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  29. *
  30. *****************************************************************************/
  31. #include <linux/sched.h>
  32. #include <linux/wait.h>
  33. #include <linux/gfp.h>
  34. #include "iwl-prph.h"
  35. #include "iwl-io.h"
  36. #include "internal.h"
  37. #include "iwl-op-mode.h"
  38. /******************************************************************************
  39. *
  40. * RX path functions
  41. *
  42. ******************************************************************************/
  43. /*
  44. * Rx theory of operation
  45. *
  46. * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
  47. * each of which points to Receive Buffers to be filled by the NIC. These get
  48. * used not only for Rx frames, but for any command response or notification
  49. * from the NIC. The driver and NIC manage the Rx buffers by means
  50. * of indexes into the circular buffer.
  51. *
  52. * Rx Queue Indexes
  53. * The host/firmware share two index registers for managing the Rx buffers.
  54. *
  55. * The READ index maps to the first position that the firmware may be writing
  56. * to -- the driver can read up to (but not including) this position and get
  57. * good data.
  58. * The READ index is managed by the firmware once the card is enabled.
  59. *
  60. * The WRITE index maps to the last position the driver has read from -- the
  61. * position preceding WRITE is the last slot the firmware can place a packet.
  62. *
  63. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  64. * WRITE = READ.
  65. *
  66. * During initialization, the host sets up the READ queue position to the first
  67. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  68. *
  69. * When the firmware places a packet in a buffer, it will advance the READ index
  70. * and fire the RX interrupt. The driver can then query the READ index and
  71. * process as many packets as possible, moving the WRITE index forward as it
  72. * resets the Rx queue buffers with new memory.
  73. *
  74. * The management in the driver is as follows:
  75. * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
  76. * When the interrupt handler is called, the request is processed.
  77. * The page is either stolen - transferred to the upper layer
  78. * or reused - added immediately to the iwl->rxq->rx_free list.
  79. * + When the page is stolen - the driver updates the matching queue's used
  80. * count, detaches the RBD and transfers it to the queue used list.
  81. * When there are two used RBDs - they are transferred to the allocator empty
  82. * list. Work is then scheduled for the allocator to start allocating
  83. * eight buffers.
  84. * When there are another 6 used RBDs - they are transferred to the allocator
  85. * empty list and the driver tries to claim the pre-allocated buffers and
  86. * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
  87. * until ready.
  88. * When there are 8+ buffers in the free list - either from allocation or from
  89. * 8 reused unstolen pages - restock is called to update the FW and indexes.
  90. * + In order to make sure the allocator always has RBDs to use for allocation
  91. * the allocator has an initial pool of size num_queues*(8-2) - the
  92. * maximum missing RBDs per allocation request (request posted with 2
  93. * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
  94. * The queues supply the recycling of the rest of the RBDs.
  95. * + A received packet is processed and handed to the kernel network stack,
  96. * detached from the iwl->rxq. The driver 'processed' index is updated.
  97. * + If there are no allocated buffers in iwl->rxq->rx_free,
  98. * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  99. * If there were enough free buffers and RX_STALLED is set it is cleared.
  100. *
  101. *
  102. * Driver sequence:
  103. *
  104. * iwl_rxq_alloc() Allocates rx_free
  105. * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
  106. * iwl_pcie_rxq_restock.
  107. * Used only during initialization.
  108. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
  109. * queue, updates firmware pointers, and updates
  110. * the WRITE index.
  111. * iwl_pcie_rx_allocator() Background work for allocating pages.
  112. *
  113. * -- enable interrupts --
  114. * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
  115. * READ INDEX, detaching the SKB from the pool.
  116. * Moves the packet buffer from queue to rx_used.
  117. * Posts and claims requests to the allocator.
  118. * Calls iwl_pcie_rxq_restock to refill any empty
  119. * slots.
  120. *
  121. * RBD life-cycle:
  122. *
  123. * Init:
  124. * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
  125. *
  126. * Regular Receive interrupt:
  127. * Page Stolen:
  128. * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
  129. * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
  130. * Page not Stolen:
  131. * rxq.queue -> rxq.rx_free -> rxq.queue
  132. * ...
  133. *
  134. */
  135. /*
  136. * iwl_rxq_space - Return number of free slots available in queue.
  137. */
  138. static int iwl_rxq_space(const struct iwl_rxq *rxq)
  139. {
  140. /* Make sure rx queue size is a power of 2 */
  141. WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
  142. /*
  143. * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
  144. * between empty and completely full queues.
  145. * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
  146. * defined for negative dividends.
  147. */
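/*
 * Worked example: with queue_size = 256 and read == write, the result is
 * (-1) & 255 = 255 free slots; one slot is always kept empty so that a
 * full queue can be distinguished from an empty one.
 */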
  148. return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
  149. }
  150. /*
  151. * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  152. */
  153. static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  154. {
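/*
 * The device expects the receive buffer address in units of 256 bytes,
 * so the DMA address is shifted right by 8 bits before being handed to
 * the uCode; receive buffers are page aligned, so nothing is lost.
 */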
  155. return cpu_to_le32((u32)(dma_addr >> 8));
  156. }
  157. /*
  158. * iwl_pcie_rx_stop - stops the Rx DMA
  159. */
  160. int iwl_pcie_rx_stop(struct iwl_trans *trans)
  161. {
  162. if (trans->cfg->mq_rx_supported) {
  163. iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
  164. return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
  165. RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
  166. } else {
  167. iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
  168. return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
  169. FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
  170. 1000);
  171. }
  172. }
  173. /*
  174. * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  175. */
  176. static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
  177. struct iwl_rxq *rxq)
  178. {
  179. u32 reg;
  180. lockdep_assert_held(&rxq->lock);
  181. /*
  182. * explicitly wake up the NIC if:
  183. * 1. shadow registers aren't enabled
  184. * 2. there is a chance that the NIC is asleep
  185. */
  186. if (!trans->cfg->base_params->shadow_reg_enable &&
  187. test_bit(STATUS_TPOWER_PMI, &trans->status)) {
  188. reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
  189. if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
  190. IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
  191. reg);
  192. iwl_set_bit(trans, CSR_GP_CNTRL,
  193. CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
  194. rxq->need_update = true;
  195. return;
  196. }
  197. }
  198. rxq->write_actual = round_down(rxq->write, 8);
  199. if (trans->cfg->mq_rx_supported)
  200. iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
  201. rxq->write_actual);
  202. else
  203. iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
  204. }
  205. static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
  206. {
  207. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  208. int i;
  209. for (i = 0; i < trans->num_rx_queues; i++) {
  210. struct iwl_rxq *rxq = &trans_pcie->rxq[i];
  211. if (!rxq->need_update)
  212. continue;
  213. spin_lock(&rxq->lock);
  214. iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
  215. rxq->need_update = false;
  216. spin_unlock(&rxq->lock);
  217. }
  218. }
  219. /*
  220. * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  221. */
  222. static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
  223. struct iwl_rxq *rxq)
  224. {
  225. struct iwl_rx_mem_buffer *rxb;
  226. /*
  227. * If the device isn't enabled - no need to try to add buffers...
  228. * This can happen when we stop the device and still have an interrupt
  229. * pending. We stop the APM before we sync the interrupts because we
  230. * have to (see comment there). On the other hand, since the APM is
  231. * stopped, we cannot access the HW (in particular not prph).
  232. * So don't try to restock if the APM has been already stopped.
  233. */
  234. if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
  235. return;
  236. spin_lock(&rxq->lock);
  237. while (rxq->free_count) {
  238. __le64 *bd = (__le64 *)rxq->bd;
  239. /* Get next free Rx buffer, remove from free list */
  240. rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
  241. list);
  242. list_del(&rxb->list);
  243. rxb->invalid = false;
  244. /* The first 12 bits are expected to be empty */
  245. WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
  246. /* Point to Rx buffer via next RBD in circular buffer */
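/*
 * Each 64-bit RBD carries the page DMA address in its upper bits and the
 * buffer's vid in the low 12 bits; the HW echoes the vid back in used_bd,
 * where it is used to look the buffer up in global_table.
 */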
  247. bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
  248. rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
  249. rxq->free_count--;
  250. }
  251. spin_unlock(&rxq->lock);
  252. /*
  253. * If we've added more space for the firmware to place data, tell it.
  254. * Increment device's write pointer in multiples of 8.
  255. */
  256. if (rxq->write_actual != (rxq->write & ~0x7)) {
  257. spin_lock(&rxq->lock);
  258. iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
  259. spin_unlock(&rxq->lock);
  260. }
  261. }
  262. /*
  263. * iwl_pcie_rxsq_restock - restock implementation for single queue rx
  264. */
  265. static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
  266. struct iwl_rxq *rxq)
  267. {
  268. struct iwl_rx_mem_buffer *rxb;
  269. /*
  270. * If the device isn't enabled - no need to try to add buffers...
  271. * This can happen when we stop the device and still have an interrupt
  272. * pending. We stop the APM before we sync the interrupts because we
  273. * have to (see comment there). On the other hand, since the APM is
  274. * stopped, we cannot access the HW (in particular not prph).
  275. * So don't try to restock if the APM has been already stopped.
  276. */
  277. if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
  278. return;
  279. spin_lock(&rxq->lock);
  280. while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
  281. __le32 *bd = (__le32 *)rxq->bd;
  282. /* The overwritten rxb must be a used one */
  283. rxb = rxq->queue[rxq->write];
  284. BUG_ON(rxb && rxb->page);
  285. /* Get next free Rx buffer, remove from free list */
  286. rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
  287. list);
  288. list_del(&rxb->list);
  289. rxb->invalid = false;
  290. /* Point to Rx buffer via next RBD in circular buffer */
  291. bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
  292. rxq->queue[rxq->write] = rxb;
  293. rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
  294. rxq->free_count--;
  295. }
  296. spin_unlock(&rxq->lock);
  297. /* If we've added more space for the firmware to place data, tell it.
  298. * Increment device's write pointer in multiples of 8. */
  299. if (rxq->write_actual != (rxq->write & ~0x7)) {
  300. spin_lock(&rxq->lock);
  301. iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
  302. spin_unlock(&rxq->lock);
  303. }
  304. }
  305. /*
  306. * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
  307. *
  308. * If there are slots in the RX queue that need to be restocked,
  309. * and we have free pre-allocated buffers, fill the ranks as much
  310. * as we can, pulling from rx_free.
  311. *
  312. * This moves the 'write' index forward to catch up with 'processed', and
  313. * also updates the memory address in the firmware to reference the new
  314. * target buffer.
  315. */
  316. static
  317. void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
  318. {
  319. if (trans->cfg->mq_rx_supported)
  320. iwl_pcie_rxmq_restock(trans, rxq);
  321. else
  322. iwl_pcie_rxsq_restock(trans, rxq);
  323. }
  324. /*
  325. * iwl_pcie_rx_alloc_page - allocates and returns a page.
  326. *
  327. */
  328. static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
  329. gfp_t priority)
  330. {
  331. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  332. struct page *page;
  333. gfp_t gfp_mask = priority;
  334. if (trans_pcie->rx_page_order > 0)
  335. gfp_mask |= __GFP_COMP;
  336. /* Alloc a new receive buffer */
  337. page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
  338. if (!page) {
  339. if (net_ratelimit())
  340. IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
  341. trans_pcie->rx_page_order);
  342. /*
  343. * Issue an error if we don't have enough pre-allocated
  344. * buffers.
  345. */
  346. if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
  347. IWL_CRIT(trans,
  348. "Failed to alloc_pages\n");
  349. return NULL;
  350. }
  351. return page;
  352. }
  353. /*
  354. * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  355. *
  356. * A used RBD is an Rx buffer that has been given to the stack. To use it again
  357. * a page must be allocated and the RBD must point to the page. This function
  358. * doesn't change the HW pointer but handles the list of pages that is used by
  359. * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  360. * allocated buffers.
  361. */
  362. static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
  363. struct iwl_rxq *rxq)
  364. {
  365. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  366. struct iwl_rx_mem_buffer *rxb;
  367. struct page *page;
  368. while (1) {
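/*
 * The page is allocated with the lock dropped (the allocation may sleep
 * when called with GFP_KERNEL), so rx_used is re-checked after the
 * allocation in case it was drained in the meantime.
 */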
  369. spin_lock(&rxq->lock);
  370. if (list_empty(&rxq->rx_used)) {
  371. spin_unlock(&rxq->lock);
  372. return;
  373. }
  374. spin_unlock(&rxq->lock);
  375. /* Alloc a new receive buffer */
  376. page = iwl_pcie_rx_alloc_page(trans, priority);
  377. if (!page)
  378. return;
  379. spin_lock(&rxq->lock);
  380. if (list_empty(&rxq->rx_used)) {
  381. spin_unlock(&rxq->lock);
  382. __free_pages(page, trans_pcie->rx_page_order);
  383. return;
  384. }
  385. rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
  386. list);
  387. list_del(&rxb->list);
  388. spin_unlock(&rxq->lock);
  389. BUG_ON(rxb->page);
  390. rxb->page = page;
  391. /* Get physical address of the RB */
  392. rxb->page_dma =
  393. dma_map_page(trans->dev, page, 0,
  394. PAGE_SIZE << trans_pcie->rx_page_order,
  395. DMA_FROM_DEVICE);
  396. if (dma_mapping_error(trans->dev, rxb->page_dma)) {
  397. rxb->page = NULL;
  398. spin_lock(&rxq->lock);
  399. list_add(&rxb->list, &rxq->rx_used);
  400. spin_unlock(&rxq->lock);
  401. __free_pages(page, trans_pcie->rx_page_order);
  402. return;
  403. }
  404. spin_lock(&rxq->lock);
  405. list_add_tail(&rxb->list, &rxq->rx_free);
  406. rxq->free_count++;
  407. spin_unlock(&rxq->lock);
  408. }
  409. }
  410. static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
  411. {
  412. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  413. int i;
  414. for (i = 0; i < RX_POOL_SIZE; i++) {
  415. if (!trans_pcie->rx_pool[i].page)
  416. continue;
  417. dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
  418. PAGE_SIZE << trans_pcie->rx_page_order,
  419. DMA_FROM_DEVICE);
  420. __free_pages(trans_pcie->rx_pool[i].page,
  421. trans_pcie->rx_page_order);
  422. trans_pcie->rx_pool[i].page = NULL;
  423. }
  424. }
  425. /*
  426. * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
  427. *
  428. * Allocates 8 pages for each received request
  429. * Called as a scheduled work item.
  430. */
  431. static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
  432. {
  433. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  434. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  435. struct list_head local_empty;
  436. int pending = atomic_xchg(&rba->req_pending, 0);
  437. IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
  438. /* If we were scheduled - there is at least one request */
  439. spin_lock(&rba->lock);
  440. /* swap out the rba->rbd_empty to a local list */
  441. list_replace_init(&rba->rbd_empty, &local_empty);
  442. spin_unlock(&rba->lock);
  443. while (pending) {
  444. int i;
  445. struct list_head local_allocated;
  446. gfp_t gfp_mask = GFP_KERNEL;
  447. /* Do not post a warning if there are only a few requests */
  448. if (pending < RX_PENDING_WATERMARK)
  449. gfp_mask |= __GFP_NOWARN;
  450. INIT_LIST_HEAD(&local_allocated);
  451. for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
  452. struct iwl_rx_mem_buffer *rxb;
  453. struct page *page;
  454. /* List should never be empty - each reused RBD is
  455. * returned to the list, and initial pool covers any
  456. * possible gap between the time the page is allocated
  457. * and the time the RBD is added.
  458. */
  459. BUG_ON(list_empty(&local_empty));
  460. /* Get the first rxb from the rbd list */
  461. rxb = list_first_entry(&local_empty,
  462. struct iwl_rx_mem_buffer, list);
  463. BUG_ON(rxb->page);
  464. /* Alloc a new receive buffer */
  465. page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
  466. if (!page)
  467. continue;
  468. rxb->page = page;
  469. /* Get physical address of the RB */
  470. rxb->page_dma = dma_map_page(trans->dev, page, 0,
  471. PAGE_SIZE << trans_pcie->rx_page_order,
  472. DMA_FROM_DEVICE);
  473. if (dma_mapping_error(trans->dev, rxb->page_dma)) {
  474. rxb->page = NULL;
  475. __free_pages(page, trans_pcie->rx_page_order);
  476. continue;
  477. }
  478. /* move the allocated entry to the out list */
  479. list_move(&rxb->list, &local_allocated);
  480. i++;
  481. }
  482. pending--;
  483. if (!pending) {
  484. pending = atomic_xchg(&rba->req_pending, 0);
  485. IWL_DEBUG_RX(trans,
  486. "Pending allocation requests = %d\n",
  487. pending);
  488. }
  489. spin_lock(&rba->lock);
  490. /* add the allocated rbds to the allocator allocated list */
  491. list_splice_tail(&local_allocated, &rba->rbd_allocated);
  492. /* get more empty RBDs for current pending requests */
  493. list_splice_tail_init(&rba->rbd_empty, &local_empty);
  494. spin_unlock(&rba->lock);
  495. atomic_inc(&rba->req_ready);
  496. }
  497. spin_lock(&rba->lock);
  498. /* return unused rbds to the allocator empty list */
  499. list_splice_tail(&local_empty, &rba->rbd_empty);
  500. spin_unlock(&rba->lock);
  501. }
  502. /*
  503. * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
  504. *
  505. * Called by queue when the queue posted allocation request and
  506. * has freed 8 RBDs in order to restock itself.
  507. * This function directly moves the allocated RBs to the queue's ownership
  508. * and updates the relevant counters.
  509. */
  510. static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
  511. struct iwl_rxq *rxq)
  512. {
  513. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  514. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  515. int i;
  516. lockdep_assert_held(&rxq->lock);
  517. /*
  518. * atomic_dec_if_positive returns req_ready - 1 for any scenario.
  519. * If req_ready is 0 atomic_dec_if_positive will return -1 and this
  520. * function will return early, as there are no ready requests.
  521. * atomic_dec_if_positive will perform the *actual* decrement only if
  522. * req_ready > 0, i.e. - there are ready requests and the function
  523. * hands one request to the caller.
  524. */
  525. if (atomic_dec_if_positive(&rba->req_ready) < 0)
  526. return;
  527. spin_lock(&rba->lock);
  528. for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
  529. /* Get next free Rx buffer, remove it from free list */
  530. struct iwl_rx_mem_buffer *rxb =
  531. list_first_entry(&rba->rbd_allocated,
  532. struct iwl_rx_mem_buffer, list);
  533. list_move(&rxb->list, &rxq->rx_free);
  534. }
  535. spin_unlock(&rba->lock);
  536. rxq->used_count -= RX_CLAIM_REQ_ALLOC;
  537. rxq->free_count += RX_CLAIM_REQ_ALLOC;
  538. }
  539. static void iwl_pcie_rx_allocator_work(struct work_struct *data)
  540. {
  541. struct iwl_rb_allocator *rba_p =
  542. container_of(data, struct iwl_rb_allocator, rx_alloc);
  543. struct iwl_trans_pcie *trans_pcie =
  544. container_of(rba_p, struct iwl_trans_pcie, rba);
  545. iwl_pcie_rx_allocator(trans_pcie->trans);
  546. }
  547. static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
  548. {
  549. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  550. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  551. struct device *dev = trans->dev;
  552. int i;
  553. int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
  554. sizeof(__le32);
  555. if (WARN_ON(trans_pcie->rxq))
  556. return -EINVAL;
  557. trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
  558. GFP_KERNEL);
  559. if (!trans_pcie->rxq)
  560. return -EINVAL;
  561. spin_lock_init(&rba->lock);
  562. for (i = 0; i < trans->num_rx_queues; i++) {
  563. struct iwl_rxq *rxq = &trans_pcie->rxq[i];
  564. spin_lock_init(&rxq->lock);
  565. if (trans->cfg->mq_rx_supported)
  566. rxq->queue_size = MQ_RX_TABLE_SIZE;
  567. else
  568. rxq->queue_size = RX_QUEUE_SIZE;
  569. /*
  570. * Allocate the circular buffer of Read Buffer Descriptors
  571. * (RBDs)
  572. */
  573. rxq->bd = dma_zalloc_coherent(dev,
  574. free_size * rxq->queue_size,
  575. &rxq->bd_dma, GFP_KERNEL);
  576. if (!rxq->bd)
  577. goto err;
  578. if (trans->cfg->mq_rx_supported) {
  579. rxq->used_bd = dma_zalloc_coherent(dev,
  580. sizeof(__le32) *
  581. rxq->queue_size,
  582. &rxq->used_bd_dma,
  583. GFP_KERNEL);
  584. if (!rxq->used_bd)
  585. goto err;
  586. }
  587. /*Allocate the driver's pointer to receive buffer status */
  588. rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
  589. &rxq->rb_stts_dma,
  590. GFP_KERNEL);
  591. if (!rxq->rb_stts)
  592. goto err;
  593. }
  594. return 0;
  595. err:
  596. for (i = 0; i < trans->num_rx_queues; i++) {
  597. struct iwl_rxq *rxq = &trans_pcie->rxq[i];
  598. if (rxq->bd)
  599. dma_free_coherent(dev, free_size * rxq->queue_size,
  600. rxq->bd, rxq->bd_dma);
  601. rxq->bd_dma = 0;
  602. rxq->bd = NULL;
  603. if (rxq->rb_stts)
  604. dma_free_coherent(trans->dev,
  605. sizeof(struct iwl_rb_status),
  606. rxq->rb_stts, rxq->rb_stts_dma);
  607. if (rxq->used_bd)
  608. dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
  609. rxq->used_bd, rxq->used_bd_dma);
  610. rxq->used_bd_dma = 0;
  611. rxq->used_bd = NULL;
  612. }
  613. kfree(trans_pcie->rxq);
  614. return -ENOMEM;
  615. }
  616. static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
  617. {
  618. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  619. u32 rb_size;
  620. unsigned long flags;
  621. const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
  622. switch (trans_pcie->rx_buf_size) {
  623. case IWL_AMSDU_4K:
  624. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
  625. break;
  626. case IWL_AMSDU_8K:
  627. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
  628. break;
  629. case IWL_AMSDU_12K:
  630. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
  631. break;
  632. default:
  633. WARN_ON(1);
  634. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
  635. }
  636. if (!iwl_trans_grab_nic_access(trans, &flags))
  637. return;
  638. /* Stop Rx DMA */
  639. iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
  640. /* reset and flush pointers */
  641. iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
  642. iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
  643. iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
  644. /* Reset driver's Rx queue write index */
  645. iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
  646. /* Tell device where to find RBD circular buffer in DRAM */
  647. iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
  648. (u32)(rxq->bd_dma >> 8));
  649. /* Tell device where in DRAM to update its Rx status */
  650. iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
  651. rxq->rb_stts_dma >> 4);
  652. /* Enable Rx DMA
  653. * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
  654. * the credit mechanism in 5000 HW RX FIFO
  655. * Direct rx interrupts to hosts
  656. * Rx buffer size 4 or 8k or 12k
  657. * RB timeout 0x10
  658. * 256 RBDs
  659. */
  660. iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
  661. FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
  662. FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
  663. FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
  664. rb_size |
  665. (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
  666. (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
  667. iwl_trans_release_nic_access(trans, &flags);
  668. /* Set interrupt coalescing timer to default (2048 usecs) */
  669. iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
  670. /* W/A for interrupt coalescing bug in 7260 and 3160 */
  671. if (trans->cfg->host_interrupt_operation_mode)
  672. iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
  673. }
  674. void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
  675. {
  676. /*
  677. * Turn on the chicken-bits that cause MAC wakeup for RX-related
  678. * values.
  679. * This costs some power, but is needed as a W/A for a 9000 integrated A-step
  680. * bug where shadow registers are not in the retention list and their
  681. * value is lost when NIC powers down
  682. */
  683. if (trans->cfg->integrated) {
  684. iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
  685. CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
  686. iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
  687. CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
  688. }
  689. }
  690. static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
  691. {
  692. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  693. u32 rb_size, enabled = 0;
  694. unsigned long flags;
  695. int i;
  696. switch (trans_pcie->rx_buf_size) {
  697. case IWL_AMSDU_4K:
  698. rb_size = RFH_RXF_DMA_RB_SIZE_4K;
  699. break;
  700. case IWL_AMSDU_8K:
  701. rb_size = RFH_RXF_DMA_RB_SIZE_8K;
  702. break;
  703. case IWL_AMSDU_12K:
  704. rb_size = RFH_RXF_DMA_RB_SIZE_12K;
  705. break;
  706. default:
  707. WARN_ON(1);
  708. rb_size = RFH_RXF_DMA_RB_SIZE_4K;
  709. }
  710. if (!iwl_trans_grab_nic_access(trans, &flags))
  711. return;
  712. /* Stop Rx DMA */
  713. iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
  714. /* disable free and used rx queue operation */
  715. iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
  716. for (i = 0; i < trans->num_rx_queues; i++) {
  717. /* Tell device where to find RBD free table in DRAM */
  718. iwl_write_prph64_no_grab(trans,
  719. RFH_Q_FRBDCB_BA_LSB(i),
  720. trans_pcie->rxq[i].bd_dma);
  721. /* Tell device where to find RBD used table in DRAM */
  722. iwl_write_prph64_no_grab(trans,
  723. RFH_Q_URBDCB_BA_LSB(i),
  724. trans_pcie->rxq[i].used_bd_dma);
  725. /* Tell device where in DRAM to update its Rx status */
  726. iwl_write_prph64_no_grab(trans,
  727. RFH_Q_URBD_STTS_WPTR_LSB(i),
  728. trans_pcie->rxq[i].rb_stts_dma);
  729. /* Reset device index tables */
  730. iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
  731. iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
  732. iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
  733. enabled |= BIT(i) | BIT(i + 16);
  734. }
  735. /*
  736. * Enable Rx DMA
  737. * Rx buffer size 4 or 8k or 12k
  738. * Min RB size 4 or 8
  739. * Drop frames that exceed RB size
  740. * 512 RBDs
  741. */
  742. iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
  743. RFH_DMA_EN_ENABLE_VAL | rb_size |
  744. RFH_RXF_DMA_MIN_RB_4_8 |
  745. RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
  746. RFH_RXF_DMA_RBDCB_SIZE_512);
  747. /*
  748. * Activate DMA snooping.
  749. * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
  750. * Default queue is 0
  751. */
  752. iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
  753. (DEFAULT_RXQ_NUM <<
  754. RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
  755. RFH_GEN_CFG_SERVICE_DMA_SNOOP |
  756. (trans->cfg->integrated ?
  757. RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
  758. RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
  759. RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
  760. /* Enable the relevant rx queues */
  761. iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
  762. iwl_trans_release_nic_access(trans, &flags);
  763. /* Set interrupt coalescing timer to default (2048 usecs) */
  764. iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
  765. iwl_pcie_enable_rx_wake(trans, true);
  766. }
  767. static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
  768. {
  769. lockdep_assert_held(&rxq->lock);
  770. INIT_LIST_HEAD(&rxq->rx_free);
  771. INIT_LIST_HEAD(&rxq->rx_used);
  772. rxq->free_count = 0;
  773. rxq->used_count = 0;
  774. }
  775. static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
  776. {
  777. WARN_ON(1);
  778. return 0;
  779. }
  780. int iwl_pcie_rx_init(struct iwl_trans *trans)
  781. {
  782. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  783. struct iwl_rxq *def_rxq;
  784. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  785. int i, err, queue_size, allocator_pool_size, num_alloc;
  786. if (!trans_pcie->rxq) {
  787. err = iwl_pcie_rx_alloc(trans);
  788. if (err)
  789. return err;
  790. }
  791. def_rxq = trans_pcie->rxq;
  792. if (!rba->alloc_wq)
  793. rba->alloc_wq = alloc_workqueue("rb_allocator",
  794. WQ_HIGHPRI | WQ_UNBOUND, 1);
  795. INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
  796. spin_lock(&rba->lock);
  797. atomic_set(&rba->req_pending, 0);
  798. atomic_set(&rba->req_ready, 0);
  799. INIT_LIST_HEAD(&rba->rbd_allocated);
  800. INIT_LIST_HEAD(&rba->rbd_empty);
  801. spin_unlock(&rba->lock);
  802. /* free all first - we might be reconfigured for a different size */
  803. iwl_pcie_free_rbs_pool(trans);
  804. for (i = 0; i < RX_QUEUE_SIZE; i++)
  805. def_rxq->queue[i] = NULL;
  806. for (i = 0; i < trans->num_rx_queues; i++) {
  807. struct iwl_rxq *rxq = &trans_pcie->rxq[i];
  808. rxq->id = i;
  809. spin_lock(&rxq->lock);
  810. /*
  811. * Set read write pointer to reflect that we have processed
  812. * and used all buffers, but have not restocked the Rx queue
  813. * with fresh buffers
  814. */
  815. rxq->read = 0;
  816. rxq->write = 0;
  817. rxq->write_actual = 0;
  818. memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
  819. iwl_pcie_rx_init_rxb_lists(rxq);
  820. if (!rxq->napi.poll)
  821. netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
  822. iwl_pcie_dummy_napi_poll, 64);
  823. spin_unlock(&rxq->lock);
  824. }
  825. /* move the pool to the default queue and allocator ownerships */
  826. queue_size = trans->cfg->mq_rx_supported ?
  827. MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
  828. allocator_pool_size = trans->num_rx_queues *
  829. (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
  830. num_alloc = queue_size + allocator_pool_size;
  831. BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
  832. ARRAY_SIZE(trans_pcie->rx_pool));
  833. for (i = 0; i < num_alloc; i++) {
  834. struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
  835. if (i < allocator_pool_size)
  836. list_add(&rxb->list, &rba->rbd_empty);
  837. else
  838. list_add(&rxb->list, &def_rxq->rx_used);
  839. trans_pcie->global_table[i] = rxb;
  840. rxb->vid = (u16)(i + 1);
  841. rxb->invalid = true;
  842. }
  843. iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
  844. if (trans->cfg->mq_rx_supported)
  845. iwl_pcie_rx_mq_hw_init(trans);
  846. else
  847. iwl_pcie_rx_hw_init(trans, def_rxq);
  848. iwl_pcie_rxq_restock(trans, def_rxq);
  849. spin_lock(&def_rxq->lock);
  850. iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
  851. spin_unlock(&def_rxq->lock);
  852. return 0;
  853. }
  854. void iwl_pcie_rx_free(struct iwl_trans *trans)
  855. {
  856. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  857. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  858. int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
  859. sizeof(__le32);
  860. int i;
  861. /*
  862. * if rxq is NULL, it means that nothing has been allocated,
  863. * exit now
  864. */
  865. if (!trans_pcie->rxq) {
  866. IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
  867. return;
  868. }
  869. cancel_work_sync(&rba->rx_alloc);
  870. if (rba->alloc_wq) {
  871. destroy_workqueue(rba->alloc_wq);
  872. rba->alloc_wq = NULL;
  873. }
  874. iwl_pcie_free_rbs_pool(trans);
  875. for (i = 0; i < trans->num_rx_queues; i++) {
  876. struct iwl_rxq *rxq = &trans_pcie->rxq[i];
  877. if (rxq->bd)
  878. dma_free_coherent(trans->dev,
  879. free_size * rxq->queue_size,
  880. rxq->bd, rxq->bd_dma);
  881. rxq->bd_dma = 0;
  882. rxq->bd = NULL;
  883. if (rxq->rb_stts)
  884. dma_free_coherent(trans->dev,
  885. sizeof(struct iwl_rb_status),
  886. rxq->rb_stts, rxq->rb_stts_dma);
  887. else
  888. IWL_DEBUG_INFO(trans,
  889. "Free rxq->rb_stts which is NULL\n");
  890. if (rxq->used_bd)
  891. dma_free_coherent(trans->dev,
  892. sizeof(__le32) * rxq->queue_size,
  893. rxq->used_bd, rxq->used_bd_dma);
  894. rxq->used_bd_dma = 0;
  895. rxq->used_bd = NULL;
  896. if (rxq->napi.poll)
  897. netif_napi_del(&rxq->napi);
  898. }
  899. kfree(trans_pcie->rxq);
  900. }
  901. /*
  902. * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
  903. *
  904. * Called when an RBD can be reused. The RBD is transferred to the allocator.
  905. * When there are 2 empty RBDs - a request for allocation is posted
  906. */
  907. static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
  908. struct iwl_rx_mem_buffer *rxb,
  909. struct iwl_rxq *rxq, bool emergency)
  910. {
  911. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  912. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  913. /* Move the RBD to the used list, will be moved to allocator in batches
  914. * before claiming or posting a request */
  915. list_add_tail(&rxb->list, &rxq->rx_used);
  916. if (unlikely(emergency))
  917. return;
  918. /* Count the allocator owned RBDs */
  919. rxq->used_count++;
  920. /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
  921. * issue a request to the allocator. The modulo RX_CLAIM_REQ_ALLOC is
  922. * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
  923. * buffers before, but we still need to post another request.
  924. */
  925. if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
  926. /* Move the 2 RBDs to the allocator ownership.
  927. Allocator has another 6 from pool for the request completion */
  928. spin_lock(&rba->lock);
  929. list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
  930. spin_unlock(&rba->lock);
  931. atomic_inc(&rba->req_pending);
  932. queue_work(rba->alloc_wq, &rba->rx_alloc);
  933. }
  934. }
  935. static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
  936. struct iwl_rxq *rxq,
  937. struct iwl_rx_mem_buffer *rxb,
  938. bool emergency)
  939. {
  940. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  941. struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
  942. bool page_stolen = false;
  943. int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
  944. u32 offset = 0;
  945. if (WARN_ON(!rxb))
  946. return;
  947. dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
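/*
 * A single RB can hold several consecutive packets; walk them until an
 * invalid frame marker or the end of the buffer is reached.
 */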
  948. while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
  949. struct iwl_rx_packet *pkt;
  950. u16 sequence;
  951. bool reclaim;
  952. int index, cmd_index, len;
  953. struct iwl_rx_cmd_buffer rxcb = {
  954. ._offset = offset,
  955. ._rx_page_order = trans_pcie->rx_page_order,
  956. ._page = rxb->page,
  957. ._page_stolen = false,
  958. .truesize = max_len,
  959. };
  960. pkt = rxb_addr(&rxcb);
  961. if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
  962. break;
  963. WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
  964. FH_RSCSR_RXQ_POS != rxq->id);
  965. IWL_DEBUG_RX(trans,
  966. "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
  967. rxcb._offset,
  968. iwl_get_cmd_string(trans,
  969. iwl_cmd_id(pkt->hdr.cmd,
  970. pkt->hdr.group_id,
  971. 0)),
  972. pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
  973. len = iwl_rx_packet_len(pkt);
  974. len += sizeof(u32); /* account for status word */
  975. trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
  976. trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
  977. /* Reclaim a command buffer only if this packet is a response
  978. * to a (driver-originated) command.
  979. * If the packet (e.g. Rx frame) originated from uCode,
  980. * there is no command buffer to reclaim.
  981. * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
  982. * but apparently a few don't get set; catch them here. */
  983. reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
  984. if (reclaim) {
  985. int i;
  986. for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
  987. if (trans_pcie->no_reclaim_cmds[i] ==
  988. pkt->hdr.cmd) {
  989. reclaim = false;
  990. break;
  991. }
  992. }
  993. }
  994. sequence = le16_to_cpu(pkt->hdr.sequence);
  995. index = SEQ_TO_INDEX(sequence);
  996. cmd_index = get_cmd_index(&txq->q, index);
  997. if (rxq->id == 0)
  998. iwl_op_mode_rx(trans->op_mode, &rxq->napi,
  999. &rxcb);
  1000. else
  1001. iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
  1002. &rxcb, rxq->id);
  1003. if (reclaim) {
  1004. kzfree(txq->entries[cmd_index].free_buf);
  1005. txq->entries[cmd_index].free_buf = NULL;
  1006. }
  1007. /*
  1008. * After here, we should always check rxcb._page_stolen,
  1009. * if it is true then one of the handlers took the page.
  1010. */
  1011. if (reclaim) {
  1012. /* Invoke any callbacks, transfer the buffer to caller,
  1013. * and fire off the (possibly) blocking
  1014. * iwl_trans_send_cmd()
  1015. * as we reclaim the driver command queue */
  1016. if (!rxcb._page_stolen)
  1017. iwl_pcie_hcmd_complete(trans, &rxcb);
  1018. else
  1019. IWL_WARN(trans, "Claim null rxb?\n");
  1020. }
  1021. page_stolen |= rxcb._page_stolen;
  1022. offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
  1023. }
  1024. /* page was stolen from us -- free our reference */
  1025. if (page_stolen) {
  1026. __free_pages(rxb->page, trans_pcie->rx_page_order);
  1027. rxb->page = NULL;
  1028. }
  1029. /* Reuse the page if possible. For notification packets and
  1030. * SKBs that fail to Rx correctly, add them back into the
  1031. * rx_free list for reuse later. */
  1032. if (rxb->page != NULL) {
  1033. rxb->page_dma =
  1034. dma_map_page(trans->dev, rxb->page, 0,
  1035. PAGE_SIZE << trans_pcie->rx_page_order,
  1036. DMA_FROM_DEVICE);
  1037. if (dma_mapping_error(trans->dev, rxb->page_dma)) {
  1038. /*
  1039. * free the page(s) as well to not break
  1040. * the invariant that the items on the used
  1041. * list have no page(s)
  1042. */
  1043. __free_pages(rxb->page, trans_pcie->rx_page_order);
  1044. rxb->page = NULL;
  1045. iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
  1046. } else {
  1047. list_add_tail(&rxb->list, &rxq->rx_free);
  1048. rxq->free_count++;
  1049. }
  1050. } else
  1051. iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
  1052. }
  1053. /*
  1054. * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  1055. */
  1056. static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
  1057. {
  1058. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1059. struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
  1060. u32 r, i, count = 0;
  1061. bool emergency = false;
  1062. restart:
  1063. spin_lock(&rxq->lock);
  1064. /* uCode's read index (stored in shared DRAM) indicates the last Rx
  1065. * buffer that the driver may process (last buffer filled by ucode). */
  1066. r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
  1067. i = rxq->read;
  1068. /* W/A 9000 device step A0 wrap-around bug */
  1069. r &= (rxq->queue_size - 1);
  1070. /* Rx interrupt, but nothing sent from uCode */
  1071. if (i == r)
  1072. IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
  1073. while (i != r) {
  1074. struct iwl_rx_mem_buffer *rxb;
  1075. if (unlikely(rxq->used_count == rxq->queue_size / 2))
  1076. emergency = true;
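/*
 * Emergency mode: half of the queue's RBs are tied up, so pages are
 * allocated directly (GFP_ATOMIC) and restocked every 8 handled RBs
 * below, instead of waiting for the background allocator.
 */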
  1077. if (trans->cfg->mq_rx_supported) {
  1078. /*
  1079. * used_bd is a 32 bit value, but only the lowest 12 bits are used
  1080. * to retrieve the vid
  1081. */
  1082. u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
  1083. if (WARN(!vid ||
  1084. vid > ARRAY_SIZE(trans_pcie->global_table),
  1085. "Invalid rxb index from HW %u\n", (u32)vid)) {
  1086. iwl_force_nmi(trans);
  1087. goto out;
  1088. }
  1089. rxb = trans_pcie->global_table[vid - 1];
  1090. if (WARN(rxb->invalid,
  1091. "Invalid rxb from HW %u\n", (u32)vid)) {
  1092. iwl_force_nmi(trans);
  1093. goto out;
  1094. }
  1095. rxb->invalid = true;
  1096. } else {
  1097. rxb = rxq->queue[i];
  1098. rxq->queue[i] = NULL;
  1099. }
  1100. IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
  1101. iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
  1102. i = (i + 1) & (rxq->queue_size - 1);
  1103. /*
  1104. * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
  1105. * try to claim the pre-allocated buffers from the allocator.
  1106. * If not ready - will try to reclaim next time.
  1107. * There is no need to reschedule work - allocator exits only
  1108. * on success
  1109. */
  1110. if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
  1111. iwl_pcie_rx_allocator_get(trans, rxq);
  1112. if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
  1113. struct iwl_rb_allocator *rba = &trans_pcie->rba;
  1114. /* Add the remaining empty RBDs for allocator use */
  1115. spin_lock(&rba->lock);
  1116. list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
  1117. spin_unlock(&rba->lock);
  1118. } else if (emergency) {
  1119. count++;
  1120. if (count == 8) {
  1121. count = 0;
  1122. if (rxq->used_count < rxq->queue_size / 3)
  1123. emergency = false;
  1124. rxq->read = i;
  1125. spin_unlock(&rxq->lock);
  1126. iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
  1127. iwl_pcie_rxq_restock(trans, rxq);
  1128. goto restart;
  1129. }
  1130. }
  1131. }
  1132. out:
  1133. /* Backtrack one entry */
  1134. rxq->read = i;
  1135. spin_unlock(&rxq->lock);
  1136. /*
  1137. * handle a case where in emergency there are some unallocated RBDs.
  1138. * those RBDs are in the used list, but are not tracked by the queue's
  1139. * used_count which counts allocator owned RBDs.
  1140. * unallocated emergency RBDs must be allocated on exit, otherwise
  1141. * when called again the function may not be in emergency mode and
  1142. * they will be handed to the allocator with no tracking in the RBD
  1143. * allocator counters, which will lead to them never being claimed back
  1144. * by the queue.
  1145. * by allocating them here, they are now in the queue free list, and
  1146. * will be restocked by the next call of iwl_pcie_rxq_restock.
  1147. */
  1148. if (unlikely(emergency && count))
  1149. iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
  1150. if (rxq->napi.poll)
  1151. napi_gro_flush(&rxq->napi, false);
  1152. iwl_pcie_rxq_restock(trans, rxq);
  1153. }
  1154. static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
  1155. {
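/*
 * entry->entry is this entry's index in the msix_entries[] array, so
 * stepping back by that many elements yields the start of the array,
 * from which container_of() recovers the enclosing trans_pcie.
 */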
  1156. u8 queue = entry->entry;
  1157. struct msix_entry *entries = entry - queue;
  1158. return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
  1159. }
  1160. static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
  1161. struct msix_entry *entry)
  1162. {
  1163. /*
  1164. * Before sending the interrupt the HW disables it to prevent
  1165. * a nested interrupt. This is done by writing 1 to the corresponding
  1166. * bit in the mask register. After handling the interrupt, it should be
  1167. * re-enabled by clearing this bit. This register is defined as
  1168. * a write 1 clear (W1C) register, meaning that the bit is cleared
  1169. * by writing 1 to it.
  1170. */
  1171. iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
  1172. }
  1173. /*
  1174. * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
  1175. * This interrupt handler should be used with RSS queue only.
  1176. */
  1177. irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
  1178. {
  1179. struct msix_entry *entry = dev_id;
  1180. struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
  1181. struct iwl_trans *trans = trans_pcie->trans;
  1182. if (WARN_ON(entry->entry >= trans->num_rx_queues))
  1183. return IRQ_NONE;
  1184. lock_map_acquire(&trans->sync_cmd_lockdep_map);
  1185. local_bh_disable();
  1186. iwl_pcie_rx_handle(trans, entry->entry);
  1187. local_bh_enable();
  1188. iwl_pcie_clear_irq(trans, entry);
  1189. lock_map_release(&trans->sync_cmd_lockdep_map);
  1190. return IRQ_HANDLED;
  1191. }
  1192. /*
  1193. * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  1194. */
  1195. static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
  1196. {
  1197. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1198. int i;
  1199. /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
  1200. if (trans->cfg->internal_wimax_coex &&
  1201. !trans->cfg->apmg_not_supported &&
  1202. (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
  1203. APMS_CLK_VAL_MRB_FUNC_MODE) ||
  1204. (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
  1205. APMG_PS_CTRL_VAL_RESET_REQ))) {
  1206. clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
  1207. iwl_op_mode_wimax_active(trans->op_mode);
  1208. wake_up(&trans_pcie->wait_command_queue);
  1209. return;
  1210. }
  1211. iwl_pcie_dump_csr(trans);
  1212. iwl_dump_fh(trans, NULL);
  1213. local_bh_disable();
  1214. /* The STATUS_FW_ERROR bit is set in this function. This must happen
  1215. * before we wake up the command caller, to ensure a proper cleanup. */
  1216. iwl_trans_fw_error(trans);
  1217. local_bh_enable();
  1218. for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
  1219. del_timer(&trans_pcie->txq[i].stuck_timer);
  1220. clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
  1221. wake_up(&trans_pcie->wait_command_queue);
  1222. }
  1223. static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
  1224. {
  1225. u32 inta;
  1226. lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
  1227. trace_iwlwifi_dev_irq(trans->dev);
  1228. /* Discover which interrupts are active/pending */
  1229. inta = iwl_read32(trans, CSR_INT);
  1230. /* the thread will service interrupts and re-enable them */
  1231. return inta;
  1232. }
  1233. /* a device (PCI-E) page is 4096 bytes long */
  1234. #define ICT_SHIFT 12
  1235. #define ICT_SIZE (1 << ICT_SHIFT)
  1236. #define ICT_COUNT (ICT_SIZE / sizeof(u32))
  1237. /* Interrupt handler using the ICT table. With this mechanism the driver
  1238. * stops using the INTA register to get the device's interrupts, since reading
  1239. * that register is expensive. The device writes interrupts into an ICT DRAM
  1240. * table, increments the index and then fires an interrupt to the driver. The
  1241. * driver ORs all ICT table entries from the current index up to the first
  1242. * entry with a 0 value; the result is the interrupt we need to service. The
  1243. * driver then sets those entries back to 0 and updates the index.
  1244. */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta;
        u32 val = 0;
        u32 read;

        trace_iwlwifi_dev_irq(trans->dev);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read)
                return 0;

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 is set.
         */
        if (val & 0xC0000)
                val |= 0x8000;
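        /*
         * The ICT entry packs CSR_INT bits 0-7 in its low byte and bits
         * 24-31 in its second byte; expand it back to the CSR_INT layout.
         */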
        inta = (0xff & val) | ((0xff00 & val) << 16);

        return inta;
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
        struct iwl_trans *trans = dev_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);

        spin_lock(&trans_pcie->irq_lock);

        /* If the DRAM interrupt table is not set up yet,
         * fall back to the legacy INTA register.
         */
        if (likely(trans_pcie->use_ict))
                inta = iwl_pcie_int_cause_ict(trans);
        else
                inta = iwl_pcie_int_cause_non_ict(trans);

        if (iwl_have_debug_level(IWL_DL_ISR)) {
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
                              inta, trans_pcie->inta_mask,
                              iwl_read32(trans, CSR_INT_MASK),
                              iwl_read32(trans, CSR_FH_INT_STATUS));
                if (inta & (~trans_pcie->inta_mask))
                        IWL_DEBUG_ISR(trans,
                                      "We got a masked interrupt (0x%08x)\n",
                                      inta & (~trans_pcie->inta_mask));
        }
        inta &= trans_pcie->inta_mask;

        /*
         * Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC.
         */
        if (unlikely(!inta)) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                /*
                 * Re-enable interrupts here since we don't
                 * have anything to service
                 */
                if (test_bit(STATUS_INT_ENABLED, &trans->status))
                        _iwl_enable_interrupts(trans);
                spin_unlock(&trans_pcie->irq_lock);
                lock_map_release(&trans->sync_cmd_lockdep_map);
                return IRQ_NONE;
        }

        if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /*
                 * Hardware disappeared. It might have
                 * already raised an interrupt.
                 */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                spin_unlock(&trans_pcie->irq_lock);
                goto out;
        }

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * the hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, iwl_read32(trans, CSR_INT_MASK));

        spin_unlock(&trans_pcie->irq_lock);
        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                goto out;
        }

        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans,
                                      "Scheduler finished transmitting the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }

        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
                mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
                                               &trans->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
                        inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_check_wrptr(trans);
                iwl_pcie_txq_check_wrptrs(trans);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }
        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                    CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }

                /* Sending an RX interrupt requires several steps to be done
                 * in the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver may receive the
                 * RX interrupt before the shared data reflects it; the
                 * periodic interrupt will detect any dangling Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_DIS);

                /*
                 * Enable the periodic interrupt in 8 msec only if we received
                 * a real RX interrupt (instead of just a periodic one), to
                 * catch any dangling Rx interrupt. If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;

                local_bh_disable();
                iwl_pcie_rx_handle(trans, 0);
                local_bh_enable();
        }
        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        spin_lock(&trans_pcie->irq_lock);
        /* Re-enable all interrupts only if they were disabled by the irq handler */
        if (test_bit(STATUS_INT_ENABLED, &trans->status))
                _iwl_enable_interrupts(trans);
        /* we are loading the firmware, enable FH_TX interrupt only */
        else if (handled & CSR_INT_BIT_FH_TX)
                iwl_enable_fw_load_int(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
        spin_unlock(&trans_pcie->irq_lock);

out:
        lock_map_release(&trans->sync_cmd_lockdep_map);
        return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}
/*
 * Allocate the shared DRAM table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        trans_pcie->ict_tbl =
                dma_zalloc_coherent(trans->dev, ICT_SIZE,
                                    &trans_pcie->ict_tbl_dma,
                                    GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;
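        /*
         * Only ict_tbl_dma >> ICT_SHIFT is programmed into the device (see
         * iwl_pcie_reset_ict()), so the table must be ICT_SIZE-aligned. A
         * coherent DMA allocation of a full page is expected to come back
         * page-aligned, hence the check below is only a sanity check.
         */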
        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        return 0;
}
/* The device is coming up: point it at the ICT interrupt table and
 * tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
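        /*
         * CSR_DRAM_INT_TBL_REG takes the table's DMA base address in 4 KB
         * units (ict_tbl_dma >> ICT_SHIFT), together with the enable/control
         * flags, which is why the table had to be ICT_SIZE-aligned.
         */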
        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE |
               CSR_DRAM_INIT_TBL_WRAP_CHECK |
               CSR_DRAM_INIT_TBL_WRITE_POINTER;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        _iwl_enable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        trans_pcie->use_ict = false;
        spin_unlock(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;

        if (!trans)
                return IRQ_NONE;

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the IRQ thread will re-enable
         * interrupts. If we *don't* have something, we'll re-enable before
         * leaving here.
         */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
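        /* Nothing to do in hard-IRQ context for MSI-X; just wake the IRQ thread. */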
        return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
        struct msix_entry *entry = dev_id;
        struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
        struct iwl_trans *trans = trans_pcie->trans;
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta_fh, inta_hw;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);

        spin_lock(&trans_pcie->irq_lock);
        inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
        inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
        /*
         * Clear the cause registers to avoid handling the same cause again.
         */
        iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
        iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
        spin_unlock(&trans_pcie->irq_lock);
        if (unlikely(!(inta_fh | inta_hw))) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                lock_map_release(&trans->sync_cmd_lockdep_map);
                return IRQ_NONE;
        }

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
                              inta_fh,
                              iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                /*
                 * Wake up uCode load routine,
                 * now that load is complete
                 */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        /* Error detected by uCode */
        if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
            (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
                IWL_ERR(trans,
                        "Microcode SW error detected. Restarting 0x%X.\n",
                        inta_fh);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
        }

        /* After checking FH register check HW register */
        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans,
                              "ISR inta_hw 0x%08x, enabled 0x%08x\n",
                              inta_hw,
                              iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

        /* Alive notification via Rx interrupt will do the real work */
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
                IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                isr_stats->alive++;
        }

        /* uCode wakes up after power-down sleep */
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_check_wrptr(trans);
                iwl_pcie_txq_check_wrptrs(trans);

                isr_stats->wakeup++;
        }

        /* Chip got too hot and stopped itself */
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
        }

        /* HW RF KILL switch toggled */
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
                mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
                                               &trans->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans->status);
                }
        }

        if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
                IWL_ERR(trans,
                        "Hardware error detected. Restarting.\n");

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);
        }

        iwl_pcie_clear_irq(trans, entry);

        lock_map_release(&trans->sync_cmd_lockdep_map);

        return IRQ_HANDLED;
}