/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
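
/*
 * Allocate a DMA-able buffer for the firmware monitor.  The buffer is
 * 2^max_power bytes (the TLV value is biased by 11; the default and
 * maximum is 2^26); on allocation failure we retry with progressively
 * smaller powers of two, down to 2^11.
 */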
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
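
/*
 * Indirect access to the shared (SHR) register space: the target offset
 * goes into the control register together with an opcode in the top
 * nibble (2 = read, 3 = write), and the value travels through the data
 * register.
 */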
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * the workaround below is not otherwise related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
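
/*
 * Power the device down: optionally tell the ME we are leaving (when
 * the op_mode goes away), stop busmaster DMA, then either apply the LP
 * XTAL workaround or reset the device back to the D0U* state.
 */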
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
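
/*
 * One-stop NIC setup on the way to loading firmware: bring the APM up,
 * select the power source, let the op_mode configure the NIC and
 * allocate (or reset) the RX queue and all TX/command queues.
 */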
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
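
/*
 * Firmware chunks are copied from host DRAM into device SRAM by the
 * service DMA channel: the helpers below program the source address,
 * destination address and byte count, and the caller then sleeps on
 * ucode_write_waitq until the interrupt handler reports the FH_TX
 * completion for the channel.
 */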
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
					     u32 dst_addr, dma_addr_t phy_addr,
					     u32 byte_cnt)
{
	/* Stop DMA channel */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);

	/* Configure SRAM address */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
		    dst_addr);

	/* Configure DRAM address - 64 bit */
	iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);

	/* Configure byte count to transfer */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);

	/* Enable the DRAM2SRAM to start */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
						   TFH_SRV_DMA_TO_DRIVER |
						   TFH_SRV_DMA_START);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	if (trans->cfg->use_tfh)
		iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
						 byte_cnt);
	else
		iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
						byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
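
/*
 * Load one uCode section: stage it through a coherent DMA bounce buffer
 * (ideally FH_MEM_TB_MAX_LENGTH bytes, falling back to a single page)
 * and push it chunk by chunk, enabling the LMPM extended address space
 * around chunks that land in the extended SRAM range.
 */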
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * The driver takes ownership of the secure machine before FW load to
 * prevent a race with the BT load.
 * W/A for a ROM bug (should be removed in the next Si step).
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_DEBUG_INFO(trans,
			       "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		if (trans->cfg->use_tfh) {
			val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
		} else {
			val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		}
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
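
/*
 * Pre-8000 counterpart of the function above: load the uCode sections
 * for one CPU without the per-section load-status handshake; the
 * separator sections in the image are interpreted the same way.
 */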
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}
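
/*
 * Apply the debug-destination TLV shipped with the firmware: replay its
 * list of register operations and, for an EXTERNAL_MODE destination,
 * point the monitor base/end registers at the fw_mon DMA buffer.
 */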
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >>
						dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
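
/*
 * Bring the device down: mask interrupts, stop TX/RX DMA, power the APM
 * down and reset the embedded processor.  Must be called with
 * trans_pcie->mutex held; returns early if the device is already down.
 */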
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);

	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
  1074. static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
  1075. {
  1076. iwl_pcie_reset_ict(trans);
  1077. iwl_pcie_tx_start(trans, scd_addr);
  1078. }
  1079. static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
  1080. {
  1081. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1082. mutex_lock(&trans_pcie->mutex);
  1083. _iwl_trans_pcie_stop_device(trans, low_power);
  1084. mutex_unlock(&trans_pcie->mutex);
  1085. }
  1086. void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
  1087. {
  1088. struct iwl_trans_pcie __maybe_unused *trans_pcie =
  1089. IWL_TRANS_GET_PCIE_TRANS(trans);
  1090. lockdep_assert_held(&trans_pcie->mutex);
  1091. if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
  1092. _iwl_trans_pcie_stop_device(trans, true);
  1093. }

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				      bool reset)
{
	if (!reset) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);
	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	iwl_pcie_enable_rx_wake(trans, false);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_enable_rx_wake(trans, true);

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
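
/*
 * MSI-X cause routing: each entry below ties one interrupt cause to
 * the mask register that gates it and to its byte offset ("addr") in
 * the IVAR table. The addr values are hardware-defined IVAR offsets
 * (passed to CSR_MSIX_IVAR()), not indices into this array.
 */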

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,  CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,          CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,       CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,    CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,   CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,  CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,      CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,    CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,   CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,      CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
		iwl_clear_bit(trans, causes_list[i].mask_reg,
			      causes_list[i].cause_num);
	}
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - the fallback queue, which is designated for
	 * management frames, command responses, etc. - is always mapped to
	 * the first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->cfg->mq_rx_supported)
			iwl_write_prph(trans, UREG_CHICK,
				       UREG_CHICK_MSI_ENABLE);
		return;
	}

	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);
	iwl_pcie_map_non_rx_causes(trans);

	trans_pcie->fh_init_mask =
		~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask =
		~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
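
/*
 * Vector allocation policy: request one MSI-X vector per online CPU
 * for RX, plus one for the fallback queue and one for non-RX causes,
 * capped at IWL_MAX_RX_HW_QUEUES. If the OS grants fewer, causes are
 * doubled up on shared vectors as described in the comment inside
 * iwl_pcie_set_interrupt_capa() below.
 */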

static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
					struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret, nr_online_cpus;
	u16 pci_cmd;

	if (!trans->cfg->mq_rx_supported)
		goto enable_msi;

	nr_online_cpus = num_online_cpus();
	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= nr_online_cpus) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == nr_online_cpus + 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q; i++) {
		/*
		 * cpumask_next() returns the first online CPU strictly
		 * after (i - offset), so successive RX vectors are pinned
		 * to successive online CPUs.
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}
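
/*
 * queue_name() builds the per-vector IRQ names shown in
 * /proc/interrupts. The non-literal names are devm-allocated against
 * the PCI device, so they live until the device is released and need
 * no explicit kfree() here.
 */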

static const char *queue_name(struct device *dev,
			      struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}

	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);
			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}
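
/*
 * Bring the hardware up far enough for the op_mode to use it: prepare
 * the card, soft-reset it, re-init APM and MSI-X, and start reporting
 * RF-kill state. The underscore-prefixed variant below expects
 * trans_pcie->mutex to be held; the locked wrapper follows it.
 */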

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	iwl_pcie_apm_init(trans);

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that... */
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ... rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* Make sure we sync here, because we'll need full access later */
	if (low_power)
		pm_runtime_resume(trans->dev);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
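
/*
 * Periphery (PRPH) registers are reached indirectly: the target
 * address goes to HBUS_TARG_PRPH_{R,W}ADDR above and the data moves
 * through the matching RDAT/WDAT register. The (3 << 24) OR'ed into
 * the address appears to select the 4-byte access size expected by
 * the HBUS target interface (an interpretation, not spelled out here).
 */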

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);

	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;

	trans_pcie->page_offs = trans_cfg->cb_data_offs;
	trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  iwl_read32(trans, CSR_GP_CNTRL));
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
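
/*
 * The two memory accessors below follow the grab/release pattern
 * above: they only touch HBUS_TARG_MEM_* while the MAC is known to be
 * awake, and report -EBUSY if it could not be woken.
 */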

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}
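
/*
 * TX flush diagnostics: IWL_FLUSH_WAIT_MS bounds how long
 * iwl_trans_pcie_wait_txq_empty() polls a queue before declaring the
 * flush failed and dumping scheduler state via
 * iwl_trans_pcie_log_scd_error().
 */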

#define IWL_FLUSH_WAIT_MS	2000

void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 scd_sram_addr;
	u8 buf[16];
	int cnt;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->read_ptr, txq->write_ptr);

	if (trans->cfg->use_tfh)
		/* TODO: access new SCD registers and dump them */
		return;

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}
}

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		wr_ptr = ACCESS_ONCE(txq->write_ptr);

		while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(txq->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			usleep_range(1000, 2000);
		}

		if (txq->read_ptr != txq->write_ptr) {
			IWL_ERR(trans,
				"failed to flush all tx fifo queues Q %d\n",
				cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (ret)
		iwl_trans_pcie_log_scd_error(trans, txq);

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_get(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}

static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
	pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}
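
/*
 * CSR dump helpers: get_csr_string() maps a register offset back to
 * its symbolic name for logging, and iwl_pcie_dump_csr() walks the
 * csr_tbl list and prints each register's current value.
 */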

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
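
/*
 * For example, DEBUGFS_READ_FILE_OPS(rx_queue) expands to a read-only
 * "iwl_dbgfs_rx_queue_ops" backed by iwl_dbgfs_rx_queue_read(), which
 * the matching DEBUGFS_ADD_FILE(rx_queue, ...) call in
 * iwl_trans_pcie_dbgfs_register() then wires into debugfs.
 */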

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, txq->read_ptr, txq->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
					 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans_pcie->max_tbs; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}
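
/*
 * Firmware error-dump helpers. Each helper below fills one
 * iwl_fw_error_dump_data chunk, advances *data via
 * iwl_fw_error_next_data(), and returns the number of bytes it added
 * so the caller can account for the total dump length.
 */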

static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page so it can be handed back to the device */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_prph_no_grab(trans,
						  MON_DMARB_RD_DATA_ADDR);
	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}

static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
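
/*
 * Top-level dump: a worst-case length is computed first, the buffer
 * is allocated once with vzalloc(), and then the TX command history,
 * CSRs, FH registers, optionally the RBs, and the monitor data are
 * emitted back to back.
 */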

static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->cfg->mq_rx_supported;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
			  & 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->write_ptr;
	for (i = 0; i < cmdq->n_window; i++) {
		u8 idx = get_cmd_index(cmdq, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
						   trans_pcie->tfd_size * ptr);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		return iwl_pci_fw_enter_d0i3(trans);

	return 0;
}

static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */
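
/*
 * The PCIe implementation of the transport API; everything the
 * op_mode calls on this transport is dispatched through this table.
 */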

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif /* CONFIG_PM_SLEEP */

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,
	.dump_data = iwl_trans_pcie_dump_data,
};
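
/*
 * Probe-time allocation and setup. Most resources are acquired with
 * managed (pcim_/devm_) helpers and are released automatically on
 * detach; only the ICT table, the percpu TSO pages and the trans
 * struct itself are unwound explicitly at the error labels below.
 */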

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->tso_hdr_page) {
		ret = -ENOMEM;
		goto out_no_pci;
	}

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	if (cfg->mq_rx_supported)
		addr_size = 64;
	else
		addr_size = 36;

	if (cfg->use_tfh) {
		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
	} else {
		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
	}
	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_no_pci;
		}

		/*
		 * In order to recognize C step, the driver should read the
		 * chip version id located in the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_no_pci;
		}

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			u32 hw_step;

			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

	iwl_pcie_set_interrupt_capa(pdev, trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	init_waitqueue_head(&trans_pcie->d0i3_waitq);

	if (trans_pcie->msix_enabled) {
		/* capture the error code so we don't return ERR_PTR(0) */
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
	}

#ifdef CONFIG_IWLWIFI_PCIE_RTPM
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	free_percpu(trans_pcie->tso_hdr_page);
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}