sdma.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "hfi.h"
#include "common.h"
#include "qp.h"
#include "sdma.h"
#include "iowait.h"
#include "trace.h"

/* must be a power of 2 >= 64 and <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
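
/*
 * Illustrative usage of the parameters above (values are examples, not
 * recommendations; the sysfs path is the kernel's standard module
 * parameter location):
 *
 *   modprobe hfi1 sdma_descq_cnt=4096 num_sdma=8
 *   echo 128 > /sys/module/hfi1/parameters/desct_intr
 *
 * Of these, only desct_intr is writable at runtime (S_IWUSR above); the
 * others are fixed once the module is loaded.
 */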

#define SDMA_WAIT_BATCH_SIZE 20

/* max wait time for an SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */

/* all SDMA engine errors that cause a halt */
#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))

/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT

static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down] = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle] = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze] = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
	[sdma_state_s99_running] = "s99_Running",
};

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running] = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[sdma_event_e60_hw_halted] = "e60_HwHalted",
	[sdma_event_e70_go_idle] = "e70_GoIdle",
	[sdma_event_e80_hw_freeze] = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen] = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
	[sdma_event_e85_link_down] = "e85_LinkDown",
	[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
#endif

static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s20_idle] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s50_hw_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s80_hw_freeze] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s82_freeze_sw_clean] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
		.go_s99_running_totrue = 1,
	},
};
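
/*
 * sdma_set_state() below consults this table on every state transition:
 * the op_* flags for the next state are OR'ed into a SDMA_SENDCTRL_OP_*
 * bitmask and handed to sdma_sendctrl(), while the go_s99_running_to*
 * flags latch whether the engine should proceed to s99_Running.
 */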

#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
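
/*
 * Reference counting on the state machine: sdma_get()/sdma_put() take and
 * drop references on sdma_state.kref, sdma_complete() fires when the last
 * reference is dropped, and sdma_finalput() drops the caller's reference
 * and then blocks until every other holder has done the same.
 */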

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if the occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}
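
/*
 * Program the descriptor-count interrupt threshold (the DESC_CNT CSR) for
 * this engine; a no-op unless the device has SDMA timeout support
 * (HFI1_HAS_SDMA_TIMEOUT).
 */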
static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}
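
/*
 * Retire a single txreq: clean up its descriptors, invoke its completion
 * callback (if any) with the given result, and wake any iowait that is
 * draining this engine once iowait_sdma_dec() reports its outstanding
 * SDMA count has dropped to zero.
 */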
static inline void complete_tx(struct sdma_engine *sde,
			       struct sdma_txreq *tx,
			       int res)
{
	/* protect against complete modifying */
	struct iowait *wait = tx->wait;
	callback_t complete = tx->complete;

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	trace_hfi1_sdma_out_sn(sde, tx->sn);
	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
		dd_dev_err(sde->dd, "expected %llu got %llu\n",
			   sde->head_sn, tx->sn);
	sde->head_sn++;
#endif
	__sdma_txclean(sde->dd, tx);
	if (complete)
		(*complete)(tx, res);
	if (wait && iowait_sdma_dec(wait))
		iowait_drain_wakeup(wait);
}

/*
 * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 *
 * This routine is called from two places:
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
}

/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}

static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway. This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}
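
/*
 * Arm the cross-engine progress check: snapshot every other engine's
 * descriptor head and fire err_progress_check_timer shortly afterwards so
 * stalled engines can be detected. Only done when AHG is enabled and the
 * hardware is not a Bx stepping.
 */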
static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
							curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}

static void sdma_err_progress_check(unsigned long data)
{
	unsigned index;
	struct sdma_engine *sde = (struct sdma_engine *)data;

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}
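
/*
 * Tasklet body: poll the engine STATUS CSR until the hardware reports the
 * clean-up is done, then feed e25_hw_clean_up_done to the state machine.
 */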
static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	smp_read_barrier_depends(); /* see sdma_update_tail() */
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}

/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps. So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so. So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine. A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests. The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running &&
	    next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}

/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * list.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * alloc tx's.
 *
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 between 64 and 32768, inclusive.
	 * Otherwise return the default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}

/**
 * sdma_engine_get_vl() - return vl for a given sdma engine
 * @sde: sdma engine
 *
 * This function returns the vl mapped to a given engine, or an error if
 * the mapping can't be found. The mapping fields are protected by RCU.
 */
int sdma_engine_get_vl(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	struct sdma_vl_map *m;
	u8 vl;

	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
		return -EINVAL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	vl = m->engine_to_vl[sde->this_idx];
	rcu_read_unlock();

	return vl;
}

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an engine based on the selector and a vl. The
 * mapping fields are protected by RCU.
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	/* NOTE This should only happen if SC->VL changed after the initial
	 *      checks on the QP/AH
	 *      Default will return engine 0 below
	 */
	if (vl >= num_vls) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}

struct sdma_rht_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[0];
};

struct sdma_rht_node {
	unsigned long cpu_id;
	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
	struct rhash_head node;
};
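
/*
 * User-defined engine affinity lives in an rhashtable keyed by CPU id:
 * each sdma_rht_node holds, per VL, a power-of-two padded array of the
 * engines assigned to that CPU. The table is written by
 * sdma_set_cpu_to_sde_map() and read by sdma_select_user_engine() below.
 */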

#define NR_CPUS_HINT 192

static const struct rhashtable_params sdma_rht_params = {
	.nelem_hint = NR_CPUS_HINT,
	.head_offset = offsetof(struct sdma_rht_node, node),
	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
	.max_size = NR_CPUS,
	.min_size = 8,
	.automatic_shrinking = true,
};

/*
 * sdma_select_user_engine() - select sdma engine based on user setup
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an sdma engine for a user sdma request.
 * User defined sdma engine affinity setting is honored when applicable,
 * otherwise system default sdma engine mapping is used. To ensure correct
 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
 */
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl)
{
	struct sdma_rht_node *rht_node;
	struct sdma_engine *sde = NULL;
	const struct cpumask *current_mask = &current->cpus_allowed;
	unsigned long cpu_id;

	/*
	 * To ensure that always the same sdma engine(s) will be
	 * selected make sure the process is pinned to this CPU only.
	 */
	if (cpumask_weight(current_mask) != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
					  sdma_rht_params);

	if (rht_node && rht_node->map[vl]) {
		struct sdma_rht_map_elem *map = rht_node->map[vl];

		sde = map->sde[selector & map->mask];
	}
	rcu_read_unlock();

	if (sde)
		return sde;

out:
	return sdma_select_engine_vl(dd, selector, vl);
}
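
/*
 * Pad the tail of the map out to the next power of two by replicating the
 * leading entries, so that indexing with (selector & mask) always lands
 * on a valid engine.
 */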
static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{
	int i;

	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
		map->sde[map->ctr + i] = map->sde[i];
}

static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
				 struct sdma_engine *sde)
{
	unsigned int i, pow;

	/* only need to check the first ctr entries for a match */
	for (i = 0; i < map->ctr; i++) {
		if (map->sde[i] == sde) {
			memmove(&map->sde[i], &map->sde[i + 1],
				(map->ctr - i - 1) * sizeof(map->sde[0]));
			map->ctr--;
			pow = roundup_pow_of_two(map->ctr ? : 1);
			map->mask = pow - 1;
			sdma_populate_sde_map(map);
			break;
		}
	}
}

/*
 * Prevents concurrent reads and writes of the sdma engine cpu_mask
 */
static DEFINE_MUTEX(process_to_sde_mutex);

ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count)
{
	struct hfi1_devdata *dd = sde->dd;
	cpumask_var_t mask, new_mask;
	unsigned long cpu;
	int ret, vl, sz;

	vl = sdma_engine_get_vl(sde);
	if (unlikely(vl < 0))
		return -EINVAL;

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
	if (!ret) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out_free;

	if (!cpumask_subset(mask, cpu_online_mask)) {
		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out_free;
	}

	sz = sizeof(struct sdma_rht_map_elem) +
			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));

	mutex_lock(&process_to_sde_mutex);

	for_each_cpu(cpu, mask) {
		struct sdma_rht_node *rht_node;

		/* Check if we have this already mapped */
		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
			cpumask_set_cpu(cpu, new_mask);
			continue;
		}

		if (vl >= ARRAY_SIZE(rht_node->map)) {
			ret = -EINVAL;
			goto out;
		}

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (!rht_node) {
			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
			if (!rht_node) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
			if (!rht_node->map[vl]) {
				kfree(rht_node);
				ret = -ENOMEM;
				goto out;
			}
			rht_node->cpu_id = cpu;
			rht_node->map[vl]->mask = 0;
			rht_node->map[vl]->ctr = 1;
			rht_node->map[vl]->sde[0] = sde;

			ret = rhashtable_insert_fast(dd->sdma_rht,
						     &rht_node->node,
						     sdma_rht_params);
			if (ret) {
				kfree(rht_node->map[vl]);
				kfree(rht_node);
				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
					   cpu);
				goto out;
			}
		} else {
			int ctr, pow;

			/* Add new user mappings */
			if (!rht_node->map[vl])
				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);

			if (!rht_node->map[vl]) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl]->ctr++;
			ctr = rht_node->map[vl]->ctr;
			rht_node->map[vl]->sde[ctr - 1] = sde;
			pow = roundup_pow_of_two(ctr);
			rht_node->map[vl]->mask = pow - 1;

			/* Populate the sde map table */
			sdma_populate_sde_map(rht_node->map[vl]);
		}
		cpumask_set_cpu(cpu, new_mask);
	}

	/* Clean up old mappings */
	for_each_cpu(cpu, cpu_online_mask) {
		struct sdma_rht_node *rht_node;

		/* Don't cleanup sdes that are set in the new mask */
		if (cpumask_test_cpu(cpu, mask))
			continue;

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (rht_node) {
			bool empty = true;
			int i;

			/* Remove mappings for old sde */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
				if (rht_node->map[i])
					sdma_cleanup_sde_map(rht_node->map[i],
							     sde);

			/* Free empty hash table entries */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
				if (!rht_node->map[i])
					continue;

				if (rht_node->map[i]->ctr) {
					empty = false;
					break;
				}
			}

			if (empty) {
				ret = rhashtable_remove_fast(dd->sdma_rht,
							     &rht_node->node,
							     sdma_rht_params);
				WARN_ON(ret);

				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
					kfree(rht_node->map[i]);

				kfree(rht_node);
			}
		}
	}

	cpumask_copy(&sde->cpu_mask, new_mask);
out:
	mutex_unlock(&process_to_sde_mutex);
out_free:
	free_cpumask_var(mask);
	free_cpumask_var(new_mask);
	return ret ? : strnlen(buf, PAGE_SIZE);
}
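
/*
 * Illustrative usage from user space (the sysfs attribute name and path
 * here are illustrative, not taken from this file): writing a cpulist
 * such as
 *
 *   echo "0-3,8" > .../sdma3/cpu_list
 *
 * maps those CPUs to this engine for user SDMA on the engine's VL, while
 * CPUs dropped from the list have the engine removed from their mapping.
 */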

ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
{
	mutex_lock(&process_to_sde_mutex);
	if (cpumask_empty(&sde->cpu_mask))
		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
	else
		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
	mutex_unlock(&process_to_sde_mutex);
	return strnlen(buf, PAGE_SIZE);
}

static void sdma_rht_free(void *ptr, void *arg)
{
	struct sdma_rht_node *rht_node = ptr;
	int i;

	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
		kfree(rht_node->map[i]);

	kfree(rht_node);
}

/**
 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
 * @s: seq file
 * @dd: hfi1_devdata
 * @cpuid: cpu id
 *
 * This routine dumps the process to sde mappings per cpu
 */
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
				struct hfi1_devdata *dd,
				unsigned long cpuid)
{
	struct sdma_rht_node *rht_node;
	int i, j;

	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
					  sdma_rht_params);
	if (!rht_node)
		return;

	seq_printf(s, "cpu%3lu: ", cpuid);
	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
			continue;

		seq_printf(s, " vl%d: [", i);
		for (j = 0; j < rht_node->map[i]->ctr; j++) {
			if (!rht_node->map[i]->sde[j])
				continue;

			if (j > 0)
				seq_puts(s, ",");

			seq_printf(s, " sdma%2d",
				   rht_node->map[i]->sde[j]->this_idx);
		}
		seq_puts(s, " ]");
	}

	seq_puts(s, "\n");
}

/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
 * implies auto computing the loading and giving each VLs a uniform
 * distribution of engines per VL.
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines. Any extra engines are added from the last VL on down.
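 *
 * For example (illustrative numbers): with 16 engines and 3 VLs,
 * sde_per_vl is 5 with one extra engine, so VL2 gets 6 engines while
 * VL0 and VL1 get 5 each.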
  1030. *
  1031. * rcu locking is used here to control access to the mapping fields.
  1032. *
  1033. * If either the num_vls or num_sdma are non-power of 2, the array sizes
  1034. * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
  1035. * up to the next highest power of 2 and the first entry is reused
  1036. * in a round robin fashion.
  1037. *
  1038. * If an error occurs the map change is not done and the mapping is
  1039. * not changed.
  1040. *
  1041. */
  1042. int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
  1043. {
  1044. int i, j;
  1045. int extra, sde_per_vl;
  1046. int engine = 0;
  1047. u8 lvl_engines[OPA_MAX_VLS];
  1048. struct sdma_vl_map *oldmap, *newmap;
  1049. if (!(dd->flags & HFI1_HAS_SEND_DMA))
  1050. return 0;
  1051. if (!vl_engines) {
  1052. /* truncate divide */
  1053. sde_per_vl = dd->num_sdma / num_vls;
  1054. /* extras */
  1055. extra = dd->num_sdma % num_vls;
  1056. vl_engines = lvl_engines;
  1057. /* add extras from last vl down */
  1058. for (i = num_vls - 1; i >= 0; i--, extra--)
  1059. vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
  1060. }
  1061. /* build new map */
  1062. newmap = kzalloc(
  1063. sizeof(struct sdma_vl_map) +
  1064. roundup_pow_of_two(num_vls) *
  1065. sizeof(struct sdma_map_elem *),
  1066. GFP_KERNEL);
  1067. if (!newmap)
  1068. goto bail;
  1069. newmap->actual_vls = num_vls;
  1070. newmap->vls = roundup_pow_of_two(num_vls);
  1071. newmap->mask = (1 << ilog2(newmap->vls)) - 1;
  1072. /* initialize back-map */
  1073. for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
  1074. newmap->engine_to_vl[i] = -1;
  1075. for (i = 0; i < newmap->vls; i++) {
  1076. /* save for wrap around */
  1077. int first_engine = engine;
  1078. if (i < newmap->actual_vls) {
  1079. int sz = roundup_pow_of_two(vl_engines[i]);
  1080. /* only allocate once */
  1081. newmap->map[i] = kzalloc(
  1082. sizeof(struct sdma_map_elem) +
  1083. sz * sizeof(struct sdma_engine *),
  1084. GFP_KERNEL);
  1085. if (!newmap->map[i])
  1086. goto bail;
  1087. newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
  1088. /* assign engines */
  1089. for (j = 0; j < sz; j++) {
  1090. newmap->map[i]->sde[j] =
  1091. &dd->per_sdma[engine];
  1092. if (++engine >= first_engine + vl_engines[i])
  1093. /* wrap back to first engine */
  1094. engine = first_engine;
  1095. }
  1096. /* assign back-map */
  1097. for (j = 0; j < vl_engines[i]; j++)
  1098. newmap->engine_to_vl[first_engine + j] = i;
  1099. } else {
  1100. /* just re-use entry without allocating */
  1101. newmap->map[i] = newmap->map[i % num_vls];
  1102. }
  1103. engine = first_engine + vl_engines[i];
  1104. }
  1105. /* newmap in hand, save old map */
  1106. spin_lock_irq(&dd->sde_map_lock);
  1107. oldmap = rcu_dereference_protected(dd->sdma_map,
  1108. lockdep_is_held(&dd->sde_map_lock));
  1109. /* publish newmap */
  1110. rcu_assign_pointer(dd->sdma_map, newmap);
  1111. spin_unlock_irq(&dd->sde_map_lock);
  1112. /* success, free any old map after grace period */
  1113. if (oldmap)
  1114. call_rcu(&oldmap->list, sdma_map_rcu_callback);
  1115. return 0;
  1116. bail:
  1117. /* free any partial allocation */
  1118. sdma_map_free(newmap);
  1119. return -ENOMEM;
  1120. }

/*
 * Clean up allocated memory.
 *
 * This routine can be called regardless of the success of sdma_init().
 *
 */
static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	sdma_map_free(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;

	if (dd->sdma_rht) {
		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
		kfree(dd->sdma_rht);
		dd->sdma_rht = NULL;
	}
}

/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * sdma_init initializes the specified number of engines.
 *
 * The code initializes each sde and its csrs. Interrupts
 * are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	struct rhashtable *tmp_sdma_rht;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;
	int ret = -ENOMEM;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= dd->chip_sdma_engines &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
	if (!dd->per_sdma)
		return ret;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;

		/* Create a mask specifically for each interrupt source */
		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
					   this_idx);
		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
						this_idx);
		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
					    this_idx);
		/* Create a combined mask to cover all 3 interrupt sources */
		sde->imask = sde->int_mask | sde->progress_mask |
			     sde->idle_mask;

		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
  1253. /* insure there is always a zero bit */
  1254. sde->ahg_bits = 0xfffffffe00000000ULL;
  1255. sdma_set_state(sde, sdma_state_s00_hw_down);
  1256. /* set up reference counting */
  1257. kref_init(&sde->state.kref);
  1258. init_completion(&sde->state.comp);
  1259. INIT_LIST_HEAD(&sde->flushlist);
  1260. INIT_LIST_HEAD(&sde->dmawait);
  1261. sde->tail_csr =
  1262. get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
  1263. if (idle_cnt)
  1264. dd->default_desc1 =
  1265. SDMA_DESC1_HEAD_TO_HOST_FLAG;
  1266. else
  1267. dd->default_desc1 =
  1268. SDMA_DESC1_INT_REQ_FLAG;
  1269. tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
  1270. (unsigned long)sde);
  1271. tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
  1272. (unsigned long)sde);
  1273. INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
  1274. INIT_WORK(&sde->flush_worker, sdma_field_flush);
  1275. sde->progress_check_head = 0;
  1276. setup_timer(&sde->err_progress_check_timer,
  1277. sdma_err_progress_check, (unsigned long)sde);
  1278. sde->descq = dma_zalloc_coherent(
  1279. &dd->pcidev->dev,
  1280. descq_cnt * sizeof(u64[2]),
  1281. &sde->descq_phys,
  1282. GFP_KERNEL
  1283. );
  1284. if (!sde->descq)
  1285. goto bail;
  1286. sde->tx_ring =
  1287. kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
  1288. GFP_KERNEL);
  1289. if (!sde->tx_ring)
  1290. sde->tx_ring =
  1291. vzalloc(
  1292. sizeof(struct sdma_txreq *) *
  1293. descq_cnt);
  1294. if (!sde->tx_ring)
  1295. goto bail;
  1296. }
  1297. dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
  1298. /* Allocate memory for DMA of head registers to memory */
  1299. dd->sdma_heads_dma = dma_zalloc_coherent(
  1300. &dd->pcidev->dev,
  1301. dd->sdma_heads_size,
  1302. &dd->sdma_heads_phys,
  1303. GFP_KERNEL
  1304. );
  1305. if (!dd->sdma_heads_dma) {
  1306. dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
  1307. goto bail;
  1308. }
  1309. /* Allocate memory for pad */
  1310. dd->sdma_pad_dma = dma_zalloc_coherent(
  1311. &dd->pcidev->dev,
  1312. sizeof(u32),
  1313. &dd->sdma_pad_phys,
  1314. GFP_KERNEL
  1315. );
  1316. if (!dd->sdma_pad_dma) {
  1317. dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
  1318. goto bail;
  1319. }
  1320. /* assign each engine to different cacheline and init registers */
  1321. curr_head = (void *)dd->sdma_heads_dma;
  1322. for (this_idx = 0; this_idx < num_engines; ++this_idx) {
  1323. unsigned long phys_offset;
  1324. sde = &dd->per_sdma[this_idx];
  1325. sde->head_dma = curr_head;
  1326. curr_head += L1_CACHE_BYTES;
  1327. phys_offset = (unsigned long)sde->head_dma -
  1328. (unsigned long)dd->sdma_heads_dma;
  1329. sde->head_phys = dd->sdma_heads_phys + phys_offset;
  1330. init_sdma_regs(sde, per_sdma_credits, idle_cnt);
  1331. }
  1332. dd->flags |= HFI1_HAS_SEND_DMA;
  1333. dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
  1334. dd->num_sdma = num_engines;
  1335. ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
  1336. if (ret < 0)
  1337. goto bail;
  1338. tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
  1339. if (!tmp_sdma_rht) {
  1340. ret = -ENOMEM;
  1341. goto bail;
  1342. }
  1343. ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
  1344. if (ret < 0)
  1345. goto bail;
  1346. dd->sdma_rht = tmp_sdma_rht;
  1347. dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
  1348. return 0;
  1349. bail:
  1350. sdma_clean(dd, num_engines);
  1351. return ret;
  1352. }
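/*
 * Probe-time sketch (illustrative only, not part of this file): sdma_init()
 * is called from the device init path before interrupts are required, and
 * a failure cleans up after itself via sdma_clean(), so the caller only
 * needs to propagate the error.
 *
 *	ret = sdma_init(dd, 0);		// port is currently always zero
 *	if (ret)
 *		goto bail;		// engines, rings, rht already freed
 */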
/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}
/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}
/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.  Interrupts need to be working at this point.
 *
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}
/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
	     ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				   sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
	sdma_clean(dd, dd->num_sdma);
}
/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}
/*
 * return the mode as indicated by the first
 * descriptor in the tx.
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}
/**
 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void __sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}
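/*
 * Usage sketch (illustrative only): a ULP that fails partway through
 * building a tx simply tosses it, and the call is safe to repeat.
 *
 *	ret = sdma_txadd_page(dd, tx, page, offset, len);
 *	if (ret) {
 *		__sdma_txclean(dd, tx);	// unmaps descs, frees coalesce buf
 *		return ret;
 *	}
 */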
static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		 (u16)le64_to_cpu(*sde->head_dma) :
		 (u16)read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 cnt;
		u16 swtail;
		u16 swhead;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			       (hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);

		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				   sde->this_idx,
				   use_dmahead ? "dma" : "kreg",
				   hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}
/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	unsigned i, n = 0, seq;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
				wait,
				nw,
				&sde->dmawait,
				list) {
				u16 num_desc = 0;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	for (i = 0; i < n; i++)
		waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}
/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this
	 * invocation of sdma_make_progress(..) which is ensured by the
	 * idle_check_done flag.
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		u16 swtail;

		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
/*
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit and may contain more.
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sdma_desct_intr);
	if (status & sde->idle_mask)
		sde->idle_int_cnt++;
	else if (status & sde->progress_mask)
		sde->progress_int_cnt++;
	else if (status & sde->int_mask)
		sde->sdma_int_cnt++;
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}
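/*
 * Illustration (not compiled; assumes TXE_NUM_SDMA_ENGINES == 16): from the
 * masks built in sdma_init(), engine 3 would see
 *
 *	int_mask      = 1ull << 3	source interrupt
 *	progress_mask = 1ull << 19	progress interrupt
 *	idle_mask     = 1ull << 35	idle interrupt
 *
 * For a status of 0x0000000800080008 only idle_int_cnt is bumped, because
 * the branches above are else-if chained, but sdma_make_progress() still
 * sees all three bits in status.
 */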
/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		   sde->this_idx,
		   (unsigned long long)status,
		   sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			   "SDMA (%u) engine error: 0x%llx state %s\n",
			   sde->this_idx,
			   (unsigned long long)status,
			   sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		   sde->this_idx,
		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
#endif

	if (op & SDMA_SENDCTRL_OP_ENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_INTENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_HALT)
		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);

	spin_lock_irqsave(&sde->senddmactrl_lock, flags);

	sde->p_senddmactrl |= set_senddmactrl;
	sde->p_senddmactrl &= ~clr_senddmactrl;

	if (op & SDMA_SENDCTRL_OP_CLEANUP)
		write_sde_csr(sde, SD(CTRL),
			      sde->p_senddmactrl |
			      SD(CTRL_SDMA_CLEANUP_SMASK));
	else
		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);

	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);

#ifdef CONFIG_SDMA_VERBOSITY
	sdma_dumpstate(sde);
#endif
}
static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	write_sde_csr(sde, SD(LEN_GEN),
		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
	write_sde_csr(sde, SD(LEN_GEN),
		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
		      (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}
static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	smp_wmb(); /* see get_txhead() */
	writeq(tail, sde->tail_csr);
}
/*
 * This is called when changing to state s10_hw_start_up_halt_wait as
 * a result of send buffer errors or send DMA descriptor errors.
 */
static void sdma_hw_start_up(struct sdma_engine *sde)
{
	u64 reg;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	*sde->head_dma = 0;

	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}
/*
 * set_sdma_integrity
 *
 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
 */
static void set_sdma_integrity(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;

	write_sde_csr(sde, SD(CHECK_ENABLE),
		      hfi1_pkt_base_sdma_integrity(dd));
}
static void init_sdma_regs(
	struct sdma_engine *sde,
	u32 credits,
	uint idle_cnt)
{
	u8 opval, opmask;
#ifdef CONFIG_SDMA_VERBOSITY
	struct hfi1_devdata *dd = sde->dd;

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
	write_sde_csr(sde, SD(DESC_CNT), 0);
	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
	write_sde_csr(sde, SD(MEMORY),
		      ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
		      ((u64)(credits * sde->this_idx) <<
		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
	set_sdma_integrity(sde);
	opmask = OPCODE_CHECK_MASK_DISABLED;
	opval = OPCODE_CHECK_VAL_DISABLED;
	write_sde_csr(sde, SD(CHECK_OPCODE),
		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}
#ifdef CONFIG_SDMA_VERBOSITY

#define sdma_dumpstate_helper0(reg) do { \
	csr = read_csr(sde->dd, reg); \
	dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
} while (0)

#define sdma_dumpstate_helper(reg) do { \
	csr = read_sde_csr(sde, reg); \
	dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
		   #reg, sde->this_idx, csr); \
} while (0)

#define sdma_dumpstate_helper2(reg) do { \
	csr = read_csr(sde->dd, reg + (8 * i)); \
	dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
		   #reg, i, csr); \
} while (0)

void sdma_dumpstate(struct sdma_engine *sde)
{
	u64 csr;
	unsigned i;

	sdma_dumpstate_helper(SD(CTRL));
	sdma_dumpstate_helper(SD(STATUS));
	sdma_dumpstate_helper0(SD(ERR_STATUS));
	sdma_dumpstate_helper0(SD(ERR_MASK));
	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
	sdma_dumpstate_helper(SD(ENG_ERR_MASK));

	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
		sdma_dumpstate_helper2(CCE_INT_STATUS);
		sdma_dumpstate_helper2(CCE_INT_MASK);
		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
	}

	sdma_dumpstate_helper(SD(TAIL));
	sdma_dumpstate_helper(SD(HEAD));
	sdma_dumpstate_helper(SD(PRIORITY_THLD));
	sdma_dumpstate_helper(SD(IDLE_CNT));
	sdma_dumpstate_helper(SD(RELOAD_CNT));
	sdma_dumpstate_helper(SD(DESC_CNT));
	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
	sdma_dumpstate_helper(SD(MEMORY));
	sdma_dumpstate_helper0(SD(ENGINES));
	sdma_dumpstate_helper0(SD(MEM_SIZE));
	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
	sdma_dumpstate_helper(SD(BASE_ADDR));
	sdma_dumpstate_helper(SD(LEN_GEN));
	sdma_dumpstate_helper(SD(HEAD_ADDR));
	sdma_dumpstate_helper(SD(CHECK_ENABLE));
	sdma_dumpstate_helper(SD(CHECK_VL));
	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
	sdma_dumpstate_helper(SD(CHECK_SLID));
	sdma_dumpstate_helper(SD(CHECK_OPCODE));
}
#endif
static void dump_sdma_state(struct sdma_engine *sde)
{
	struct hw_sdma_desc *descq;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;
	u16 head, tail, cnt;

	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	cnt = sdma_descq_freecnt(sde);
	descq = sde->descq;

	dd_dev_err(sde->dd,
		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
		   sde->this_idx, head, tail, cnt,
		   !list_empty(&sde->flushlist));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
		       & SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
		      & SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
		      & SDMA_DESC0_BYTE_COUNT_MASK;
		dd_dev_err(sde->dd,
			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		dd_dev_err(sde->dd,
			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
			   desc[0], desc[1]);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			dd_dev_err(sde->dd,
				   "\taidx: %u amode: %u alen: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_DWS_SMASK) >>
					SDMA_DESC1_HEADER_DWS_SHIFT));
		head++;
		head &= sde->sdma_mask;
	}
}
#define SDE_FMT \
	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),
		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
		   (unsigned long long)le64_to_cpu(*sde->head_dma),
		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		   (unsigned long long)sde->last_status,
		   (unsigned long long)sde->ahg_bits,
		   sde->tx_tail,
		   sde->tx_head,
		   sde->descq_tail,
		   sde->descq_head,
		   !list_empty(&sde->flushlist),
		   sde->descq_full_count,
		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
			   'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
		       & SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
		      & SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
		      & SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}
/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
	       << SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
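/*
 * Worked example (illustrative): with descq_cnt == 2048, sdma_shift is
 * ilog2(2048) == 11, so the free-running descq_tail maps to generations
 *
 *	tail    0..2047 -> generation 0
 *	tail 2048..4095 -> generation 1
 *	tail 4096..6143 -> generation 2
 *	tail 6144..8191 -> generation 3, then back to 0
 *
 * letting the hardware detect a stale (not yet rewritten) descriptor when
 * the ring wraps.
 */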
/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * tail side of ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 *
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
					   tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}
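/*
 * Worked example (illustrative; assumes the sdma.h mode encoding where
 * SDMA_AHG_APPLY_UPDATE2 == 3 and SDMA_AHG_APPLY_UPDATE3 == 4): for an
 * UPDATE3 tx, skip = 4 >> 1 = 2, so descriptors 1 and 2 keep the AHG
 * update words that _sdma_txreq_ahgadd() packed into their qw[0]/qw[1]
 * and are not stamped with a generation; generation stamping resumes at
 * descriptor 3.
 */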
/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *tx)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && wait->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->sleep(sde, wait, tx, seq);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 *
 * The call submits the tx into the ring.  If an iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = wait;
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		iowait_sdma_inc(wait);
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	if (wait)
		iowait_sdma_inc(wait);
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	if (wait) {
		wait->tx_count++;
		wait->count += tx->num_desc;
	}
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
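/*
 * Caller sketch (illustrative only; "priv" and its iowait are assumed
 * names, not part of this file):
 *
 *	ret = sdma_send_txreq(sde, &priv->s_iowait, tx);
 *	switch (ret) {
 *	case 0:			// on the ring; completion callback runs
 *				// from the progress path
 *	case -EIOCBQUEUED:	// ring full; the iowait's sleep() queued
 *				// the tx and wakeup() will retry later
 *	case -ECOMM:		// engine not running; tx went to the
 *				// flush list
 *	}
 */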
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 * @count_out: pointer to a u32 which, after return, will contain the total
 *	       number of sdma_txreqs removed from the tx_list. This will
 *	       include sdma_txreqs whose SDMA descriptors are submitted to
 *	       the ring and the sdma_txreqs which are added to the SDMA
 *	       engine flush list if the SDMA engine state is not running.
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list,
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side lock.
 *
 * Return:
 * 0 - Success,
 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
		     struct list_head *tx_list, u32 *count_out)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	u32 submit_count = 0, flush_count = 0, total_count;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		submit_count++;
		if (tail != INVALID_TAIL &&
		    (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	total_count = submit_count + flush_count;
	if (wait)
		iowait_sdma_add(wait, total_count);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	*count_out = total_count;
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		list_del_init(&tx->list);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		flush_count++;
		if (wait) {
			wait->tx_count++;
			wait->count += tx->num_desc;
		}
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}
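/*
 * Caller sketch (illustrative only; the request "req" and its txps list
 * are assumed names):
 *
 *	u32 count = 0;
 *
 *	ret = sdma_send_txlist(sde, &req->iowait, &req->txps, &count);
 *	// "count" txreqs left req->txps: they were either put on the ring
 *	// or, if the engine was not running, moved to the flush list.
 *	// Anything still on req->txps must be resubmitted later.
 */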
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif

	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually a result
			 * of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on,
			 * e.g., 7220.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;
	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW-initiated halt does not perform the engine
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors. There doesn't seem to be
 * much point in an interim step. The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where an input packet has >MAX_DESC iovecs.
 *
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	__sdma_txclean(dd, tx);
	return -ENOMEM;
}
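/*
 * Illustration of the growth path implemented above: a tx starts with the
 * small inline descs[] array; the first call here switches descp to a
 * kmalloc'ed array of MAX_DESC entries with the last slot held back, and a
 * later call at exactly MAX_DESC - 1 descriptors either gives that slot
 * back for padding (tlen == 0) or allocates the coalesce buffer that will
 * absorb the remaining tlen bytes of payload.
 */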
/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If there is an allocated coalesce buffer, it will
 * copy the input packet data into the coalesce buffer. It also adds
 * the coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		__sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			__sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
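/*
 * Caller sketch (illustrative only) for the three-way return, as a
 * txadd-style helper would typically dispatch on it:
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, NULL, page,
 *				      offset, len);
 *	if (rval <= 0)
 *		return rval;	// error, or the bytes were coalesced
 *	// rval == 1: fall through and build the descriptor normally
 */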
/* Update the SLID check CSR in each sdma engine when the LMC changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
		(((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
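
/*
 * Worked example (editorial): with lid = 0x1234 and an LMC of 2, the
 * caller would pass mask = ~((1UL << 2) - 1) = ~0x3 (an assumption
 * about the caller's convention) so the low LMC bits are ignored on
 * SLID checks.  The CSR value then packs the mask and the masked LID
 * side by side:
 *
 *	sreg = ((~0x3 & SD(CHECK_SLID_MASK_MASK))
 *			<< SD(CHECK_SLID_MASK_SHIFT)) |
 *	       (((0x1234 & ~0x3) & SD(CHECK_SLID_VALUE_MASK))
 *			<< SD(CHECK_SLID_VALUE_SHIFT));
 *
 * The exact field widths come from the CHECK_SLID_* definitions in the
 * chip register headers.
 */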
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
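
/*
 * Padding arithmetic, worked example (editorial): for a packet_len of
 * 7 bytes, packet_len & (sizeof(u32) - 1) == 3, so the pad descriptor
 * above covers sizeof(u32) - 3 == 1 byte of dd->sdma_pad_phys,
 * bringing the request up to the next dword boundary.  A dword-sized
 * packet never reaches this function.
 */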
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3 descriptors at the beginning of
 * the sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1]) << shift);
		shift = (shift + 32) & 63;
	}
}
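
/*
 * Resulting AHG word layout (editorial note, derived from the loop
 * above): ahg[0] lives in descs[0].qw[1] (the UPDATE1 field); the
 * remaining words pack two per qword,
 *
 *	ahg[1] -> descs[1].qw[0] bits 31:0
 *	ahg[2] -> descs[1].qw[0] bits 63:32
 *	ahg[3] -> descs[1].qw[1] bits 31:0
 *	ahg[4] -> descs[1].qw[1] bits 63:32
 *	ahg[5] -> descs[2].qw[0] bits 31:0
 *	ahg[6] -> descs[2].qw[0] bits 63:32
 *	ahg[7] -> descs[2].qw[1] bits 31:0
 *	ahg[8] -> descs[2].qw[1] bits 63:32
 *
 * which is why num_ahg is capped at 9 and at most 3 descriptors are
 * consumed.
 */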
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EINVAL if @sde is invalid,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
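
/*
 * Illustrative usage sketch (editorial, not driver code): allocation
 * and free are expected to be paired per AHG consumer; the helper name
 * below is hypothetical.
 *
 *	int example_use_ahg(struct sdma_engine *sde)
 *	{
 *		int ahg_index = sdma_ahg_alloc(sde);
 *
 *		if (ahg_index < 0)
 *			return ahg_index;	// -EINVAL or -ENOSPC
 *		// ... build headers referencing ahg_index ...
 *		sdma_ahg_free(sde, ahg_index);
 *		return 0;
 *	}
 */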
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled. Generally this is the result of an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* if interrupted, or the count went negative (unloading), just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting. The
	 * software clean will read engine CSRs, so must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
 * that is left is a software clean. We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start the freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
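
/*
 * Freeze-path call order, sketched from the comments above (editorial):
 *
 *	sdma_freeze_notify(dd, link_down); // freeze coming: stop all engines
 *	sdma_freeze(dd);                   // SPC fully frozen: clean up
 *	// ... SPC unfreeze is handled elsewhere in the driver ...
 *	sdma_unfreeze(dd);                 // SPC unfrozen: software clean,
 *	                                   // engines restart
 *
 * dd->sdma_unfreeze_count and dd->sdma_unfreeze_wq pace the two waits
 * inside sdma_freeze().
 */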
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}
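
/*
 * Editorial note on the register arithmetic above: the CCE_INT_FORCE
 * CSRs are 64-bit registers spaced 8 bytes apart, each covering 64
 * interrupt sources, so (8 * (IS_SDMA_START / 64)) selects the force
 * register holding the SDMA sources.  Writing sde->progress_mask there
 * forces this engine's interrupt as if the hardware had raised it.
 */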