xhci-ring.c

  1. /*
  2. * xHCI host controller driver
  3. *
  4. * Copyright (C) 2008 Intel Corp.
  5. *
  6. * Author: Sarah Sharp
  7. * Some code borrowed from the Linux EHCI driver.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  16. * for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software Foundation,
  20. * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21. */
  22. /*
  23. * Ring initialization rules:
  24. * 1. Each segment is initialized to zero, except for link TRBs.
  25. * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
  26. * Consumer Cycle State (CCS), depending on ring function.
  27. * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  28. *
  29. * Ring behavior rules:
  30. * 1. A ring is empty if enqueue == dequeue. This means there will always be at
  31. * least one free TRB in the ring. This is useful if you want to turn that
  32. * into a link TRB and expand the ring.
  33. * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  34. * link TRB, then load the pointer with the address in the link TRB. If the
  35. * link TRB had its toggle bit set, you may need to update the ring cycle
  36. * state (see cycle bit rules). You may have to do this multiple times
  37. * until you reach a non-link TRB.
  38. * 3. A ring is full if enqueue++ (for the definition of increment above)
  39. * equals the dequeue pointer.
  40. *
  41. * Cycle bit rules:
  42. * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  43. * in a link TRB, it must toggle the ring cycle state.
  44. * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  45. * in a link TRB, it must toggle the ring cycle state.
  46. *
  47. * Producer rules:
  48. * 1. Check if ring is full before you enqueue.
  49. * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  50. * Update enqueue pointer between each write (which may update the ring
  51. * cycle state).
  52. * 3. Notify consumer. If SW is producer, it rings the doorbell for command
  53. * and endpoint rings. If HC is the producer for the event ring,
  54. * it generates an interrupt according to interrupt modulation rules.
  55. *
  56. * Consumer rules:
  57. * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
  58. * the TRB is owned by the consumer.
  59. * 2. Update dequeue pointer (which may update the ring cycle state) and
  60. * continue processing TRBs until you reach a TRB which is not owned by you.
  61. * 3. Notify the producer. SW is the consumer for the event ring, and it
  62. * updates event ring dequeue pointer. HC is the consumer for the command and
  63. * endpoint rings; it generates events on the event ring for these.
  64. */
  65. #include <linux/scatterlist.h>
  66. #include <linux/slab.h>
  67. #include "xhci.h"
  68. #include "xhci-trace.h"
  69. /*
  70. * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  71. * address of the TRB.
  72. */
  73. dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  74. union xhci_trb *trb)
  75. {
  76. unsigned long segment_offset;
  77. if (!seg || !trb || trb < seg->trbs)
  78. return 0;
  79. /* offset in TRBs */
  80. segment_offset = trb - seg->trbs;
  81. if (segment_offset >= TRBS_PER_SEGMENT)
  82. return 0;
  83. return seg->dma + (segment_offset * sizeof(*trb));
  84. }
  85. /* Does this link TRB point to the first segment in a ring,
  86. * or was the previous TRB the last TRB on the last segment in the ERST?
  87. */
  88. static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
  89. struct xhci_segment *seg, union xhci_trb *trb)
  90. {
  91. if (ring == xhci->event_ring)
  92. return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
  93. (seg->next == xhci->event_ring->first_seg);
  94. else
  95. return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
  96. }
  97. /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
  98. * segment? I.e. would the updated event TRB pointer step off the end of the
  99. * event seg?
  100. */
  101. static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  102. struct xhci_segment *seg, union xhci_trb *trb)
  103. {
  104. if (ring == xhci->event_ring)
  105. return trb == &seg->trbs[TRBS_PER_SEGMENT];
  106. else
  107. return TRB_TYPE_LINK_LE32(trb->link.control);
  108. }
  109. static int enqueue_is_link_trb(struct xhci_ring *ring)
  110. {
  111. struct xhci_link_trb *link = &ring->enqueue->link;
  112. return TRB_TYPE_LINK_LE32(link->control);
  113. }
  114. /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  115. * TRB is in a new segment. This does not skip over link TRBs, and it does not
  116. * effect the ring dequeue or enqueue pointers.
  117. */
  118. static void next_trb(struct xhci_hcd *xhci,
  119. struct xhci_ring *ring,
  120. struct xhci_segment **seg,
  121. union xhci_trb **trb)
  122. {
  123. if (last_trb(xhci, ring, *seg, *trb)) {
  124. *seg = (*seg)->next;
  125. *trb = ((*seg)->trbs);
  126. } else {
  127. (*trb)++;
  128. }
  129. }
  130. /*
  131. * See Cycle bit rules. SW is the consumer for the event ring only.
  132. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  133. */
  134. static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
  135. {
  136. ring->deq_updates++;
  137. /*
  138. * If this is not event ring, and the dequeue pointer
  139. * is not on a link TRB, there is one more usable TRB
  140. */
  141. if (ring->type != TYPE_EVENT &&
  142. !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
  143. ring->num_trbs_free++;
  144. do {
  145. /*
  146. * Update the dequeue pointer further if that was a link TRB or
  147. * we're at the end of an event ring segment (which doesn't have
  148. * link TRBs)
  149. */
  150. if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
  151. if (ring->type == TYPE_EVENT &&
  152. last_trb_on_last_seg(xhci, ring,
  153. ring->deq_seg, ring->dequeue)) {
  154. ring->cycle_state ^= 1;
  155. }
  156. ring->deq_seg = ring->deq_seg->next;
  157. ring->dequeue = ring->deq_seg->trbs;
  158. } else {
  159. ring->dequeue++;
  160. }
  161. } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
  162. }
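/*
 * Editor's illustrative sketch (not part of the driver): the consumer rules
 * above as a minimal event ring polling loop.  process_one_event() is a
 * made-up placeholder; the real consumer is the event handler later in this
 * file, which also writes the new dequeue pointer back to the event ring
 * dequeue pointer (ERDP) register.  Kept under #if 0.
 */
#if 0
static void sketch_consume_events(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	union xhci_trb *trb = ring->dequeue;

	/* Rule 1: the TRB is ours while its cycle bit matches our state. */
	while ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
			ring->cycle_state) {
		/* process_one_event(xhci, trb); */

		/* Rule 2: advance dequeue (may toggle the cycle state). */
		inc_deq(xhci, ring);
		trb = ring->dequeue;
	}
	/* Rule 3: notify the producer by updating the ERDP register. */
}
#endif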
  163. /*
  164. * See Cycle bit rules. SW is the consumer for the event ring only.
  165. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  166. *
  167. * If we've just enqueued a TRB that is in the middle of a TD (meaning the
  168. * chain bit is set), then set the chain bit in all the following link TRBs.
  169. * If we've enqueued the last TRB in a TD, make sure the following link TRBs
  170. * have their chain bit cleared (so that each Link TRB is a separate TD).
  171. *
  172. * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
  173. * set, but other sections talk about dealing with the chain bit set. This was
  174. * fixed in the 0.96 specification errata, but we have to assume that all 0.95
  175. * xHCI hardware can't handle the chain bit being cleared on a link TRB.
  176. *
  177. * @more_trbs_coming: Will you enqueue more TRBs before calling
  178. * prepare_transfer()?
  179. */
  180. static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
  181. bool more_trbs_coming)
  182. {
  183. u32 chain;
  184. union xhci_trb *next;
  185. chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
  186. /* If this is not event ring, there is one less usable TRB */
  187. if (ring->type != TYPE_EVENT &&
  188. !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
  189. ring->num_trbs_free--;
  190. next = ++(ring->enqueue);
  191. ring->enq_updates++;
  192. /* Update the enqueue pointer further if that was a link TRB or we're at
  193. * the end of an event ring segment (which doesn't have link TRBs)
  194. */
  195. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  196. if (ring->type != TYPE_EVENT) {
  197. /*
  198. * If the caller doesn't plan on enqueueing more
  199. * TDs before ringing the doorbell, then we
  200. * don't want to give the link TRB to the
  201. * hardware just yet. We'll give the link TRB
  202. * back in prepare_ring() just before we enqueue
  203. * the TD at the top of the ring.
  204. */
  205. if (!chain && !more_trbs_coming)
  206. break;
  207. /* If we're not dealing with 0.95 hardware or
  208. * isoc rings on AMD 0.96 host,
  209. * carry over the chain bit of the previous TRB
  210. * (which may mean the chain bit is cleared).
  211. */
  212. if (!(ring->type == TYPE_ISOC &&
  213. (xhci->quirks & XHCI_AMD_0x96_HOST))
  214. && !xhci_link_trb_quirk(xhci)) {
  215. next->link.control &=
  216. cpu_to_le32(~TRB_CHAIN);
  217. next->link.control |=
  218. cpu_to_le32(chain);
  219. }
  220. /* Give this link TRB to the hardware */
  221. wmb();
  222. next->link.control ^= cpu_to_le32(TRB_CYCLE);
  223. /* Toggle the cycle bit after the last ring segment. */
  224. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  225. ring->cycle_state ^= 1;
  226. }
  227. }
  228. ring->enq_seg = ring->enq_seg->next;
  229. ring->enqueue = ring->enq_seg->trbs;
  230. next = ring->enqueue;
  231. }
  232. }
  233. /*
  234. * Check to see if there's room to enqueue num_trbs on the ring and make sure
  235. * enqueue pointer will not advance into dequeue segment. See rules above.
  236. */
  237. static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
  238. unsigned int num_trbs)
  239. {
  240. int num_trbs_in_deq_seg;
  241. if (ring->num_trbs_free < num_trbs)
  242. return 0;
  243. if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
  244. num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
  245. if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
  246. return 0;
  247. }
  248. return 1;
  249. }
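/*
 * Editor's illustrative sketch (not part of the driver): the producer rules
 * above for a software producer.  sketch_fill_trb() stands in for the real
 * queue_trb() helper further down in this file; the names are invented for
 * illustration.  Kept under #if 0.
 */
#if 0
static int sketch_enqueue_one_trb(struct xhci_hcd *xhci,
				  struct xhci_ring *ring,
				  unsigned int slot_id, unsigned int ep_index)
{
	/* Rule 1: check for room before enqueueing. */
	if (!room_on_ring(xhci, ring, 1))
		return -ENOMEM;

	/*
	 * Rule 2: write the TRB with its cycle bit set from the ring's
	 * cycle state, then advance the enqueue pointer (which may toggle
	 * the cycle state when a link TRB is crossed).
	 */
	/* sketch_fill_trb(ring->enqueue, ring->cycle_state); */
	inc_enq(xhci, ring, false);

	/* Rule 3: notify the consumer (the xHC) via the doorbell. */
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
	return 0;
}
#endif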
  250. /* Ring the host controller doorbell after placing a command on the ring */
  251. void xhci_ring_cmd_db(struct xhci_hcd *xhci)
  252. {
  253. if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
  254. return;
  255. xhci_dbg(xhci, "// Ding dong!\n");
  256. writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
  257. /* Flush PCI posted writes */
  258. readl(&xhci->dba->doorbell[0]);
  259. }
  260. static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
  261. {
  262. u64 temp_64;
  263. int ret;
  264. xhci_dbg(xhci, "Abort command ring\n");
  265. temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
  266. xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
  267. xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
  268. &xhci->op_regs->cmd_ring);
  269. /* Section 4.6.1.2 of xHCI 1.0 spec says software should
  270. * time the completion of all xHCI commands, including
  271. * the Command Abort operation. If software doesn't see
  272. * CRR negated in a timely manner (e.g. longer than 5
  273. * seconds), then it should assume that there are
  274. * larger problems with the xHC and assert HCRST.
  275. */
  276. ret = xhci_handshake(&xhci->op_regs->cmd_ring,
  277. CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
  278. if (ret < 0) {
  279. /* we are about to kill xhci, give it one more chance */
  280. xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
  281. &xhci->op_regs->cmd_ring);
  282. udelay(1000);
  283. ret = xhci_handshake(&xhci->op_regs->cmd_ring,
  284. CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
  285. if (ret == 0)
  286. return 0;
  287. xhci_err(xhci, "Stopping the command ring failed, "
  288. "maybe the host is dead\n");
  289. xhci->xhc_state |= XHCI_STATE_DYING;
  290. xhci_quiesce(xhci);
  291. xhci_halt(xhci);
  292. return -ESHUTDOWN;
  293. }
  294. return 0;
  295. }
  296. void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
  297. unsigned int slot_id,
  298. unsigned int ep_index,
  299. unsigned int stream_id)
  300. {
  301. __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
  302. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  303. unsigned int ep_state = ep->ep_state;
  304. /* Don't ring the doorbell for this endpoint if there are pending
  305. * cancellations because we don't want to interrupt processing.
  306. * We don't want to restart any stream rings if there's a set dequeue
  307. * pointer command pending because the device can choose to start any
  308. * stream once the endpoint is on the HW schedule.
  309. */
  310. if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
  311. (ep_state & EP_HALTED))
  312. return;
  313. writel(DB_VALUE(ep_index, stream_id), db_addr);
  314. /* The CPU has better things to do at this point than wait for a
  315. * write-posting flush. It'll get there soon enough.
  316. */
  317. }
  318. /* Ring the doorbell for any rings with pending URBs */
  319. static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
  320. unsigned int slot_id,
  321. unsigned int ep_index)
  322. {
  323. unsigned int stream_id;
  324. struct xhci_virt_ep *ep;
  325. ep = &xhci->devs[slot_id]->eps[ep_index];
  326. /* A ring has pending URBs if its TD list is not empty */
  327. if (!(ep->ep_state & EP_HAS_STREAMS)) {
  328. if (ep->ring && !(list_empty(&ep->ring->td_list)))
  329. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
  330. return;
  331. }
  332. for (stream_id = 1; stream_id < ep->stream_info->num_streams;
  333. stream_id++) {
  334. struct xhci_stream_info *stream_info = ep->stream_info;
  335. if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
  336. xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
  337. stream_id);
  338. }
  339. }
  340. static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
  341. unsigned int slot_id, unsigned int ep_index,
  342. unsigned int stream_id)
  343. {
  344. struct xhci_virt_ep *ep;
  345. ep = &xhci->devs[slot_id]->eps[ep_index];
  346. /* Common case: no streams */
  347. if (!(ep->ep_state & EP_HAS_STREAMS))
  348. return ep->ring;
  349. if (stream_id == 0) {
  350. xhci_warn(xhci,
  351. "WARN: Slot ID %u, ep index %u has streams, "
  352. "but URB has no stream ID.\n",
  353. slot_id, ep_index);
  354. return NULL;
  355. }
  356. if (stream_id < ep->stream_info->num_streams)
  357. return ep->stream_info->stream_rings[stream_id];
  358. xhci_warn(xhci,
  359. "WARN: Slot ID %u, ep index %u has "
  360. "stream IDs 1 to %u allocated, "
  361. "but stream ID %u is requested.\n",
  362. slot_id, ep_index,
  363. ep->stream_info->num_streams - 1,
  364. stream_id);
  365. return NULL;
  366. }
  367. /* Get the right ring for the given URB.
  368. * If the endpoint supports streams, boundary check the URB's stream ID.
  369. * If the endpoint doesn't support streams, return the singular endpoint ring.
  370. */
  371. static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
  372. struct urb *urb)
  373. {
  374. return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
  375. xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
  376. }
  377. /*
  378. * Move the xHC's endpoint ring dequeue pointer past cur_td.
  379. * Record the new state of the xHC's endpoint ring dequeue segment,
  380. * dequeue pointer, and new consumer cycle state in state.
  381. * Update our internal representation of the ring's dequeue pointer.
  382. *
  383. * We do this in three jumps:
  384. * - First we update our new ring state to be the same as when the xHC stopped.
  385. * - Then we traverse the ring to find the segment that contains
  386. * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
  387. * any link TRBs with the toggle cycle bit set.
  388. * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  389. * if we've moved it past a link TRB with the toggle cycle bit set.
  390. *
  391. * Some of the uses of xhci_generic_trb are grotty, but if they're done
  392. * with correct __le32 accesses they should work fine. Only users of this are
  393. * in here.
  394. */
  395. void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
  396. unsigned int slot_id, unsigned int ep_index,
  397. unsigned int stream_id, struct xhci_td *cur_td,
  398. struct xhci_dequeue_state *state)
  399. {
  400. struct xhci_virt_device *dev = xhci->devs[slot_id];
  401. struct xhci_virt_ep *ep = &dev->eps[ep_index];
  402. struct xhci_ring *ep_ring;
  403. struct xhci_segment *new_seg;
  404. union xhci_trb *new_deq;
  405. dma_addr_t addr;
  406. u64 hw_dequeue;
  407. bool cycle_found = false;
  408. bool td_last_trb_found = false;
  409. ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
  410. ep_index, stream_id);
  411. if (!ep_ring) {
  412. xhci_warn(xhci, "WARN can't find new dequeue state "
  413. "for invalid stream ID %u.\n",
  414. stream_id);
  415. return;
  416. }
  417. /* Dig out the cycle state saved by the xHC during the stop ep cmd */
  418. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  419. "Finding endpoint context");
  420. /* 4.6.9 the css flag is written to the stream context for streams */
  421. if (ep->ep_state & EP_HAS_STREAMS) {
  422. struct xhci_stream_ctx *ctx =
  423. &ep->stream_info->stream_ctx_array[stream_id];
  424. hw_dequeue = le64_to_cpu(ctx->stream_ring);
  425. } else {
  426. struct xhci_ep_ctx *ep_ctx
  427. = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
  428. hw_dequeue = le64_to_cpu(ep_ctx->deq);
  429. }
  430. new_seg = ep_ring->deq_seg;
  431. new_deq = ep_ring->dequeue;
  432. state->new_cycle_state = hw_dequeue & 0x1;
  433. /*
  434. * We want to find the pointer, segment and cycle state of the new trb
  435. * (the one after current TD's last_trb). We know the cycle state at
  436. * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
  437. * found.
  438. */
  439. do {
  440. if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
  441. == (dma_addr_t)(hw_dequeue & ~0xf)) {
  442. cycle_found = true;
  443. if (td_last_trb_found)
  444. break;
  445. }
  446. if (new_deq == cur_td->last_trb)
  447. td_last_trb_found = true;
  448. if (cycle_found &&
  449. TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
  450. new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
  451. state->new_cycle_state ^= 0x1;
  452. next_trb(xhci, ep_ring, &new_seg, &new_deq);
  453. /* Search wrapped around, bail out */
  454. if (new_deq == ep->ring->dequeue) {
  455. xhci_err(xhci, "Error: Failed finding new dequeue state\n");
  456. state->new_deq_seg = NULL;
  457. state->new_deq_ptr = NULL;
  458. return;
  459. }
  460. } while (!cycle_found || !td_last_trb_found);
  461. state->new_deq_seg = new_seg;
  462. state->new_deq_ptr = new_deq;
  463. /* Don't update the ring cycle state for the producer (us). */
  464. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  465. "Cycle state = 0x%x", state->new_cycle_state);
  466. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  467. "New dequeue segment = %p (virtual)",
  468. state->new_deq_seg);
  469. addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
  470. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  471. "New dequeue pointer = 0x%llx (DMA)",
  472. (unsigned long long) addr);
  473. }
  474. /* flip_cycle means flip the cycle bit of all but the first and last TRB.
  475. * (The last TRB actually points to the ring enqueue pointer, which is not part
  476. * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
  477. */
  478. static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  479. struct xhci_td *cur_td, bool flip_cycle)
  480. {
  481. struct xhci_segment *cur_seg;
  482. union xhci_trb *cur_trb;
  483. for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
  484. true;
  485. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  486. if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
  487. /* Unchain any chained Link TRBs, but
  488. * leave the pointers intact.
  489. */
  490. cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
  491. /* Flip the cycle bit (link TRBs can't be the first
  492. * or last TRB).
  493. */
  494. if (flip_cycle)
  495. cur_trb->generic.field[3] ^=
  496. cpu_to_le32(TRB_CYCLE);
  497. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  498. "Cancel (unchain) link TRB");
  499. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  500. "Address = %p (0x%llx dma); "
  501. "in seg %p (0x%llx dma)",
  502. cur_trb,
  503. (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
  504. cur_seg,
  505. (unsigned long long)cur_seg->dma);
  506. } else {
  507. cur_trb->generic.field[0] = 0;
  508. cur_trb->generic.field[1] = 0;
  509. cur_trb->generic.field[2] = 0;
  510. /* Preserve only the cycle bit of this TRB */
  511. cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
  512. /* Flip the cycle bit except on the first or last TRB */
  513. if (flip_cycle && cur_trb != cur_td->first_trb &&
  514. cur_trb != cur_td->last_trb)
  515. cur_trb->generic.field[3] ^=
  516. cpu_to_le32(TRB_CYCLE);
  517. cur_trb->generic.field[3] |= cpu_to_le32(
  518. TRB_TYPE(TRB_TR_NOOP));
  519. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  520. "TRB to noop at offset 0x%llx",
  521. (unsigned long long)
  522. xhci_trb_virt_to_dma(cur_seg, cur_trb));
  523. }
  524. if (cur_trb == cur_td->last_trb)
  525. break;
  526. }
  527. }
  528. static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
  529. struct xhci_virt_ep *ep)
  530. {
  531. ep->ep_state &= ~EP_HALT_PENDING;
  532. /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
  533. * timer is running on another CPU, we don't decrement stop_cmds_pending
  534. * (since we didn't successfully stop the watchdog timer).
  535. */
  536. if (del_timer(&ep->stop_cmd_timer))
  537. ep->stop_cmds_pending--;
  538. }
  539. /* Must be called with xhci->lock held in interrupt context */
  540. static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
  541. struct xhci_td *cur_td, int status)
  542. {
  543. struct usb_hcd *hcd;
  544. struct urb *urb;
  545. struct urb_priv *urb_priv;
  546. urb = cur_td->urb;
  547. urb_priv = urb->hcpriv;
  548. urb_priv->td_cnt++;
  549. hcd = bus_to_hcd(urb->dev->bus);
  550. /* Only giveback urb when this is the last td in urb */
  551. if (urb_priv->td_cnt == urb_priv->length) {
  552. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  553. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
  554. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
  555. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  556. usb_amd_quirk_pll_enable();
  557. }
  558. }
  559. usb_hcd_unlink_urb_from_ep(hcd, urb);
  560. spin_unlock(&xhci->lock);
  561. usb_hcd_giveback_urb(hcd, urb, status);
  562. xhci_urb_free_priv(urb_priv);
  563. spin_lock(&xhci->lock);
  564. }
  565. }
  566. /*
  567. * When we get a command completion for a Stop Endpoint Command, we need to
  568. * unlink any cancelled TDs from the ring. There are two ways to do that:
  569. *
  570. * 1. If the HW was in the middle of processing the TD that needs to be
  571. * cancelled, then we must move the ring's dequeue pointer past the last TRB
  572. * in the TD with a Set Dequeue Pointer Command.
  573. * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
  574. * bit cleared) so that the HW will skip over them.
  575. */
  576. static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
  577. union xhci_trb *trb, struct xhci_event_cmd *event)
  578. {
  579. unsigned int ep_index;
  580. struct xhci_ring *ep_ring;
  581. struct xhci_virt_ep *ep;
  582. struct list_head *entry;
  583. struct xhci_td *cur_td = NULL;
  584. struct xhci_td *last_unlinked_td;
  585. struct xhci_dequeue_state deq_state;
  586. if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
  587. if (!xhci->devs[slot_id])
  588. xhci_warn(xhci, "Stop endpoint command "
  589. "completion for disabled slot %u\n",
  590. slot_id);
  591. return;
  592. }
  593. memset(&deq_state, 0, sizeof(deq_state));
  594. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  595. ep = &xhci->devs[slot_id]->eps[ep_index];
  596. if (list_empty(&ep->cancelled_td_list)) {
  597. xhci_stop_watchdog_timer_in_irq(xhci, ep);
  598. ep->stopped_td = NULL;
  599. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  600. return;
  601. }
  602. /* Fix up the ep ring first, so HW stops executing cancelled TDs.
  603. * We have the xHCI lock, so nothing can modify this list until we drop
  604. * it. We're also in the event handler, so we can't get re-interrupted
  605. * if another Stop Endpoint command completes
  606. */
  607. list_for_each(entry, &ep->cancelled_td_list) {
  608. cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
  609. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  610. "Removing canceled TD starting at 0x%llx (dma).",
  611. (unsigned long long)xhci_trb_virt_to_dma(
  612. cur_td->start_seg, cur_td->first_trb));
  613. ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
  614. if (!ep_ring) {
  615. /* This shouldn't happen unless a driver is mucking
  616. * with the stream ID after submission. This will
  617. * leave the TD on the hardware ring, and the hardware
  618. * will try to execute it, and may access a buffer
  619. * that has already been freed. In the best case, the
  620. * hardware will execute it, and the event handler will
  621. * ignore the completion event for that TD, since it was
  622. * removed from the td_list for that endpoint. In
  623. * short, don't muck with the stream ID after
  624. * submission.
  625. */
  626. xhci_warn(xhci, "WARN Cancelled URB %p "
  627. "has invalid stream ID %u.\n",
  628. cur_td->urb,
  629. cur_td->urb->stream_id);
  630. goto remove_finished_td;
  631. }
  632. /*
  633. * If we stopped on the TD we need to cancel, then we have to
  634. * move the xHC endpoint ring dequeue pointer past this TD.
  635. */
  636. if (cur_td == ep->stopped_td)
  637. xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
  638. cur_td->urb->stream_id,
  639. cur_td, &deq_state);
  640. else
  641. td_to_noop(xhci, ep_ring, cur_td, false);
  642. remove_finished_td:
  643. /*
  644. * The event handler won't see a completion for this TD anymore,
  645. * so remove it from the endpoint ring's TD list. Keep it in
  646. * the cancelled TD list for URB completion later.
  647. */
  648. list_del_init(&cur_td->td_list);
  649. }
  650. last_unlinked_td = cur_td;
  651. xhci_stop_watchdog_timer_in_irq(xhci, ep);
  652. /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
  653. if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
  654. xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
  655. ep->stopped_td->urb->stream_id, &deq_state);
  656. xhci_ring_cmd_db(xhci);
  657. } else {
  658. /* Otherwise ring the doorbell(s) to restart queued transfers */
  659. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  660. }
  661. ep->stopped_td = NULL;
  662. /*
  663. * Drop the lock and complete the URBs in the cancelled TD list.
  664. * New TDs to be cancelled might be added to the end of the list before
  665. * we can complete all the URBs for the TDs we already unlinked.
  666. * So stop when we've completed the URB for the last TD we unlinked.
  667. */
  668. do {
  669. cur_td = list_entry(ep->cancelled_td_list.next,
  670. struct xhci_td, cancelled_td_list);
  671. list_del_init(&cur_td->cancelled_td_list);
  672. /* Clean up the cancelled URB */
  673. /* Doesn't matter what we pass for status, since the core will
  674. * just overwrite it (because the URB has been unlinked).
  675. */
  676. xhci_giveback_urb_in_irq(xhci, cur_td, 0);
  677. /* Stop processing the cancelled list if the watchdog timer is
  678. * running.
  679. */
  680. if (xhci->xhc_state & XHCI_STATE_DYING)
  681. return;
  682. } while (cur_td != last_unlinked_td);
  683. /* Return to the event handler with xhci->lock re-acquired */
  684. }
  685. static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
  686. {
  687. struct xhci_td *cur_td;
  688. while (!list_empty(&ring->td_list)) {
  689. cur_td = list_first_entry(&ring->td_list,
  690. struct xhci_td, td_list);
  691. list_del_init(&cur_td->td_list);
  692. if (!list_empty(&cur_td->cancelled_td_list))
  693. list_del_init(&cur_td->cancelled_td_list);
  694. xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
  695. }
  696. }
  697. static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
  698. int slot_id, int ep_index)
  699. {
  700. struct xhci_td *cur_td;
  701. struct xhci_virt_ep *ep;
  702. struct xhci_ring *ring;
  703. ep = &xhci->devs[slot_id]->eps[ep_index];
  704. if ((ep->ep_state & EP_HAS_STREAMS) ||
  705. (ep->ep_state & EP_GETTING_NO_STREAMS)) {
  706. int stream_id;
  707. for (stream_id = 0; stream_id < ep->stream_info->num_streams;
  708. stream_id++) {
  709. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  710. "Killing URBs for slot ID %u, ep index %u, stream %u",
  711. slot_id, ep_index, stream_id + 1);
  712. xhci_kill_ring_urbs(xhci,
  713. ep->stream_info->stream_rings[stream_id]);
  714. }
  715. } else {
  716. ring = ep->ring;
  717. if (!ring)
  718. return;
  719. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  720. "Killing URBs for slot ID %u, ep index %u",
  721. slot_id, ep_index);
  722. xhci_kill_ring_urbs(xhci, ring);
  723. }
  724. while (!list_empty(&ep->cancelled_td_list)) {
  725. cur_td = list_first_entry(&ep->cancelled_td_list,
  726. struct xhci_td, cancelled_td_list);
  727. list_del_init(&cur_td->cancelled_td_list);
  728. xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
  729. }
  730. }
  731. /* Watchdog timer function for when a stop endpoint command fails to complete.
  732. * In this case, we assume the host controller is broken or dying or dead. The
  733. * host may still be completing some other events, so we have to be careful to
  734. * let the event ring handler and the URB dequeueing/enqueueing functions know
  735. * through xhci->state.
  736. *
  737. * The timer may also fire if the host takes a very long time to respond to the
  738. * command, and the stop endpoint command completion handler cannot delete the
  739. * timer before the timer function is called. Another endpoint cancellation may
  740. * sneak in before the timer function can grab the lock, and that may queue
  741. * another stop endpoint command and add the timer back. So we cannot use a
  742. * simple flag to say whether there is a pending stop endpoint command for a
  743. * particular endpoint.
  744. *
  745. * Instead we use a combination of that flag and a counter for the number of
  746. * pending stop endpoint commands. If the timer is the tail end of the last
  747. * stop endpoint command, and the endpoint's command is still pending, we assume
  748. * the host is dying.
  749. */
  750. void xhci_stop_endpoint_command_watchdog(unsigned long arg)
  751. {
  752. struct xhci_hcd *xhci;
  753. struct xhci_virt_ep *ep;
  754. int ret, i, j;
  755. unsigned long flags;
  756. ep = (struct xhci_virt_ep *) arg;
  757. xhci = ep->xhci;
  758. spin_lock_irqsave(&xhci->lock, flags);
  759. ep->stop_cmds_pending--;
  760. if (xhci->xhc_state & XHCI_STATE_DYING) {
  761. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  762. "Stop EP timer ran, but another timer marked "
  763. "xHCI as DYING, exiting.");
  764. spin_unlock_irqrestore(&xhci->lock, flags);
  765. return;
  766. }
  767. if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
  768. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  769. "Stop EP timer ran, but no command pending, "
  770. "exiting.");
  771. spin_unlock_irqrestore(&xhci->lock, flags);
  772. return;
  773. }
  774. xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
  775. xhci_warn(xhci, "Assuming host is dying, halting host.\n");
  776. /* Oops, HC is dead or dying or at least not responding to the stop
  777. * endpoint command.
  778. */
  779. xhci->xhc_state |= XHCI_STATE_DYING;
  780. /* Disable interrupts from the host controller and start halting it */
  781. xhci_quiesce(xhci);
  782. spin_unlock_irqrestore(&xhci->lock, flags);
  783. ret = xhci_halt(xhci);
  784. spin_lock_irqsave(&xhci->lock, flags);
  785. if (ret < 0) {
  786. /* This is bad; the host is not responding to commands and it's
  787. * not allowing itself to be halted. At least interrupts are
  788. * disabled. If we call usb_hc_died(), it will attempt to
  789. * disconnect all device drivers under this host. Those
  790. * disconnect() methods will wait for all URBs to be unlinked,
  791. * so we must complete them.
  792. */
  793. xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
  794. xhci_warn(xhci, "Completing active URBs anyway.\n");
  795. /* We could turn all TDs on the rings to no-ops. This won't
  796. * help if the host has cached part of the ring, and is slow if
  797. * we want to preserve the cycle bit. Skip it and hope the host
  798. * doesn't touch the memory.
  799. */
  800. }
  801. for (i = 0; i < MAX_HC_SLOTS; i++) {
  802. if (!xhci->devs[i])
  803. continue;
  804. for (j = 0; j < 31; j++)
  805. xhci_kill_endpoint_urbs(xhci, i, j);
  806. }
  807. spin_unlock_irqrestore(&xhci->lock, flags);
  808. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  809. "Calling usb_hc_died()");
  810. usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
  811. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  812. "xHCI host controller is dead.");
  813. }
  814. static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
  815. struct xhci_virt_device *dev,
  816. struct xhci_ring *ep_ring,
  817. unsigned int ep_index)
  818. {
  819. union xhci_trb *dequeue_temp;
  820. int num_trbs_free_temp;
  821. bool revert = false;
  822. num_trbs_free_temp = ep_ring->num_trbs_free;
  823. dequeue_temp = ep_ring->dequeue;
  824. /* If we get two back-to-back stalls, and the first stalled transfer
  825. * ends just before a link TRB, the dequeue pointer will be left on
  826. * the link TRB by the code in the while loop. So we have to update
  827. * the dequeue pointer one segment further, or we'll jump off
  828. * the segment into la-la-land.
  829. */
  830. if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
  831. ep_ring->deq_seg = ep_ring->deq_seg->next;
  832. ep_ring->dequeue = ep_ring->deq_seg->trbs;
  833. }
  834. while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
  835. /* We have more usable TRBs */
  836. ep_ring->num_trbs_free++;
  837. ep_ring->dequeue++;
  838. if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
  839. ep_ring->dequeue)) {
  840. if (ep_ring->dequeue ==
  841. dev->eps[ep_index].queued_deq_ptr)
  842. break;
  843. ep_ring->deq_seg = ep_ring->deq_seg->next;
  844. ep_ring->dequeue = ep_ring->deq_seg->trbs;
  845. }
  846. if (ep_ring->dequeue == dequeue_temp) {
  847. revert = true;
  848. break;
  849. }
  850. }
  851. if (revert) {
  852. xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
  853. ep_ring->num_trbs_free = num_trbs_free_temp;
  854. }
  855. }
  856. /*
  857. * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  858. * we need to clear the set deq pending flag in the endpoint ring state, so that
  859. * the TD queueing code can ring the doorbell again. We also need to ring the
  860. * endpoint doorbell to restart the ring, but only if there aren't more
  861. * cancellations pending.
  862. */
  863. static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
  864. union xhci_trb *trb, u32 cmd_comp_code)
  865. {
  866. unsigned int ep_index;
  867. unsigned int stream_id;
  868. struct xhci_ring *ep_ring;
  869. struct xhci_virt_device *dev;
  870. struct xhci_virt_ep *ep;
  871. struct xhci_ep_ctx *ep_ctx;
  872. struct xhci_slot_ctx *slot_ctx;
  873. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  874. stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
  875. dev = xhci->devs[slot_id];
  876. ep = &dev->eps[ep_index];
  877. ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
  878. if (!ep_ring) {
  879. xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
  880. stream_id);
  881. /* XXX: Harmless??? */
  882. goto cleanup;
  883. }
  884. ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
  885. slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
  886. if (cmd_comp_code != COMP_SUCCESS) {
  887. unsigned int ep_state;
  888. unsigned int slot_state;
  889. switch (cmd_comp_code) {
  890. case COMP_TRB_ERR:
  891. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
  892. break;
  893. case COMP_CTX_STATE:
  894. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
  895. ep_state = le32_to_cpu(ep_ctx->ep_info);
  896. ep_state &= EP_STATE_MASK;
  897. slot_state = le32_to_cpu(slot_ctx->dev_state);
  898. slot_state = GET_SLOT_STATE(slot_state);
  899. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  900. "Slot state = %u, EP state = %u",
  901. slot_state, ep_state);
  902. break;
  903. case COMP_EBADSLT:
  904. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
  905. slot_id);
  906. break;
  907. default:
  908. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
  909. cmd_comp_code);
  910. break;
  911. }
  912. /* OK what do we do now? The endpoint state is hosed, and we
  913. * should never get to this point if the synchronization between
  914. * queueing and endpoint state is correct. This might happen
  915. * if the device gets disconnected after we've finished
  916. * cancelling URBs, which might not be an error...
  917. */
  918. } else {
  919. u64 deq;
  920. /* 4.6.10 deq ptr is written to the stream ctx for streams */
  921. if (ep->ep_state & EP_HAS_STREAMS) {
  922. struct xhci_stream_ctx *ctx =
  923. &ep->stream_info->stream_ctx_array[stream_id];
  924. deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
  925. } else {
  926. deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
  927. }
  928. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  929. "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
  930. if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
  931. ep->queued_deq_ptr) == deq) {
  932. /* Update the ring's dequeue segment and dequeue pointer
  933. * to reflect the new position.
  934. */
  935. update_ring_for_set_deq_completion(xhci, dev,
  936. ep_ring, ep_index);
  937. } else {
  938. xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
  939. xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
  940. ep->queued_deq_seg, ep->queued_deq_ptr);
  941. }
  942. }
  943. cleanup:
  944. dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
  945. dev->eps[ep_index].queued_deq_seg = NULL;
  946. dev->eps[ep_index].queued_deq_ptr = NULL;
  947. /* Restart any rings with pending URBs */
  948. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  949. }
  950. static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
  951. union xhci_trb *trb, u32 cmd_comp_code)
  952. {
  953. unsigned int ep_index;
  954. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  955. /* This command will only fail if the endpoint wasn't halted,
  956. * but we don't care.
  957. */
  958. xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
  959. "Ignoring reset ep completion code of %u", cmd_comp_code);
  960. /* HW with the reset endpoint quirk needs to have a configure endpoint
  961. * command complete before the endpoint can be used. Queue that here
  962. * because the HW can't handle two commands being queued in a row.
  963. */
  964. if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
  965. struct xhci_command *command;
  966. command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
  967. if (!command) {
  968. xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
  969. return;
  970. }
  971. xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
  972. "Queueing configure endpoint command");
  973. xhci_queue_configure_endpoint(xhci, command,
  974. xhci->devs[slot_id]->in_ctx->dma, slot_id,
  975. false);
  976. xhci_ring_cmd_db(xhci);
  977. } else {
  978. /* Clear our internal halted state */
  979. xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
  980. }
  981. }
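/*
 * Completion handler for an Enable Slot command: record the slot ID the xHC
 * handed back (or 0 on failure) so whoever issued the command can pick it up.
 */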
  982. static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
  983. u32 cmd_comp_code)
  984. {
  985. if (cmd_comp_code == COMP_SUCCESS)
  986. xhci->slot_id = slot_id;
  987. else
  988. xhci->slot_id = 0;
  989. }
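/*
 * Completion handler for a Disable Slot command: free the virt device for the
 * slot; hosts with XHCI_EP_LIMIT_QUIRK first give back the default control
 * endpoint's resources.
 */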
  990. static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
  991. {
  992. struct xhci_virt_device *virt_dev;
  993. virt_dev = xhci->devs[slot_id];
  994. if (!virt_dev)
  995. return;
  996. if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
  997. /* Delete default control endpoint resources */
  998. xhci_free_device_endpoint_resources(xhci, virt_dev, true);
  999. xhci_free_virt_device(xhci, slot_id);
  1000. }
  1001. static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
  1002. struct xhci_event_cmd *event, u32 cmd_comp_code)
  1003. {
  1004. struct xhci_virt_device *virt_dev;
  1005. struct xhci_input_control_ctx *ctrl_ctx;
  1006. unsigned int ep_index;
  1007. unsigned int ep_state;
  1008. u32 add_flags, drop_flags;
  1009. /*
  1010. * Configure endpoint commands can come from the USB core
  1011. * configuration or alt setting changes, or because the HW
  1012. * needed an extra configure endpoint command after a reset
  1013. * endpoint command or streams were being configured.
  1014. * If the command was for a halted endpoint, the xHCI driver
  1015. * is not waiting on the configure endpoint command.
  1016. */
  1017. virt_dev = xhci->devs[slot_id];
  1018. ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
  1019. if (!ctrl_ctx) {
  1020. xhci_warn(xhci, "Could not get input context, bad type.\n");
  1021. return;
  1022. }
  1023. add_flags = le32_to_cpu(ctrl_ctx->add_flags);
  1024. drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
  1025. /* Input ctx add_flags are the endpoint index plus one */
  1026. ep_index = xhci_last_valid_endpoint(add_flags) - 1;
  1027. /* A usb_set_interface() call directly after clearing a halted
  1028. * condition may race on this quirky hardware. Not worth
  1029. * worrying about, since this is prototype hardware. Not sure
  1030. * if this will work for streams, but streams support was
  1031. * untested on this prototype.
  1032. */
  1033. if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
  1034. ep_index != (unsigned int) -1 &&
  1035. add_flags - SLOT_FLAG == drop_flags) {
  1036. ep_state = virt_dev->eps[ep_index].ep_state;
  1037. if (!(ep_state & EP_HALTED))
  1038. return;
  1039. xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
  1040. "Completed config ep cmd - "
  1041. "last ep index = %d, state = %d",
  1042. ep_index, ep_state);
  1043. /* Clear internal halted state and restart ring(s) */
  1044. virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
  1045. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1046. return;
  1047. }
  1048. return;
  1049. }
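/*
 * Completion handler for a Reset Device command: nothing to do here beyond
 * logging, and warning if the slot no longer has a virt device.
 */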
  1050. static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
  1051. struct xhci_event_cmd *event)
  1052. {
  1053. xhci_dbg(xhci, "Completed reset device command.\n");
  1054. if (!xhci->devs[slot_id])
  1055. xhci_warn(xhci, "Reset device command completion "
  1056. "for disabled slot %u\n", slot_id);
  1057. }
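/* NEC vendor command completion: log the firmware version from the event. */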
  1058. static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
  1059. struct xhci_event_cmd *event)
  1060. {
  1061. if (!(xhci->quirks & XHCI_NEC_HOST)) {
  1062. xhci->error_bitmask |= 1 << 6;
  1063. return;
  1064. }
  1065. xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
  1066. "NEC firmware version %2x.%02x",
  1067. NEC_FW_MAJOR(le32_to_cpu(event->status)),
  1068. NEC_FW_MINOR(le32_to_cpu(event->status)));
  1069. }
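/*
 * Remove a command from xhci->cmd_list and either wake its waiter with the
 * final status or, if nobody is waiting, free the command structure.
 */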
  1070. static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
  1071. {
  1072. list_del(&cmd->cmd_list);
  1073. if (cmd->completion) {
  1074. cmd->status = status;
  1075. complete(cmd->completion);
  1076. } else {
  1077. kfree(cmd);
  1078. }
  1079. }
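/*
 * Complete every command still on xhci->cmd_list with COMP_CMD_ABORT. Used
 * when the command ring can no longer make progress (e.g. a failed ring abort).
 */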
  1080. void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
  1081. {
  1082. struct xhci_command *cur_cmd, *tmp_cmd;
  1083. list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
  1084. xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
  1085. }
  1086. /*
  1087. * Turn all commands on command ring with status set to "aborted" to no-op trbs.
  1088. * If there are other commands waiting then restart the ring and kick the timer.
  1089. * This must be called with command ring stopped and xhci->lock held.
  1090. */
  1091. static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
  1092. struct xhci_command *cur_cmd)
  1093. {
  1094. struct xhci_command *i_cmd, *tmp_cmd;
  1095. u32 cycle_state;
  1096. /* Turn all aborted commands in list to no-ops, then restart */
  1097. list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
  1098. cmd_list) {
  1099. if (i_cmd->status != COMP_CMD_ABORT)
  1100. continue;
  1101. i_cmd->status = COMP_CMD_STOP;
  1102. xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
  1103. i_cmd->command_trb);
  1104. /* get cycle state from the original cmd trb */
  1105. cycle_state = le32_to_cpu(
  1106. i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
  1107. /* modify the command trb to no-op command */
  1108. i_cmd->command_trb->generic.field[0] = 0;
  1109. i_cmd->command_trb->generic.field[1] = 0;
  1110. i_cmd->command_trb->generic.field[2] = 0;
  1111. i_cmd->command_trb->generic.field[3] = cpu_to_le32(
  1112. TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
  1113. /*
1114. * The caller waiting for completion is notified when a command
1115. * completion event is received for these no-op commands.
  1116. */
  1117. }
  1118. xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
  1119. /* ring command ring doorbell to restart the command ring */
  1120. if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
  1121. !(xhci->xhc_state & XHCI_STATE_DYING)) {
  1122. xhci->current_cmd = cur_cmd;
  1123. mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
  1124. xhci_ring_cmd_db(xhci);
  1125. }
  1126. return;
  1127. }
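/*
 * Command ring timer callback: the current command has taken too long. Mark it
 * aborted and, if the hardware command ring is still running, abort the ring;
 * if the ring is already stopped, turn the aborted commands into no-ops and
 * restart it directly.
 */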
  1128. void xhci_handle_command_timeout(unsigned long data)
  1129. {
  1130. struct xhci_hcd *xhci;
  1131. int ret;
  1132. unsigned long flags;
  1133. u64 hw_ring_state;
  1134. struct xhci_command *cur_cmd = NULL;
  1135. xhci = (struct xhci_hcd *) data;
  1136. /* mark this command to be cancelled */
  1137. spin_lock_irqsave(&xhci->lock, flags);
  1138. if (xhci->current_cmd) {
  1139. cur_cmd = xhci->current_cmd;
  1140. cur_cmd->status = COMP_CMD_ABORT;
  1141. }
  1142. /* Make sure command ring is running before aborting it */
  1143. hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
  1144. if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
  1145. (hw_ring_state & CMD_RING_RUNNING)) {
  1146. spin_unlock_irqrestore(&xhci->lock, flags);
  1147. xhci_dbg(xhci, "Command timeout\n");
  1148. ret = xhci_abort_cmd_ring(xhci);
  1149. if (unlikely(ret == -ESHUTDOWN)) {
  1150. xhci_err(xhci, "Abort command ring failed\n");
  1151. xhci_cleanup_command_queue(xhci);
  1152. usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
  1153. xhci_dbg(xhci, "xHCI host controller is dead.\n");
  1154. }
  1155. return;
  1156. }
  1157. /* command timeout on stopped ring, ring can't be aborted */
  1158. xhci_dbg(xhci, "Command timeout on stopped ring\n");
  1159. xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
  1160. spin_unlock_irqrestore(&xhci->lock, flags);
  1161. return;
  1162. }
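/*
 * Handle a Command Completion event: check that the event matches the command
 * ring dequeue pointer and the command at the head of xhci->cmd_list, handle
 * stopped/aborted rings, then dispatch on the command TRB type and give the
 * command back.
 */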
  1163. static void handle_cmd_completion(struct xhci_hcd *xhci,
  1164. struct xhci_event_cmd *event)
  1165. {
  1166. int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1167. u64 cmd_dma;
  1168. dma_addr_t cmd_dequeue_dma;
  1169. u32 cmd_comp_code;
  1170. union xhci_trb *cmd_trb;
  1171. struct xhci_command *cmd;
  1172. u32 cmd_type;
  1173. cmd_dma = le64_to_cpu(event->cmd_trb);
  1174. cmd_trb = xhci->cmd_ring->dequeue;
  1175. cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
  1176. cmd_trb);
  1177. /* Is the command ring deq ptr out of sync with the deq seg ptr? */
  1178. if (cmd_dequeue_dma == 0) {
  1179. xhci->error_bitmask |= 1 << 4;
  1180. return;
  1181. }
  1182. /* Does the DMA address match our internal dequeue pointer address? */
  1183. if (cmd_dma != (u64) cmd_dequeue_dma) {
  1184. xhci->error_bitmask |= 1 << 5;
  1185. return;
  1186. }
  1187. cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
  1188. if (cmd->command_trb != xhci->cmd_ring->dequeue) {
  1189. xhci_err(xhci,
  1190. "Command completion event does not match command\n");
  1191. return;
  1192. }
  1193. del_timer(&xhci->cmd_timer);
  1194. trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
  1195. cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
  1196. /* If CMD ring stopped we own the trbs between enqueue and dequeue */
  1197. if (cmd_comp_code == COMP_CMD_STOP) {
  1198. xhci_handle_stopped_cmd_ring(xhci, cmd);
  1199. return;
  1200. }
  1201. /*
  1202. * Host aborted the command ring, check if the current command was
  1203. * supposed to be aborted, otherwise continue normally.
  1204. * The command ring is stopped now, but the xHC will issue a Command
  1205. * Ring Stopped event which will cause us to restart it.
  1206. */
  1207. if (cmd_comp_code == COMP_CMD_ABORT) {
  1208. xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
  1209. if (cmd->status == COMP_CMD_ABORT)
  1210. goto event_handled;
  1211. }
  1212. cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
  1213. switch (cmd_type) {
  1214. case TRB_ENABLE_SLOT:
  1215. xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
  1216. break;
  1217. case TRB_DISABLE_SLOT:
  1218. xhci_handle_cmd_disable_slot(xhci, slot_id);
  1219. break;
  1220. case TRB_CONFIG_EP:
  1221. if (!cmd->completion)
  1222. xhci_handle_cmd_config_ep(xhci, slot_id, event,
  1223. cmd_comp_code);
  1224. break;
  1225. case TRB_EVAL_CONTEXT:
  1226. break;
  1227. case TRB_ADDR_DEV:
  1228. break;
  1229. case TRB_STOP_RING:
  1230. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1231. le32_to_cpu(cmd_trb->generic.field[3])));
  1232. xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
  1233. break;
  1234. case TRB_SET_DEQ:
  1235. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1236. le32_to_cpu(cmd_trb->generic.field[3])));
  1237. xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
  1238. break;
  1239. case TRB_CMD_NOOP:
  1240. /* Is this an aborted command turned to NO-OP? */
  1241. if (cmd->status == COMP_CMD_STOP)
  1242. cmd_comp_code = COMP_CMD_STOP;
  1243. break;
  1244. case TRB_RESET_EP:
  1245. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1246. le32_to_cpu(cmd_trb->generic.field[3])));
  1247. xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
  1248. break;
  1249. case TRB_RESET_DEV:
  1250. /* SLOT_ID field in reset device cmd completion event TRB is 0.
  1251. * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
  1252. */
  1253. slot_id = TRB_TO_SLOT_ID(
  1254. le32_to_cpu(cmd_trb->generic.field[3]));
  1255. xhci_handle_cmd_reset_dev(xhci, slot_id, event);
  1256. break;
  1257. case TRB_NEC_GET_FW:
  1258. xhci_handle_cmd_nec_get_fw(xhci, event);
  1259. break;
  1260. default:
  1261. /* Skip over unknown commands on the event ring */
  1262. xhci->error_bitmask |= 1 << 6;
  1263. break;
  1264. }
  1265. /* restart timer if this wasn't the last command */
  1266. if (cmd->cmd_list.next != &xhci->cmd_list) {
  1267. xhci->current_cmd = list_entry(cmd->cmd_list.next,
  1268. struct xhci_command, cmd_list);
  1269. mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
  1270. }
  1271. event_handled:
  1272. xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
  1273. inc_deq(xhci, xhci->cmd_ring);
  1274. }
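/*
 * Vendor-specific event TRBs: on NEC hosts the NEC command completion event is
 * routed through the normal command completion handler; anything else is only
 * logged.
 */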
  1275. static void handle_vendor_event(struct xhci_hcd *xhci,
  1276. union xhci_trb *event)
  1277. {
  1278. u32 trb_type;
  1279. trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
  1280. xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
  1281. if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
  1282. handle_cmd_completion(xhci, &event->event_cmd);
  1283. }
  1284. /* @port_id: the one-based port ID from the hardware (indexed from array of all
  1285. * port registers -- USB 3.0 and USB 2.0).
  1286. *
  1287. * Returns a zero-based port number, which is suitable for indexing into each of
  1288. * the split roothubs' port arrays and bus state arrays.
  1289. * Add one to it in order to call xhci_find_slot_id_by_port.
  1290. */
  1291. static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
  1292. struct xhci_hcd *xhci, u32 port_id)
  1293. {
  1294. unsigned int i;
  1295. unsigned int num_similar_speed_ports = 0;
  1296. /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
  1297. * and usb2_ports are 0-based indexes. Count the number of similar
  1298. * speed ports, up to 1 port before this port.
  1299. */
  1300. for (i = 0; i < (port_id - 1); i++) {
  1301. u8 port_speed = xhci->port_array[i];
  1302. /*
  1303. * Skip ports that don't have known speeds, or have duplicate
  1304. * Extended Capabilities port speed entries.
  1305. */
  1306. if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
  1307. continue;
  1308. /*
  1309. * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
  1310. * 1.1 ports are under the USB 2.0 hub. If the port speed
  1311. * matches the device speed, it's a similar speed port.
  1312. */
  1313. if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
  1314. num_similar_speed_ports++;
  1315. }
  1316. return num_similar_speed_ports;
  1317. }
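/*
 * Handle a Device Notification event (sent by a USB 3.0 device to signal
 * remote wakeup): look up the slot's usb_device and tell the hub driver its
 * parent port saw a wakeup.
 */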
  1318. static void handle_device_notification(struct xhci_hcd *xhci,
  1319. union xhci_trb *event)
  1320. {
  1321. u32 slot_id;
  1322. struct usb_device *udev;
  1323. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
  1324. if (!xhci->devs[slot_id]) {
  1325. xhci_warn(xhci, "Device Notification event for "
  1326. "unused slot %u\n", slot_id);
  1327. return;
  1328. }
  1329. xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
  1330. slot_id);
  1331. udev = xhci->devs[slot_id]->udev;
  1332. if (udev && udev->parent)
  1333. usb_wakeup_notification(udev->parent, udev->portnum);
  1334. }
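/*
 * Handle a Port Status Change event: map the 1-based hardware port number onto
 * the right roothub (USB 2.0 vs USB 3.0) and its 0-based port index, deal with
 * resume and remote-wakeup signalling, then kick roothub polling in the core.
 */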
  1335. static void handle_port_status(struct xhci_hcd *xhci,
  1336. union xhci_trb *event)
  1337. {
  1338. struct usb_hcd *hcd;
  1339. u32 port_id;
  1340. u32 temp, temp1;
  1341. int max_ports;
  1342. int slot_id;
  1343. unsigned int faked_port_index;
  1344. u8 major_revision;
  1345. struct xhci_bus_state *bus_state;
  1346. __le32 __iomem **port_array;
  1347. bool bogus_port_status = false;
  1348. /* Port status change events always have a successful completion code */
  1349. if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
  1350. xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
  1351. xhci->error_bitmask |= 1 << 8;
  1352. }
  1353. port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
  1354. xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
  1355. max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
  1356. if ((port_id <= 0) || (port_id > max_ports)) {
  1357. xhci_warn(xhci, "Invalid port id %d\n", port_id);
  1358. inc_deq(xhci, xhci->event_ring);
  1359. return;
  1360. }
  1361. /* Figure out which usb_hcd this port is attached to:
  1362. * is it a USB 3.0 port or a USB 2.0/1.1 port?
  1363. */
  1364. major_revision = xhci->port_array[port_id - 1];
  1365. /* Find the right roothub. */
  1366. hcd = xhci_to_hcd(xhci);
  1367. if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
  1368. hcd = xhci->shared_hcd;
  1369. if (major_revision == 0) {
  1370. xhci_warn(xhci, "Event for port %u not in "
  1371. "Extended Capabilities, ignoring.\n",
  1372. port_id);
  1373. bogus_port_status = true;
  1374. goto cleanup;
  1375. }
  1376. if (major_revision == DUPLICATE_ENTRY) {
1377. xhci_warn(xhci, "Event for port %u duplicated in "
  1378. "Extended Capabilities, ignoring.\n",
  1379. port_id);
  1380. bogus_port_status = true;
  1381. goto cleanup;
  1382. }
  1383. /*
  1384. * Hardware port IDs reported by a Port Status Change Event include USB
  1385. * 3.0 and USB 2.0 ports. We want to check if the port has reported a
  1386. * resume event, but we first need to translate the hardware port ID
  1387. * into the index into the ports on the correct split roothub, and the
  1388. * correct bus_state structure.
  1389. */
  1390. bus_state = &xhci->bus_state[hcd_index(hcd)];
  1391. if (hcd->speed == HCD_USB3)
  1392. port_array = xhci->usb3_ports;
  1393. else
  1394. port_array = xhci->usb2_ports;
  1395. /* Find the faked port hub number */
  1396. faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
  1397. port_id);
  1398. temp = readl(port_array[faked_port_index]);
  1399. if (hcd->state == HC_STATE_SUSPENDED) {
  1400. xhci_dbg(xhci, "resume root hub\n");
  1401. usb_hcd_resume_root_hub(hcd);
  1402. }
  1403. if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
  1404. bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
  1405. if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
  1406. xhci_dbg(xhci, "port resume event for port %d\n", port_id);
  1407. temp1 = readl(&xhci->op_regs->command);
  1408. if (!(temp1 & CMD_RUN)) {
  1409. xhci_warn(xhci, "xHC is not running.\n");
  1410. goto cleanup;
  1411. }
  1412. if (DEV_SUPERSPEED(temp)) {
  1413. xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
  1414. /* Set a flag to say the port signaled remote wakeup,
  1415. * so we can tell the difference between the end of
  1416. * device and host initiated resume.
  1417. */
  1418. bus_state->port_remote_wakeup |= 1 << faked_port_index;
  1419. xhci_test_and_clear_bit(xhci, port_array,
  1420. faked_port_index, PORT_PLC);
  1421. xhci_set_link_state(xhci, port_array, faked_port_index,
  1422. XDEV_U0);
  1423. /* Need to wait until the next link state change
  1424. * indicates the device is actually in U0.
  1425. */
  1426. bogus_port_status = true;
  1427. goto cleanup;
  1428. } else {
  1429. xhci_dbg(xhci, "resume HS port %d\n", port_id);
  1430. bus_state->resume_done[faked_port_index] = jiffies +
  1431. msecs_to_jiffies(USB_RESUME_TIMEOUT);
  1432. set_bit(faked_port_index, &bus_state->resuming_ports);
  1433. mod_timer(&hcd->rh_timer,
  1434. bus_state->resume_done[faked_port_index]);
  1435. /* Do the rest in GetPortStatus */
  1436. }
  1437. }
  1438. if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
  1439. DEV_SUPERSPEED(temp)) {
  1440. xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
  1441. /* We've just brought the device into U0 through either the
  1442. * Resume state after a device remote wakeup, or through the
  1443. * U3Exit state after a host-initiated resume. If it's a device
  1444. * initiated remote wake, don't pass up the link state change,
  1445. * so the roothub behavior is consistent with external
  1446. * USB 3.0 hub behavior.
  1447. */
  1448. slot_id = xhci_find_slot_id_by_port(hcd, xhci,
  1449. faked_port_index + 1);
  1450. if (slot_id && xhci->devs[slot_id])
  1451. xhci_ring_device(xhci, slot_id);
  1452. if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
  1453. bus_state->port_remote_wakeup &=
  1454. ~(1 << faked_port_index);
  1455. xhci_test_and_clear_bit(xhci, port_array,
  1456. faked_port_index, PORT_PLC);
  1457. usb_wakeup_notification(hcd->self.root_hub,
  1458. faked_port_index + 1);
  1459. bogus_port_status = true;
  1460. goto cleanup;
  1461. }
  1462. }
  1463. /*
  1464. * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1465. * RExit to a disconnect state). If so, let the driver know it's
  1466. * out of the RExit state.
  1467. */
  1468. if (!DEV_SUPERSPEED(temp) &&
  1469. test_and_clear_bit(faked_port_index,
  1470. &bus_state->rexit_ports)) {
  1471. complete(&bus_state->rexit_done[faked_port_index]);
  1472. bogus_port_status = true;
  1473. goto cleanup;
  1474. }
  1475. if (hcd->speed != HCD_USB3)
  1476. xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
  1477. PORT_PLC);
  1478. cleanup:
  1479. /* Update event ring dequeue pointer before dropping the lock */
  1480. inc_deq(xhci, xhci->event_ring);
  1481. /* Don't make the USB core poll the roothub if we got a bad port status
  1482. * change event. Besides, at that point we can't tell which roothub
  1483. * (USB 2.0 or USB 3.0) to kick.
  1484. */
  1485. if (bogus_port_status)
  1486. return;
  1487. /*
  1488. * xHCI port-status-change events occur when the "or" of all the
  1489. * status-change bits in the portsc register changes from 0 to 1.
  1490. * New status changes won't cause an event if any other change
  1491. * bits are still set. When an event occurs, switch over to
  1492. * polling to avoid losing status changes.
  1493. */
  1494. xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
  1495. set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
  1496. spin_unlock(&xhci->lock);
  1497. /* Pass this up to the core */
  1498. usb_hcd_poll_rh_status(hcd);
  1499. spin_lock(&xhci->lock);
  1500. }
  1501. /*
  1502. * This TD is defined by the TRBs starting at start_trb in start_seg and ending
  1503. * at end_trb, which may be in another segment. If the suspect DMA address is a
  1504. * TRB in this TD, this function returns that TRB's segment. Otherwise it
  1505. * returns 0.
  1506. */
  1507. struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
  1508. struct xhci_segment *start_seg,
  1509. union xhci_trb *start_trb,
  1510. union xhci_trb *end_trb,
  1511. dma_addr_t suspect_dma,
  1512. bool debug)
  1513. {
  1514. dma_addr_t start_dma;
  1515. dma_addr_t end_seg_dma;
  1516. dma_addr_t end_trb_dma;
  1517. struct xhci_segment *cur_seg;
  1518. start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
  1519. cur_seg = start_seg;
  1520. do {
  1521. if (start_dma == 0)
  1522. return NULL;
  1523. /* We may get an event for a Link TRB in the middle of a TD */
  1524. end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
  1525. &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
  1526. /* If the end TRB isn't in this segment, this is set to 0 */
  1527. end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
  1528. if (debug)
  1529. xhci_warn(xhci,
  1530. "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
  1531. (unsigned long long)suspect_dma,
  1532. (unsigned long long)start_dma,
  1533. (unsigned long long)end_trb_dma,
  1534. (unsigned long long)cur_seg->dma,
  1535. (unsigned long long)end_seg_dma);
  1536. if (end_trb_dma > 0) {
  1537. /* The end TRB is in this segment, so suspect should be here */
  1538. if (start_dma <= end_trb_dma) {
  1539. if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
  1540. return cur_seg;
  1541. } else {
  1542. /* Case for one segment with
  1543. * a TD wrapped around to the top
  1544. */
  1545. if ((suspect_dma >= start_dma &&
  1546. suspect_dma <= end_seg_dma) ||
  1547. (suspect_dma >= cur_seg->dma &&
  1548. suspect_dma <= end_trb_dma))
  1549. return cur_seg;
  1550. }
  1551. return NULL;
  1552. } else {
  1553. /* Might still be somewhere in this segment */
  1554. if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
  1555. return cur_seg;
  1556. }
  1557. cur_seg = cur_seg->next;
  1558. start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
  1559. } while (cur_seg != start_seg);
  1560. return NULL;
  1561. }
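/*
 * Recover an endpoint the xHC has halted: mark it halted in our state, queue a
 * Reset Endpoint command, clean up the stalled ring so the dequeue pointer
 * moves past the offending TD, and ring the command doorbell.
 */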
  1562. static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
  1563. unsigned int slot_id, unsigned int ep_index,
  1564. unsigned int stream_id,
  1565. struct xhci_td *td, union xhci_trb *event_trb)
  1566. {
  1567. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  1568. struct xhci_command *command;
  1569. command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
  1570. if (!command)
  1571. return;
  1572. ep->ep_state |= EP_HALTED;
  1573. ep->stopped_stream = stream_id;
  1574. xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
  1575. xhci_cleanup_stalled_ring(xhci, ep_index, td);
  1576. ep->stopped_stream = 0;
  1577. xhci_ring_cmd_db(xhci);
  1578. }
  1579. /* Check if an error has halted the endpoint ring. The class driver will
  1580. * cleanup the halt for a non-default control endpoint if we indicate a stall.
  1581. * However, a babble and other errors also halt the endpoint ring, and the class
  1582. * driver won't clear the halt in that case, so we need to issue a Set Transfer
  1583. * Ring Dequeue Pointer command manually.
  1584. */
  1585. static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
  1586. struct xhci_ep_ctx *ep_ctx,
  1587. unsigned int trb_comp_code)
  1588. {
  1589. /* TRB completion codes that may require a manual halt cleanup */
  1590. if (trb_comp_code == COMP_TX_ERR ||
  1591. trb_comp_code == COMP_BABBLE ||
  1592. trb_comp_code == COMP_SPLIT_ERR)
1593. /* The 0.95 spec says a babbling control endpoint
  1594. * is not halted. The 0.96 spec says it is. Some HW
  1595. * claims to be 0.95 compliant, but it halts the control
  1596. * endpoint anyway. Check if a babble halted the
  1597. * endpoint.
  1598. */
  1599. if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
  1600. cpu_to_le32(EP_STATE_HALTED))
  1601. return 1;
  1602. return 0;
  1603. }
  1604. int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
  1605. {
  1606. if (trb_comp_code >= 224 && trb_comp_code <= 255) {
  1607. /* Vendor defined "informational" completion code,
  1608. * treat as not-an-error.
  1609. */
  1610. xhci_dbg(xhci, "Vendor defined info completion code %u\n",
  1611. trb_comp_code);
  1612. xhci_dbg(xhci, "Treating code as success.\n");
  1613. return 1;
  1614. }
  1615. return 0;
  1616. }
  1617. /*
  1618. * Finish the td processing, remove the td from td list;
  1619. * Return 1 if the urb can be given back.
  1620. */
  1621. static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1622. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1623. struct xhci_virt_ep *ep, int *status, bool skip)
  1624. {
  1625. struct xhci_virt_device *xdev;
  1626. struct xhci_ring *ep_ring;
  1627. unsigned int slot_id;
  1628. int ep_index;
  1629. struct urb *urb = NULL;
  1630. struct xhci_ep_ctx *ep_ctx;
  1631. int ret = 0;
  1632. struct urb_priv *urb_priv;
  1633. u32 trb_comp_code;
  1634. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1635. xdev = xhci->devs[slot_id];
  1636. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  1637. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1638. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1639. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1640. if (skip)
  1641. goto td_cleanup;
  1642. if (trb_comp_code == COMP_STOP_INVAL ||
  1643. trb_comp_code == COMP_STOP ||
  1644. trb_comp_code == COMP_STOP_SHORT) {
  1645. /* The Endpoint Stop Command completion will take care of any
  1646. * stopped TDs. A stopped TD may be restarted, so don't update
  1647. * the ring dequeue pointer or take this TD off any lists yet.
  1648. */
  1649. ep->stopped_td = td;
  1650. return 0;
  1651. }
  1652. if (trb_comp_code == COMP_STALL ||
  1653. xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
  1654. trb_comp_code)) {
  1655. /* Issue a reset endpoint command to clear the host side
  1656. * halt, followed by a set dequeue command to move the
  1657. * dequeue pointer past the TD.
  1658. * The class driver clears the device side halt later.
  1659. */
  1660. xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
  1661. ep_ring->stream_id, td, event_trb);
  1662. } else {
  1663. /* Update ring dequeue pointer */
  1664. while (ep_ring->dequeue != td->last_trb)
  1665. inc_deq(xhci, ep_ring);
  1666. inc_deq(xhci, ep_ring);
  1667. }
  1668. td_cleanup:
  1669. /* Clean up the endpoint's TD list */
  1670. urb = td->urb;
  1671. urb_priv = urb->hcpriv;
  1672. /* Do one last check of the actual transfer length.
  1673. * If the host controller said we transferred more data than the buffer
  1674. * length, urb->actual_length will be a very big number (since it's
  1675. * unsigned). Play it safe and say we didn't transfer anything.
  1676. */
  1677. if (urb->actual_length > urb->transfer_buffer_length) {
  1678. xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
  1679. urb->transfer_buffer_length,
  1680. urb->actual_length);
  1681. urb->actual_length = 0;
  1682. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1683. *status = -EREMOTEIO;
  1684. else
  1685. *status = 0;
  1686. }
  1687. list_del_init(&td->td_list);
  1688. /* Was this TD slated to be cancelled but completed anyway? */
  1689. if (!list_empty(&td->cancelled_td_list))
  1690. list_del_init(&td->cancelled_td_list);
  1691. urb_priv->td_cnt++;
  1692. /* Giveback the urb when all the tds are completed */
  1693. if (urb_priv->td_cnt == urb_priv->length) {
  1694. ret = 1;
  1695. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  1696. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
  1697. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
  1698. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  1699. usb_amd_quirk_pll_enable();
  1700. }
  1701. }
  1702. }
  1703. return ret;
  1704. }
  1705. /*
  1706. * Process control tds, update urb status and actual_length.
  1707. */
  1708. static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1709. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1710. struct xhci_virt_ep *ep, int *status)
  1711. {
  1712. struct xhci_virt_device *xdev;
  1713. struct xhci_ring *ep_ring;
  1714. unsigned int slot_id;
  1715. int ep_index;
  1716. struct xhci_ep_ctx *ep_ctx;
  1717. u32 trb_comp_code;
  1718. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1719. xdev = xhci->devs[slot_id];
  1720. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  1721. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1722. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1723. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1724. switch (trb_comp_code) {
  1725. case COMP_SUCCESS:
  1726. if (event_trb == ep_ring->dequeue) {
  1727. xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
  1728. "without IOC set??\n");
  1729. *status = -ESHUTDOWN;
  1730. } else if (event_trb != td->last_trb) {
  1731. xhci_warn(xhci, "WARN: Success on ctrl data TRB "
  1732. "without IOC set??\n");
  1733. *status = -ESHUTDOWN;
  1734. } else {
  1735. *status = 0;
  1736. }
  1737. break;
  1738. case COMP_SHORT_TX:
  1739. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1740. *status = -EREMOTEIO;
  1741. else
  1742. *status = 0;
  1743. break;
  1744. case COMP_STOP_SHORT:
  1745. if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
  1746. xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
  1747. else
  1748. td->urb->actual_length =
  1749. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1750. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1751. case COMP_STOP:
  1752. /* Did we stop at data stage? */
  1753. if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
  1754. td->urb->actual_length =
  1755. td->urb->transfer_buffer_length -
  1756. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1757. /* fall through */
  1758. case COMP_STOP_INVAL:
  1759. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1760. default:
  1761. if (!xhci_requires_manual_halt_cleanup(xhci,
  1762. ep_ctx, trb_comp_code))
  1763. break;
  1764. xhci_dbg(xhci, "TRB error code %u, "
  1765. "halted endpoint index = %u\n",
  1766. trb_comp_code, ep_index);
  1767. /* else fall through */
  1768. case COMP_STALL:
  1769. /* Did we transfer part of the data (middle) phase? */
  1770. if (event_trb != ep_ring->dequeue &&
  1771. event_trb != td->last_trb)
  1772. td->urb->actual_length =
  1773. td->urb->transfer_buffer_length -
  1774. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1775. else if (!td->urb_length_set)
  1776. td->urb->actual_length = 0;
  1777. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1778. }
  1779. /*
  1780. * Did we transfer any data, despite the errors that might have
  1781. * happened? I.e. did we get past the setup stage?
  1782. */
  1783. if (event_trb != ep_ring->dequeue) {
  1784. /* The event was for the status stage */
  1785. if (event_trb == td->last_trb) {
  1786. if (td->urb_length_set) {
  1787. /* Don't overwrite a previously set error code
  1788. */
  1789. if ((*status == -EINPROGRESS || *status == 0) &&
  1790. (td->urb->transfer_flags
  1791. & URB_SHORT_NOT_OK))
  1792. /* Did we already see a short data
  1793. * stage? */
  1794. *status = -EREMOTEIO;
  1795. } else {
  1796. td->urb->actual_length =
  1797. td->urb->transfer_buffer_length;
  1798. }
  1799. } else {
  1800. /*
  1801. * Maybe the event was for the data stage? If so, update
  1802. * already the actual_length of the URB and flag it as
  1803. * set, so that it is not overwritten in the event for
  1804. * the last TRB.
  1805. */
  1806. td->urb_length_set = true;
  1807. td->urb->actual_length =
  1808. td->urb->transfer_buffer_length -
  1809. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1810. xhci_dbg(xhci, "Waiting for status "
  1811. "stage event\n");
  1812. return 0;
  1813. }
  1814. }
  1815. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1816. }
  1817. /*
  1818. * Process isochronous tds, update urb packet status and actual_length.
  1819. */
  1820. static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1821. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1822. struct xhci_virt_ep *ep, int *status)
  1823. {
  1824. struct xhci_ring *ep_ring;
  1825. struct urb_priv *urb_priv;
  1826. int idx;
  1827. int len = 0;
  1828. union xhci_trb *cur_trb;
  1829. struct xhci_segment *cur_seg;
  1830. struct usb_iso_packet_descriptor *frame;
  1831. u32 trb_comp_code;
  1832. bool skip_td = false;
  1833. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1834. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1835. urb_priv = td->urb->hcpriv;
  1836. idx = urb_priv->td_cnt;
  1837. frame = &td->urb->iso_frame_desc[idx];
  1838. /* handle completion code */
  1839. switch (trb_comp_code) {
  1840. case COMP_SUCCESS:
  1841. if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
  1842. frame->status = 0;
  1843. break;
  1844. }
  1845. if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
  1846. trb_comp_code = COMP_SHORT_TX;
  1847. /* fallthrough */
  1848. case COMP_STOP_SHORT:
  1849. case COMP_SHORT_TX:
  1850. frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
  1851. -EREMOTEIO : 0;
  1852. break;
  1853. case COMP_BW_OVER:
  1854. frame->status = -ECOMM;
  1855. skip_td = true;
  1856. break;
  1857. case COMP_BUFF_OVER:
  1858. case COMP_BABBLE:
  1859. frame->status = -EOVERFLOW;
  1860. skip_td = true;
  1861. break;
  1862. case COMP_DEV_ERR:
  1863. case COMP_STALL:
  1864. frame->status = -EPROTO;
  1865. skip_td = true;
  1866. break;
  1867. case COMP_TX_ERR:
  1868. frame->status = -EPROTO;
  1869. if (event_trb != td->last_trb)
  1870. return 0;
  1871. skip_td = true;
  1872. break;
  1873. case COMP_STOP:
  1874. case COMP_STOP_INVAL:
  1875. break;
  1876. default:
  1877. frame->status = -1;
  1878. break;
  1879. }
  1880. if (trb_comp_code == COMP_SUCCESS || skip_td) {
  1881. frame->actual_length = frame->length;
  1882. td->urb->actual_length += frame->length;
  1883. } else if (trb_comp_code == COMP_STOP_SHORT) {
  1884. frame->actual_length =
  1885. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1886. td->urb->actual_length += frame->actual_length;
  1887. } else {
  1888. for (cur_trb = ep_ring->dequeue,
  1889. cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
  1890. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  1891. if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
  1892. !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
  1893. len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
  1894. }
  1895. len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
  1896. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1897. if (trb_comp_code != COMP_STOP_INVAL) {
  1898. frame->actual_length = len;
  1899. td->urb->actual_length += len;
  1900. }
  1901. }
  1902. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1903. }
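/*
 * The xHC skipped this isoc TD (e.g. after a Missed Service Error): report the
 * frame as -EXDEV with no data transferred, move the dequeue pointer past the
 * TD, and finish it with the skip flag set.
 */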
  1904. static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1905. struct xhci_transfer_event *event,
  1906. struct xhci_virt_ep *ep, int *status)
  1907. {
  1908. struct xhci_ring *ep_ring;
  1909. struct urb_priv *urb_priv;
  1910. struct usb_iso_packet_descriptor *frame;
  1911. int idx;
  1912. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1913. urb_priv = td->urb->hcpriv;
  1914. idx = urb_priv->td_cnt;
  1915. frame = &td->urb->iso_frame_desc[idx];
  1916. /* The transfer is partly done. */
  1917. frame->status = -EXDEV;
  1918. /* calc actual length */
  1919. frame->actual_length = 0;
  1920. /* Update ring dequeue pointer */
  1921. while (ep_ring->dequeue != td->last_trb)
  1922. inc_deq(xhci, ep_ring);
  1923. inc_deq(xhci, ep_ring);
  1924. return finish_td(xhci, td, NULL, event, ep, status, true);
  1925. }
  1926. /*
  1927. * Process bulk and interrupt tds, update urb status and actual_length.
  1928. */
  1929. static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1930. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1931. struct xhci_virt_ep *ep, int *status)
  1932. {
  1933. struct xhci_ring *ep_ring;
  1934. union xhci_trb *cur_trb;
  1935. struct xhci_segment *cur_seg;
  1936. u32 trb_comp_code;
  1937. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1938. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1939. switch (trb_comp_code) {
  1940. case COMP_SUCCESS:
  1941. /* Double check that the HW transferred everything. */
  1942. if (event_trb != td->last_trb ||
  1943. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
  1944. xhci_warn(xhci, "WARN Successful completion "
  1945. "on short TX\n");
  1946. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1947. *status = -EREMOTEIO;
  1948. else
  1949. *status = 0;
  1950. if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
  1951. trb_comp_code = COMP_SHORT_TX;
  1952. } else {
  1953. *status = 0;
  1954. }
  1955. break;
  1956. case COMP_STOP_SHORT:
  1957. case COMP_SHORT_TX:
  1958. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1959. *status = -EREMOTEIO;
  1960. else
  1961. *status = 0;
  1962. break;
  1963. default:
  1964. /* Others already handled above */
  1965. break;
  1966. }
  1967. if (trb_comp_code == COMP_SHORT_TX)
  1968. xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
  1969. "%d bytes untransferred\n",
  1970. td->urb->ep->desc.bEndpointAddress,
  1971. td->urb->transfer_buffer_length,
  1972. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
  1973. /* Stopped - short packet completion */
  1974. if (trb_comp_code == COMP_STOP_SHORT) {
  1975. td->urb->actual_length =
  1976. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1977. if (td->urb->transfer_buffer_length <
  1978. td->urb->actual_length) {
  1979. xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
  1980. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
  1981. td->urb->actual_length = 0;
  1982. /* status will be set by usb core for canceled urbs */
  1983. }
  1984. /* Fast path - was this the last TRB in the TD for this URB? */
  1985. } else if (event_trb == td->last_trb) {
  1986. if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
  1987. return finish_td(xhci, td, event_trb, event, ep,
  1988. status, false);
  1989. if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
  1990. td->urb->actual_length =
  1991. td->urb->transfer_buffer_length -
  1992. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  1993. if (td->urb->transfer_buffer_length <
  1994. td->urb->actual_length) {
  1995. xhci_warn(xhci, "HC gave bad length "
  1996. "of %d bytes left\n",
  1997. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
  1998. td->urb->actual_length = 0;
  1999. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  2000. *status = -EREMOTEIO;
  2001. else
  2002. *status = 0;
  2003. }
  2004. /* Don't overwrite a previously set error code */
  2005. if (*status == -EINPROGRESS) {
  2006. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  2007. *status = -EREMOTEIO;
  2008. else
  2009. *status = 0;
  2010. }
  2011. } else {
  2012. td->urb->actual_length =
  2013. td->urb->transfer_buffer_length;
  2014. /* Ignore a short packet completion if the
  2015. * untransferred length was zero.
  2016. */
  2017. if (*status == -EREMOTEIO)
  2018. *status = 0;
  2019. }
  2020. } else {
  2021. /* Slow path - walk the list, starting from the dequeue
  2022. * pointer, to get the actual length transferred.
  2023. */
  2024. td->urb->actual_length = 0;
  2025. for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
  2026. cur_trb != event_trb;
  2027. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  2028. if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
  2029. !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
  2030. td->urb->actual_length +=
  2031. TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
  2032. }
  2033. /* If the ring didn't stop on a Link or No-op TRB, add
  2034. * in the actual bytes transferred from the Normal TRB
  2035. */
  2036. if (trb_comp_code != COMP_STOP_INVAL)
  2037. td->urb->actual_length +=
  2038. TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
  2039. EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  2040. if (trb_comp_code == COMP_SHORT_TX) {
  2041. xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
  2042. td->urb_length_set = true;
  2043. return 0;
  2044. }
  2045. }
  2046. return finish_td(xhci, td, event_trb, event, ep, status, false);
  2047. }
  2048. /*
  2049. * If this function returns an error condition, it means it got a Transfer
  2050. * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  2051. * At this point, the host controller is probably hosed and should be reset.
  2052. */
  2053. static int handle_tx_event(struct xhci_hcd *xhci,
  2054. struct xhci_transfer_event *event)
  2055. __releases(&xhci->lock)
  2056. __acquires(&xhci->lock)
  2057. {
  2058. struct xhci_virt_device *xdev;
  2059. struct xhci_virt_ep *ep;
  2060. struct xhci_ring *ep_ring;
  2061. unsigned int slot_id;
  2062. int ep_index;
  2063. struct xhci_td *td = NULL;
  2064. dma_addr_t event_dma;
  2065. struct xhci_segment *event_seg;
  2066. union xhci_trb *event_trb;
  2067. struct urb *urb = NULL;
  2068. int status = -EINPROGRESS;
  2069. struct urb_priv *urb_priv;
  2070. struct xhci_ep_ctx *ep_ctx;
  2071. struct list_head *tmp;
  2072. u32 trb_comp_code;
  2073. int ret = 0;
  2074. int td_num = 0;
  2075. bool handling_skipped_tds = false;
  2076. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  2077. xdev = xhci->devs[slot_id];
  2078. if (!xdev) {
  2079. xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
  2080. xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
  2081. (unsigned long long) xhci_trb_virt_to_dma(
  2082. xhci->event_ring->deq_seg,
  2083. xhci->event_ring->dequeue),
  2084. lower_32_bits(le64_to_cpu(event->buffer)),
  2085. upper_32_bits(le64_to_cpu(event->buffer)),
  2086. le32_to_cpu(event->transfer_len),
  2087. le32_to_cpu(event->flags));
  2088. xhci_dbg(xhci, "Event ring:\n");
  2089. xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
  2090. return -ENODEV;
  2091. }
  2092. /* Endpoint ID is 1 based, our index is zero based */
  2093. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  2094. ep = &xdev->eps[ep_index];
  2095. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  2096. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  2097. if (!ep_ring ||
  2098. (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
  2099. EP_STATE_DISABLED) {
  2100. xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
  2101. "or incorrect stream ring\n");
  2102. xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
  2103. (unsigned long long) xhci_trb_virt_to_dma(
  2104. xhci->event_ring->deq_seg,
  2105. xhci->event_ring->dequeue),
  2106. lower_32_bits(le64_to_cpu(event->buffer)),
  2107. upper_32_bits(le64_to_cpu(event->buffer)),
  2108. le32_to_cpu(event->transfer_len),
  2109. le32_to_cpu(event->flags));
  2110. xhci_dbg(xhci, "Event ring:\n");
  2111. xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
  2112. return -ENODEV;
  2113. }
  2114. /* Count current td numbers if ep->skip is set */
  2115. if (ep->skip) {
  2116. list_for_each(tmp, &ep_ring->td_list)
  2117. td_num++;
  2118. }
  2119. event_dma = le64_to_cpu(event->buffer);
  2120. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  2121. /* Look for common error cases */
  2122. switch (trb_comp_code) {
  2123. /* Skip codes that require special handling depending on
  2124. * transfer type
  2125. */
  2126. case COMP_SUCCESS:
  2127. if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
  2128. break;
  2129. if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
  2130. trb_comp_code = COMP_SHORT_TX;
  2131. else
  2132. xhci_warn_ratelimited(xhci,
  2133. "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
  2134. case COMP_SHORT_TX:
  2135. break;
  2136. case COMP_STOP:
  2137. xhci_dbg(xhci, "Stopped on Transfer TRB\n");
  2138. break;
  2139. case COMP_STOP_INVAL:
  2140. xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
  2141. break;
  2142. case COMP_STOP_SHORT:
  2143. xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
  2144. break;
  2145. case COMP_STALL:
  2146. xhci_dbg(xhci, "Stalled endpoint\n");
  2147. ep->ep_state |= EP_HALTED;
  2148. status = -EPIPE;
  2149. break;
  2150. case COMP_TRB_ERR:
  2151. xhci_warn(xhci, "WARN: TRB error on endpoint\n");
  2152. status = -EILSEQ;
  2153. break;
  2154. case COMP_SPLIT_ERR:
  2155. case COMP_TX_ERR:
  2156. xhci_dbg(xhci, "Transfer error on endpoint\n");
  2157. status = -EPROTO;
  2158. break;
  2159. case COMP_BABBLE:
  2160. xhci_dbg(xhci, "Babble error on endpoint\n");
  2161. status = -EOVERFLOW;
  2162. break;
  2163. case COMP_DB_ERR:
  2164. xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
  2165. status = -ENOSR;
  2166. break;
  2167. case COMP_BW_OVER:
  2168. xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
  2169. break;
  2170. case COMP_BUFF_OVER:
  2171. xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
  2172. break;
  2173. case COMP_UNDERRUN:
  2174. /*
  2175. * When the Isoch ring is empty, the xHC will generate
  2176. * a Ring Overrun Event for IN Isoch endpoint or Ring
  2177. * Underrun Event for OUT Isoch endpoint.
  2178. */
  2179. xhci_dbg(xhci, "underrun event on endpoint\n");
  2180. if (!list_empty(&ep_ring->td_list))
  2181. xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
  2182. "still with TDs queued?\n",
  2183. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  2184. ep_index);
  2185. goto cleanup;
  2186. case COMP_OVERRUN:
  2187. xhci_dbg(xhci, "overrun event on endpoint\n");
  2188. if (!list_empty(&ep_ring->td_list))
  2189. xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
  2190. "still with TDs queued?\n",
  2191. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  2192. ep_index);
  2193. goto cleanup;
  2194. case COMP_DEV_ERR:
2195. xhci_warn(xhci, "WARN: detected an incompatible device\n");
  2196. status = -EPROTO;
  2197. break;
  2198. case COMP_MISSED_INT:
  2199. /*
2200. * When a Missed Service Error is encountered, one or more isoc TDs
2201. * may have been skipped by the xHC.
2202. * Set the skip flag of the ep_ring; the missed TDs are completed as
2203. * short transfers the next time the ep_ring is processed.
  2204. */
  2205. ep->skip = true;
  2206. xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
  2207. goto cleanup;
  2208. case COMP_PING_ERR:
  2209. ep->skip = true;
  2210. xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
  2211. goto cleanup;
  2212. default:
  2213. if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
  2214. status = 0;
  2215. break;
  2216. }
  2217. xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
  2218. trb_comp_code);
  2219. goto cleanup;
  2220. }
  2221. do {
  2222. /* This TRB should be in the TD at the head of this ring's
  2223. * TD list.
  2224. */
  2225. if (list_empty(&ep_ring->td_list)) {
  2226. /*
  2227. * A stopped endpoint may generate an extra completion
  2228. * event if the device was suspended. Don't print
  2229. * warnings.
  2230. */
  2231. if (!(trb_comp_code == COMP_STOP ||
  2232. trb_comp_code == COMP_STOP_INVAL)) {
  2233. xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
  2234. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  2235. ep_index);
  2236. xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
  2237. (le32_to_cpu(event->flags) &
  2238. TRB_TYPE_BITMASK)>>10);
  2239. xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
  2240. }
  2241. if (ep->skip) {
  2242. ep->skip = false;
  2243. xhci_dbg(xhci, "td_list is empty while skip "
  2244. "flag set. Clear skip flag.\n");
  2245. }
  2246. ret = 0;
  2247. goto cleanup;
  2248. }
  2249. /* We've skipped all the TDs on the ep ring when ep->skip set */
  2250. if (ep->skip && td_num == 0) {
  2251. ep->skip = false;
  2252. xhci_dbg(xhci, "All tds on the ep_ring skipped. "
  2253. "Clear skip flag.\n");
  2254. ret = 0;
  2255. goto cleanup;
  2256. }
  2257. td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
  2258. if (ep->skip)
  2259. td_num--;
  2260. /* Is this a TRB in the currently executing TD? */
  2261. event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
  2262. td->last_trb, event_dma, false);
  2263. /*
2264. * Skip the Force Stopped Event. The event_trb (event_dma) of a FSE
2265. * is not in the current TD pointed to by ep_ring->dequeue because
2266. * the hardware dequeue pointer is still at the previous TRB
2267. * of the current TD. The previous TRB may be a Link TRB or the
2268. * last TRB of the previous TD. The command completion handler
2269. * will take care of the rest.
  2270. */
  2271. if (!event_seg && (trb_comp_code == COMP_STOP ||
  2272. trb_comp_code == COMP_STOP_INVAL)) {
  2273. ret = 0;
  2274. goto cleanup;
  2275. }
  2276. if (!event_seg) {
  2277. if (!ep->skip ||
  2278. !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
  2279. /* Some host controllers give a spurious
  2280. * successful event after a short transfer.
  2281. * Ignore it.
  2282. */
  2283. if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
  2284. ep_ring->last_td_was_short) {
  2285. ep_ring->last_td_was_short = false;
  2286. ret = 0;
  2287. goto cleanup;
  2288. }
  2289. /* HC is busted, give up! */
  2290. xhci_err(xhci,
  2291. "ERROR Transfer event TRB DMA ptr not "
  2292. "part of current TD ep_index %d "
  2293. "comp_code %u\n", ep_index,
  2294. trb_comp_code);
  2295. trb_in_td(xhci, ep_ring->deq_seg,
  2296. ep_ring->dequeue, td->last_trb,
  2297. event_dma, true);
  2298. return -ESHUTDOWN;
  2299. }
  2300. ret = skip_isoc_td(xhci, td, event, ep, &status);
  2301. goto cleanup;
  2302. }
  2303. if (trb_comp_code == COMP_SHORT_TX)
  2304. ep_ring->last_td_was_short = true;
  2305. else
  2306. ep_ring->last_td_was_short = false;
  2307. if (ep->skip) {
  2308. xhci_dbg(xhci, "Found td. Clear skip flag.\n");
  2309. ep->skip = false;
  2310. }
  2311. event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
  2312. sizeof(*event_trb)];
  2313. /*
  2314. * No-op TRB should not trigger interrupts.
  2315. * If event_trb is a no-op TRB, it means the
  2316. * corresponding TD has been cancelled. Just ignore
  2317. * the TD.
  2318. */
  2319. if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
  2320. xhci_dbg(xhci,
  2321. "event_trb is a no-op TRB. Skip it\n");
  2322. goto cleanup;
  2323. }
  2324. /* Now update the urb's actual_length and give back to
  2325. * the core
  2326. */
  2327. if (usb_endpoint_xfer_control(&td->urb->ep->desc))
  2328. ret = process_ctrl_td(xhci, td, event_trb, event, ep,
  2329. &status);
  2330. else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
  2331. ret = process_isoc_td(xhci, td, event_trb, event, ep,
  2332. &status);
  2333. else
  2334. ret = process_bulk_intr_td(xhci, td, event_trb, event,
  2335. ep, &status);
  2336. cleanup:
  2337. handling_skipped_tds = ep->skip &&
  2338. trb_comp_code != COMP_MISSED_INT &&
  2339. trb_comp_code != COMP_PING_ERR;
  2340. /*
  2341. * Do not update event ring dequeue pointer if we're in a loop
  2342. * processing missed tds.
  2343. */
  2344. if (!handling_skipped_tds)
  2345. inc_deq(xhci, xhci->event_ring);
  2346. if (ret) {
  2347. urb = td->urb;
  2348. urb_priv = urb->hcpriv;
  2349. xhci_urb_free_priv(urb_priv);
  2350. usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
  2351. if ((urb->actual_length != urb->transfer_buffer_length &&
  2352. (urb->transfer_flags &
  2353. URB_SHORT_NOT_OK)) ||
  2354. (status != 0 &&
  2355. !usb_endpoint_xfer_isoc(&urb->ep->desc)))
  2356. xhci_dbg(xhci, "Giveback URB %p, len = %d, "
  2357. "expected = %d, status = %d\n",
  2358. urb, urb->actual_length,
  2359. urb->transfer_buffer_length,
  2360. status);
  2361. spin_unlock(&xhci->lock);
  2362. /* EHCI, UHCI, and OHCI always unconditionally set the
  2363. * urb->status of an isochronous endpoint to 0.
  2364. */
  2365. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
  2366. status = 0;
  2367. usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
  2368. spin_lock(&xhci->lock);
  2369. }
  2370. /*
2371. * If ep->skip is set, it means there are missed TDs on the
2372. * endpoint ring that still need to be taken care of.
2373. * Process them as short transfers until we reach the TD pointed
2374. * to by the event.
  2375. */
  2376. } while (handling_skipped_tds);
  2377. return 0;
  2378. }
  2379. /*
  2380. * This function handles all OS-owned events on the event ring. It may drop
  2381. * xhci->lock between event processing (e.g. to pass up port status changes).
  2382. * Returns >0 for "possibly more events to process" (caller should call again),
  2383. * otherwise 0 if done. In future, <0 returns should indicate error code.
  2384. */
  2385. static int xhci_handle_event(struct xhci_hcd *xhci)
  2386. {
  2387. union xhci_trb *event;
  2388. int update_ptrs = 1;
  2389. int ret;
  2390. if (!xhci->event_ring || !xhci->event_ring->dequeue) {
  2391. xhci->error_bitmask |= 1 << 1;
  2392. return 0;
  2393. }
  2394. event = xhci->event_ring->dequeue;
  2395. /* Does the HC or OS own the TRB? */
  2396. if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
  2397. xhci->event_ring->cycle_state) {
  2398. xhci->error_bitmask |= 1 << 2;
  2399. return 0;
  2400. }
  2401. /*
  2402. * Barrier between reading the TRB_CYCLE (valid) flag above and any
  2403. * speculative reads of the event's flags/data below.
  2404. */
  2405. rmb();
  2406. /* FIXME: Handle more event types. */
  2407. switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
  2408. case TRB_TYPE(TRB_COMPLETION):
  2409. handle_cmd_completion(xhci, &event->event_cmd);
  2410. break;
  2411. case TRB_TYPE(TRB_PORT_STATUS):
  2412. handle_port_status(xhci, event);
  2413. update_ptrs = 0;
  2414. break;
  2415. case TRB_TYPE(TRB_TRANSFER):
  2416. ret = handle_tx_event(xhci, &event->trans_event);
  2417. if (ret < 0)
  2418. xhci->error_bitmask |= 1 << 9;
  2419. else
  2420. update_ptrs = 0;
  2421. break;
  2422. case TRB_TYPE(TRB_DEV_NOTE):
  2423. handle_device_notification(xhci, event);
  2424. break;
  2425. default:
  2426. if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
  2427. TRB_TYPE(48))
  2428. handle_vendor_event(xhci, event);
  2429. else
  2430. xhci->error_bitmask |= 1 << 3;
  2431. }
  2432. /* Any of the above functions may drop and re-acquire the lock, so check
  2433. * to make sure a watchdog timer didn't mark the host as non-responsive.
  2434. */
  2435. if (xhci->xhc_state & XHCI_STATE_DYING) {
  2436. xhci_dbg(xhci, "xHCI host dying, returning from "
  2437. "event handler.\n");
  2438. return 0;
  2439. }
  2440. if (update_ptrs)
  2441. /* Update SW event ring dequeue pointer */
  2442. inc_deq(xhci, xhci->event_ring);
  2443. /* Are there more items on the event ring? Caller will call us again to
  2444. * check.
  2445. */
  2446. return 1;
  2447. }
  2448. /*
  2449. * xHCI spec says we can get an interrupt, and if the HC has an error condition,
  2450. * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
  2451. * indicators of an event TRB error, but we check the status *first* to be safe.
  2452. */
  2453. irqreturn_t xhci_irq(struct usb_hcd *hcd)
  2454. {
  2455. struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  2456. u32 status;
  2457. u64 temp_64;
  2458. union xhci_trb *event_ring_deq;
  2459. dma_addr_t deq;
  2460. spin_lock(&xhci->lock);
  2461. /* Check if the xHC generated the interrupt, or the irq is shared */
  2462. status = readl(&xhci->op_regs->status);
  2463. if (status == 0xffffffff)
  2464. goto hw_died;
  2465. if (!(status & STS_EINT)) {
  2466. spin_unlock(&xhci->lock);
  2467. return IRQ_NONE;
  2468. }
  2469. if (status & STS_FATAL) {
  2470. xhci_warn(xhci, "WARNING: Host System Error\n");
  2471. xhci_halt(xhci);
  2472. hw_died:
  2473. spin_unlock(&xhci->lock);
  2474. return IRQ_HANDLED;
  2475. }
  2476. /*
  2477. * Clear the op reg interrupt status first,
  2478. * so we can receive interrupts from other MSI-X interrupters.
  2479. * Write 1 to clear the interrupt status.
  2480. */
  2481. status |= STS_EINT;
  2482. writel(status, &xhci->op_regs->status);
  2483. /* FIXME when MSI-X is supported and there are multiple vectors */
  2484. /* Clear the MSI-X event interrupt status */
  2485. if (hcd->irq) {
  2486. u32 irq_pending;
  2487. /* Acknowledge the PCI interrupt */
  2488. irq_pending = readl(&xhci->ir_set->irq_pending);
  2489. irq_pending |= IMAN_IP;
  2490. writel(irq_pending, &xhci->ir_set->irq_pending);
  2491. }
  2492. if (xhci->xhc_state & XHCI_STATE_DYING) {
  2493. xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
  2494. "Shouldn't IRQs be disabled?\n");
  2495. /* Clear the event handler busy flag (RW1C);
  2496. * the event ring should be empty.
  2497. */
  2498. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  2499. xhci_write_64(xhci, temp_64 | ERST_EHB,
  2500. &xhci->ir_set->erst_dequeue);
  2501. spin_unlock(&xhci->lock);
  2502. return IRQ_HANDLED;
  2503. }
  2504. event_ring_deq = xhci->event_ring->dequeue;
  2505. /* FIXME this should be a delayed service routine
  2506. * that clears the EHB.
  2507. */
  2508. while (xhci_handle_event(xhci) > 0) {}
  2509. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  2510. /* If necessary, update the HW's version of the event ring deq ptr. */
  2511. if (event_ring_deq != xhci->event_ring->dequeue) {
  2512. deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
  2513. xhci->event_ring->dequeue);
  2514. if (deq == 0)
  2515. xhci_warn(xhci, "WARN something wrong with SW event "
  2516. "ring dequeue ptr.\n");
  2517. /* Update HC event ring dequeue pointer */
  2518. temp_64 &= ERST_PTR_MASK;
  2519. temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
  2520. }
  2521. /* Clear the event handler busy flag (RW1C); event ring is empty. */
  2522. temp_64 |= ERST_EHB;
  2523. xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
  2524. spin_unlock(&xhci->lock);
  2525. return IRQ_HANDLED;
  2526. }
  2527. irqreturn_t xhci_msi_irq(int irq, void *hcd)
  2528. {
  2529. return xhci_irq(hcd);
  2530. }
  2531. /**** Endpoint Ring Operations ****/
  2532. /*
  2533. * Generic function for queueing a TRB on a ring.
  2534. * The caller must have checked to make sure there's room on the ring.
  2535. *
  2536. * @more_trbs_coming: Will you enqueue more TRBs before calling
  2537. * prepare_transfer()?
  2538. */
  2539. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2540. bool more_trbs_coming,
  2541. u32 field1, u32 field2, u32 field3, u32 field4)
  2542. {
  2543. struct xhci_generic_trb *trb;
  2544. trb = &ring->enqueue->generic;
  2545. trb->field[0] = cpu_to_le32(field1);
  2546. trb->field[1] = cpu_to_le32(field2);
  2547. trb->field[2] = cpu_to_le32(field3);
  2548. trb->field[3] = cpu_to_le32(field4);
  2549. inc_enq(xhci, ring, more_trbs_coming);
  2550. }
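/*
 * Illustrative sketch (not part of the original source): a Normal TRB for a
 * single buffer could be queued through this helper roughly as follows, using
 * the same field macros this file uses elsewhere; "buf_dma" and "buf_len" are
 * hypothetical names.
 *
 *	queue_trb(xhci, ep_ring, false,
 *		  lower_32_bits(buf_dma),
 *		  upper_32_bits(buf_dma),
 *		  TRB_LEN(buf_len) | TRB_INTR_TARGET(0),
 *		  TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state);
 */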
  2551. /*
  2552. * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  2553. * FIXME allocate segments if the ring is full.
  2554. */
  2555. static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  2556. u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
  2557. {
  2558. unsigned int num_trbs_needed;
  2559. /* Make sure the endpoint has been added to xHC schedule */
  2560. switch (ep_state) {
  2561. case EP_STATE_DISABLED:
  2562. /*
  2563. * USB core changed config/interfaces without notifying us,
  2564. * or hardware is reporting the wrong state.
  2565. */
  2566. xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
  2567. return -ENOENT;
  2568. case EP_STATE_ERROR:
  2569. xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
  2570. /* FIXME event handling code for error needs to clear it */
  2571. /* XXX not sure if this should be -ENOENT or not */
  2572. return -EINVAL;
  2573. case EP_STATE_HALTED:
  2574. xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
  2575. case EP_STATE_STOPPED:
  2576. case EP_STATE_RUNNING:
  2577. break;
  2578. default:
  2579. xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
  2580. /*
  2581. * FIXME issue Configure Endpoint command to try to get the HC
  2582. * back into a known state.
  2583. */
  2584. return -EINVAL;
  2585. }
  2586. while (1) {
  2587. if (room_on_ring(xhci, ep_ring, num_trbs))
  2588. break;
  2589. if (ep_ring == xhci->cmd_ring) {
  2590. xhci_err(xhci, "Do not support expand command ring\n");
  2591. return -ENOMEM;
  2592. }
  2593. xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
  2594. "ERROR no room on ep ring, try ring expansion");
  2595. num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
  2596. if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
  2597. mem_flags)) {
  2598. xhci_err(xhci, "Ring expansion failed\n");
  2599. return -ENOMEM;
  2600. }
  2601. }
  2602. if (enqueue_is_link_trb(ep_ring)) {
  2603. struct xhci_ring *ring = ep_ring;
  2604. union xhci_trb *next;
  2605. next = ring->enqueue;
  2606. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  2607. /* If we're not dealing with 0.95 hardware or isoc rings
  2608. * on AMD 0.96 host, clear the chain bit.
  2609. */
  2610. if (!xhci_link_trb_quirk(xhci) &&
  2611. !(ring->type == TYPE_ISOC &&
  2612. (xhci->quirks & XHCI_AMD_0x96_HOST)))
  2613. next->link.control &= cpu_to_le32(~TRB_CHAIN);
  2614. else
  2615. next->link.control |= cpu_to_le32(TRB_CHAIN);
  2616. wmb();
  2617. next->link.control ^= cpu_to_le32(TRB_CYCLE);
  2618. /* Toggle the cycle bit after the last ring segment. */
  2619. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  2620. ring->cycle_state ^= 1;
  2621. }
  2622. ring->enq_seg = ring->enq_seg->next;
  2623. ring->enqueue = ring->enq_seg->trbs;
  2624. next = ring->enqueue;
  2625. }
  2626. }
  2627. return 0;
  2628. }
  2629. static int prepare_transfer(struct xhci_hcd *xhci,
  2630. struct xhci_virt_device *xdev,
  2631. unsigned int ep_index,
  2632. unsigned int stream_id,
  2633. unsigned int num_trbs,
  2634. struct urb *urb,
  2635. unsigned int td_index,
  2636. gfp_t mem_flags)
  2637. {
  2638. int ret;
  2639. struct urb_priv *urb_priv;
  2640. struct xhci_td *td;
  2641. struct xhci_ring *ep_ring;
  2642. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  2643. ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
  2644. if (!ep_ring) {
  2645. xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
  2646. stream_id);
  2647. return -EINVAL;
  2648. }
  2649. ret = prepare_ring(xhci, ep_ring,
  2650. le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
  2651. num_trbs, mem_flags);
  2652. if (ret)
  2653. return ret;
  2654. urb_priv = urb->hcpriv;
  2655. td = urb_priv->td[td_index];
  2656. INIT_LIST_HEAD(&td->td_list);
  2657. INIT_LIST_HEAD(&td->cancelled_td_list);
  2658. if (td_index == 0) {
  2659. ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
  2660. if (unlikely(ret))
  2661. return ret;
  2662. }
  2663. td->urb = urb;
  2664. /* Add this TD to the tail of the endpoint ring's TD list */
  2665. list_add_tail(&td->td_list, &ep_ring->td_list);
  2666. td->start_seg = ep_ring->enq_seg;
  2667. td->first_trb = ep_ring->enqueue;
  2668. urb_priv->td[td_index] = td;
  2669. return 0;
  2670. }
  2671. static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
  2672. {
  2673. int num_sgs, num_trbs, running_total, temp, i;
  2674. struct scatterlist *sg;
  2675. sg = NULL;
  2676. num_sgs = urb->num_mapped_sgs;
  2677. temp = urb->transfer_buffer_length;
  2678. num_trbs = 0;
  2679. for_each_sg(urb->sg, sg, num_sgs, i) {
  2680. unsigned int len = sg_dma_len(sg);
  2681. /* Scatter gather list entries may cross 64KB boundaries */
  2682. running_total = TRB_MAX_BUFF_SIZE -
  2683. (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
  2684. running_total &= TRB_MAX_BUFF_SIZE - 1;
  2685. if (running_total != 0)
  2686. num_trbs++;
  2687. /* How many more 64KB chunks to transfer, how many more TRBs? */
  2688. while (running_total < sg_dma_len(sg) && running_total < temp) {
  2689. num_trbs++;
  2690. running_total += TRB_MAX_BUFF_SIZE;
  2691. }
  2692. len = min_t(int, len, temp);
  2693. temp -= len;
  2694. if (temp == 0)
  2695. break;
  2696. }
  2697. return num_trbs;
  2698. }
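/*
 * Worked example (illustrative): an sg entry of 96KB whose DMA address sits
 * 8KB below a 64KB boundary yields one TRB for the first 8KB and two more
 * TRBs (64KB + 24KB) for the rest, i.e. three TRBs for that entry.
 */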
  2699. static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
  2700. {
  2701. if (num_trbs != 0)
  2702. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
  2703. "TRBs, %d left\n", __func__,
  2704. urb->ep->desc.bEndpointAddress, num_trbs);
  2705. if (running_total != urb->transfer_buffer_length)
  2706. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
  2707. "queued %#x (%d), asked for %#x (%d)\n",
  2708. __func__,
  2709. urb->ep->desc.bEndpointAddress,
  2710. running_total, running_total,
  2711. urb->transfer_buffer_length,
  2712. urb->transfer_buffer_length);
  2713. }
  2714. static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
  2715. unsigned int ep_index, unsigned int stream_id, int start_cycle,
  2716. struct xhci_generic_trb *start_trb)
  2717. {
  2718. /*
  2719. * Pass all the TRBs to the hardware at once and make sure this write
  2720. * isn't reordered.
  2721. */
  2722. wmb();
  2723. if (start_cycle)
  2724. start_trb->field[3] |= cpu_to_le32(start_cycle);
  2725. else
  2726. start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
  2727. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
  2728. }
  2729. /*
  2730. * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
  2731. * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
  2732. * (comprised of sg list entries) can take several service intervals to
  2733. * transmit.
  2734. */
  2735. int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2736. struct urb *urb, int slot_id, unsigned int ep_index)
  2737. {
  2738. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
  2739. xhci->devs[slot_id]->out_ctx, ep_index);
  2740. int xhci_interval;
  2741. int ep_interval;
  2742. xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
  2743. ep_interval = urb->interval;
  2744. /* Convert to microframes */
  2745. if (urb->dev->speed == USB_SPEED_LOW ||
  2746. urb->dev->speed == USB_SPEED_FULL)
  2747. ep_interval *= 8;
  2748. /* FIXME change this to a warning and a suggestion to use the new API
  2749. * to set the polling interval (once the API is added).
  2750. */
  2751. if (xhci_interval != ep_interval) {
  2752. dev_dbg_ratelimited(&urb->dev->dev,
  2753. "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
  2754. ep_interval, ep_interval == 1 ? "" : "s",
  2755. xhci_interval, xhci_interval == 1 ? "" : "s");
  2756. urb->interval = xhci_interval;
  2757. /* Convert back to frames for LS/FS devices */
  2758. if (urb->dev->speed == USB_SPEED_LOW ||
  2759. urb->dev->speed == USB_SPEED_FULL)
  2760. urb->interval /= 8;
  2761. }
  2762. return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
  2763. }
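/*
 * Worked example (illustrative): a full-speed interrupt URB submitted with
 * urb->interval = 4 frames is converted to 32 microframes above. If the
 * endpoint context reports a 16-microframe interval instead, the URB is
 * switched to 16 microframes and converted back to 2 frames before being
 * queued as a bulk transfer.
 */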
  2764. /*
  2765. * The TD size is the number of bytes remaining in the TD (including this TRB),
  2766. * right shifted by 10.
  2767. * It must fit in bits 21:17, so it can't be bigger than 31.
  2768. */
  2769. static u32 xhci_td_remainder(unsigned int remainder)
  2770. {
  2771. u32 max = (1 << (21 - 17 + 1)) - 1;
  2772. if ((remainder >> 10) >= max)
  2773. return max << 17;
  2774. else
  2775. return (remainder >> 10) << 17;
  2776. }
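/*
 * Worked example (illustrative): with 70000 bytes left in the TD,
 * 70000 >> 10 = 68 exceeds the 5-bit maximum, so the TD size field is
 * clamped to 31 << 17; with 8192 bytes left it is simply 8 << 17.
 */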
  2777. /*
  2778. * For xHCI 1.0 host controllers, TD size is the number of max packet sized
  2779. * packets remaining in the TD (*not* including this TRB).
  2780. *
  2781. * Total TD packet count = total_packet_count =
  2782. * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
  2783. *
  2784. * Packets transferred up to and including this TRB = packets_transferred =
  2785. * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
  2786. *
  2787. * TD size = total_packet_count - packets_transferred
  2788. *
  2789. * It must fit in bits 21:17, so it can't be bigger than 31.
  2790. * The last TRB in a TD must have the TD size set to zero.
  2791. */
  2792. static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
  2793. unsigned int total_packet_count, struct urb *urb,
  2794. unsigned int num_trbs_left)
  2795. {
  2796. int packets_transferred;
  2797. /* One TRB with a zero-length data packet. */
  2798. if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
  2799. return 0;
2800. /* None of the TRB queueing functions count the current TRB in
2801. * running_total.
  2802. */
  2803. packets_transferred = (running_total + trb_buff_len) /
  2804. GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
  2805. if ((total_packet_count - packets_transferred) > 31)
  2806. return 31 << 17;
  2807. return (total_packet_count - packets_transferred) << 17;
  2808. }
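/*
 * Worked example (illustrative): for a 3072-byte TD on a 512-byte endpoint,
 * total_packet_count = 6. After a first TRB of 1024 bytes (running_total = 0,
 * trb_buff_len = 1024), packets_transferred = 2 and the TD size field is
 * (6 - 2) << 17. The last TRB of the TD always reports a TD size of zero.
 */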
  2809. static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2810. struct urb *urb, int slot_id, unsigned int ep_index)
  2811. {
  2812. struct xhci_ring *ep_ring;
  2813. unsigned int num_trbs;
  2814. struct urb_priv *urb_priv;
  2815. struct xhci_td *td;
  2816. struct scatterlist *sg;
  2817. int num_sgs;
  2818. int trb_buff_len, this_sg_len, running_total, ret;
  2819. unsigned int total_packet_count;
  2820. bool zero_length_needed;
  2821. bool first_trb;
  2822. int last_trb_num;
  2823. u64 addr;
  2824. bool more_trbs_coming;
  2825. struct xhci_generic_trb *start_trb;
  2826. int start_cycle;
  2827. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  2828. if (!ep_ring)
  2829. return -EINVAL;
  2830. num_trbs = count_sg_trbs_needed(xhci, urb);
  2831. num_sgs = urb->num_mapped_sgs;
  2832. total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
  2833. usb_endpoint_maxp(&urb->ep->desc));
  2834. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  2835. ep_index, urb->stream_id,
  2836. num_trbs, urb, 0, mem_flags);
  2837. if (ret < 0)
  2838. return ret;
  2839. urb_priv = urb->hcpriv;
  2840. /* Deal with URB_ZERO_PACKET - need one more td/trb */
  2841. zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
  2842. urb_priv->length == 2;
  2843. if (zero_length_needed) {
  2844. num_trbs++;
  2845. xhci_dbg(xhci, "Creating zero length td.\n");
  2846. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  2847. ep_index, urb->stream_id,
  2848. 1, urb, 1, mem_flags);
  2849. if (ret < 0)
  2850. return ret;
  2851. }
  2852. td = urb_priv->td[0];
  2853. /*
  2854. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  2855. * until we've finished creating all the other TRBs. The ring's cycle
  2856. * state may change as we enqueue the other TRBs, so save it too.
  2857. */
  2858. start_trb = &ep_ring->enqueue->generic;
  2859. start_cycle = ep_ring->cycle_state;
  2860. running_total = 0;
  2861. /*
  2862. * How much data is in the first TRB?
  2863. *
  2864. * There are three forces at work for TRB buffer pointers and lengths:
  2865. * 1. We don't want to walk off the end of this sg-list entry buffer.
  2866. * 2. The transfer length that the driver requested may be smaller than
  2867. * the amount of memory allocated for this scatter-gather list.
  2868. * 3. TRBs buffers can't cross 64KB boundaries.
  2869. */
  2870. sg = urb->sg;
  2871. addr = (u64) sg_dma_address(sg);
  2872. this_sg_len = sg_dma_len(sg);
  2873. trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
  2874. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2875. if (trb_buff_len > urb->transfer_buffer_length)
  2876. trb_buff_len = urb->transfer_buffer_length;
  2877. first_trb = true;
  2878. last_trb_num = zero_length_needed ? 2 : 1;
  2879. /* Queue the first TRB, even if it's zero-length */
  2880. do {
  2881. u32 field = 0;
  2882. u32 length_field = 0;
  2883. u32 remainder = 0;
  2884. /* Don't change the cycle bit of the first TRB until later */
  2885. if (first_trb) {
  2886. first_trb = false;
  2887. if (start_cycle == 0)
  2888. field |= 0x1;
  2889. } else
  2890. field |= ep_ring->cycle_state;
  2891. /* Chain all the TRBs together; clear the chain bit in the last
  2892. * TRB to indicate it's the last TRB in the chain.
  2893. */
  2894. if (num_trbs > last_trb_num) {
  2895. field |= TRB_CHAIN;
  2896. } else if (num_trbs == last_trb_num) {
  2897. td->last_trb = ep_ring->enqueue;
  2898. field |= TRB_IOC;
  2899. } else if (zero_length_needed && num_trbs == 1) {
  2900. trb_buff_len = 0;
  2901. urb_priv->td[1]->last_trb = ep_ring->enqueue;
  2902. field |= TRB_IOC;
  2903. }
  2904. /* Only set interrupt on short packet for IN endpoints */
  2905. if (usb_urb_dir_in(urb))
  2906. field |= TRB_ISP;
  2907. if (TRB_MAX_BUFF_SIZE -
  2908. (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
  2909. xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
  2910. xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
  2911. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  2912. (unsigned int) addr + trb_buff_len);
  2913. }
  2914. /* Set the TRB length, TD size, and interrupter fields. */
  2915. if (xhci->hci_version < 0x100) {
  2916. remainder = xhci_td_remainder(
  2917. urb->transfer_buffer_length -
  2918. running_total);
  2919. } else {
  2920. remainder = xhci_v1_0_td_remainder(running_total,
  2921. trb_buff_len, total_packet_count, urb,
  2922. num_trbs - 1);
  2923. }
  2924. length_field = TRB_LEN(trb_buff_len) |
  2925. remainder |
  2926. TRB_INTR_TARGET(0);
  2927. if (num_trbs > 1)
  2928. more_trbs_coming = true;
  2929. else
  2930. more_trbs_coming = false;
  2931. queue_trb(xhci, ep_ring, more_trbs_coming,
  2932. lower_32_bits(addr),
  2933. upper_32_bits(addr),
  2934. length_field,
  2935. field | TRB_TYPE(TRB_NORMAL));
  2936. --num_trbs;
  2937. running_total += trb_buff_len;
  2938. /* Calculate length for next transfer --
  2939. * Are we done queueing all the TRBs for this sg entry?
  2940. */
  2941. this_sg_len -= trb_buff_len;
  2942. if (this_sg_len == 0) {
  2943. --num_sgs;
  2944. if (num_sgs == 0)
  2945. break;
  2946. sg = sg_next(sg);
  2947. addr = (u64) sg_dma_address(sg);
  2948. this_sg_len = sg_dma_len(sg);
  2949. } else {
  2950. addr += trb_buff_len;
  2951. }
  2952. trb_buff_len = TRB_MAX_BUFF_SIZE -
  2953. (addr & (TRB_MAX_BUFF_SIZE - 1));
  2954. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2955. if (running_total + trb_buff_len > urb->transfer_buffer_length)
  2956. trb_buff_len =
  2957. urb->transfer_buffer_length - running_total;
  2958. } while (num_trbs > 0);
  2959. check_trb_math(urb, num_trbs, running_total);
  2960. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  2961. start_cycle, start_trb);
  2962. return 0;
  2963. }
  2964. /* This is very similar to what ehci-q.c qtd_fill() does */
  2965. int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2966. struct urb *urb, int slot_id, unsigned int ep_index)
  2967. {
  2968. struct xhci_ring *ep_ring;
  2969. struct urb_priv *urb_priv;
  2970. struct xhci_td *td;
  2971. int num_trbs;
  2972. struct xhci_generic_trb *start_trb;
  2973. bool first_trb;
  2974. int last_trb_num;
  2975. bool more_trbs_coming;
  2976. bool zero_length_needed;
  2977. int start_cycle;
  2978. u32 field, length_field;
  2979. int running_total, trb_buff_len, ret;
  2980. unsigned int total_packet_count;
  2981. u64 addr;
  2982. if (urb->num_sgs)
  2983. return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
  2984. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  2985. if (!ep_ring)
  2986. return -EINVAL;
  2987. num_trbs = 0;
  2988. /* How much data is (potentially) left before the 64KB boundary? */
  2989. running_total = TRB_MAX_BUFF_SIZE -
  2990. (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
  2991. running_total &= TRB_MAX_BUFF_SIZE - 1;
  2992. /* If there's some data on this 64KB chunk, or we have to send a
  2993. * zero-length transfer, we need at least one TRB
  2994. */
  2995. if (running_total != 0 || urb->transfer_buffer_length == 0)
  2996. num_trbs++;
  2997. /* How many more 64KB chunks to transfer, how many more TRBs? */
  2998. while (running_total < urb->transfer_buffer_length) {
  2999. num_trbs++;
  3000. running_total += TRB_MAX_BUFF_SIZE;
  3001. }
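/*
 * Worked example (illustrative): a 200KB transfer whose DMA address sits 4KB
 * below a 64KB boundary needs a 4KB TRB, three full 64KB TRBs, and a final
 * 4KB TRB: five TRBs in total.
 */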
  3002. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3003. ep_index, urb->stream_id,
  3004. num_trbs, urb, 0, mem_flags);
  3005. if (ret < 0)
  3006. return ret;
  3007. urb_priv = urb->hcpriv;
  3008. /* Deal with URB_ZERO_PACKET - need one more td/trb */
  3009. zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
  3010. urb_priv->length == 2;
  3011. if (zero_length_needed) {
  3012. num_trbs++;
  3013. xhci_dbg(xhci, "Creating zero length td.\n");
  3014. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3015. ep_index, urb->stream_id,
  3016. 1, urb, 1, mem_flags);
  3017. if (ret < 0)
  3018. return ret;
  3019. }
  3020. td = urb_priv->td[0];
  3021. /*
  3022. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  3023. * until we've finished creating all the other TRBs. The ring's cycle
  3024. * state may change as we enqueue the other TRBs, so save it too.
  3025. */
  3026. start_trb = &ep_ring->enqueue->generic;
  3027. start_cycle = ep_ring->cycle_state;
  3028. running_total = 0;
  3029. total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
  3030. usb_endpoint_maxp(&urb->ep->desc));
  3031. /* How much data is in the first TRB? */
  3032. addr = (u64) urb->transfer_dma;
  3033. trb_buff_len = TRB_MAX_BUFF_SIZE -
  3034. (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
  3035. if (trb_buff_len > urb->transfer_buffer_length)
  3036. trb_buff_len = urb->transfer_buffer_length;
  3037. first_trb = true;
  3038. last_trb_num = zero_length_needed ? 2 : 1;
  3039. /* Queue the first TRB, even if it's zero-length */
  3040. do {
  3041. u32 remainder = 0;
  3042. field = 0;
  3043. /* Don't change the cycle bit of the first TRB until later */
  3044. if (first_trb) {
  3045. first_trb = false;
  3046. if (start_cycle == 0)
  3047. field |= 0x1;
  3048. } else
  3049. field |= ep_ring->cycle_state;
  3050. /* Chain all the TRBs together; clear the chain bit in the last
  3051. * TRB to indicate it's the last TRB in the chain.
  3052. */
  3053. if (num_trbs > last_trb_num) {
  3054. field |= TRB_CHAIN;
  3055. } else if (num_trbs == last_trb_num) {
  3056. td->last_trb = ep_ring->enqueue;
  3057. field |= TRB_IOC;
  3058. } else if (zero_length_needed && num_trbs == 1) {
  3059. trb_buff_len = 0;
  3060. urb_priv->td[1]->last_trb = ep_ring->enqueue;
  3061. field |= TRB_IOC;
  3062. }
  3063. /* Only set interrupt on short packet for IN endpoints */
  3064. if (usb_urb_dir_in(urb))
  3065. field |= TRB_ISP;
  3066. /* Set the TRB length, TD size, and interrupter fields. */
  3067. if (xhci->hci_version < 0x100) {
  3068. remainder = xhci_td_remainder(
  3069. urb->transfer_buffer_length -
  3070. running_total);
  3071. } else {
  3072. remainder = xhci_v1_0_td_remainder(running_total,
  3073. trb_buff_len, total_packet_count, urb,
  3074. num_trbs - 1);
  3075. }
  3076. length_field = TRB_LEN(trb_buff_len) |
  3077. remainder |
  3078. TRB_INTR_TARGET(0);
  3079. if (num_trbs > 1)
  3080. more_trbs_coming = true;
  3081. else
  3082. more_trbs_coming = false;
  3083. queue_trb(xhci, ep_ring, more_trbs_coming,
  3084. lower_32_bits(addr),
  3085. upper_32_bits(addr),
  3086. length_field,
  3087. field | TRB_TYPE(TRB_NORMAL));
  3088. --num_trbs;
  3089. running_total += trb_buff_len;
  3090. /* Calculate length for next transfer */
  3091. addr += trb_buff_len;
  3092. trb_buff_len = urb->transfer_buffer_length - running_total;
  3093. if (trb_buff_len > TRB_MAX_BUFF_SIZE)
  3094. trb_buff_len = TRB_MAX_BUFF_SIZE;
  3095. } while (num_trbs > 0);
  3096. check_trb_math(urb, num_trbs, running_total);
  3097. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  3098. start_cycle, start_trb);
  3099. return 0;
  3100. }
  3101. /* Caller must have locked xhci->lock */
  3102. int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3103. struct urb *urb, int slot_id, unsigned int ep_index)
  3104. {
  3105. struct xhci_ring *ep_ring;
  3106. int num_trbs;
  3107. int ret;
  3108. struct usb_ctrlrequest *setup;
  3109. struct xhci_generic_trb *start_trb;
  3110. int start_cycle;
  3111. u32 field, length_field;
  3112. struct urb_priv *urb_priv;
  3113. struct xhci_td *td;
  3114. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  3115. if (!ep_ring)
  3116. return -EINVAL;
  3117. /*
  3118. * Need to copy setup packet into setup TRB, so we can't use the setup
  3119. * DMA address.
  3120. */
  3121. if (!urb->setup_packet)
  3122. return -EINVAL;
  3123. /* 1 TRB for setup, 1 for status */
  3124. num_trbs = 2;
  3125. /*
  3126. * Don't need to check if we need additional event data and normal TRBs,
  3127. * since data in control transfers will never get bigger than 16MB
  3128. * XXX: can we get a buffer that crosses 64KB boundaries?
  3129. */
  3130. if (urb->transfer_buffer_length > 0)
  3131. num_trbs++;
  3132. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3133. ep_index, urb->stream_id,
  3134. num_trbs, urb, 0, mem_flags);
  3135. if (ret < 0)
  3136. return ret;
  3137. urb_priv = urb->hcpriv;
  3138. td = urb_priv->td[0];
  3139. /*
  3140. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  3141. * until we've finished creating all the other TRBs. The ring's cycle
  3142. * state may change as we enqueue the other TRBs, so save it too.
  3143. */
  3144. start_trb = &ep_ring->enqueue->generic;
  3145. start_cycle = ep_ring->cycle_state;
  3146. /* Queue setup TRB - see section 6.4.1.2.1 */
  3147. /* FIXME better way to translate setup_packet into two u32 fields? */
  3148. setup = (struct usb_ctrlrequest *) urb->setup_packet;
  3149. field = 0;
  3150. field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
  3151. if (start_cycle == 0)
  3152. field |= 0x1;
  3153. /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
  3154. if (xhci->hci_version >= 0x100) {
  3155. if (urb->transfer_buffer_length > 0) {
  3156. if (setup->bRequestType & USB_DIR_IN)
  3157. field |= TRB_TX_TYPE(TRB_DATA_IN);
  3158. else
  3159. field |= TRB_TX_TYPE(TRB_DATA_OUT);
  3160. }
  3161. }
  3162. queue_trb(xhci, ep_ring, true,
  3163. setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
  3164. le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
  3165. TRB_LEN(8) | TRB_INTR_TARGET(0),
  3166. /* Immediate data in pointer */
  3167. field);
  3168. /* If there's data, queue data TRBs */
  3169. /* Only set interrupt on short packet for IN endpoints */
  3170. if (usb_urb_dir_in(urb))
  3171. field = TRB_ISP | TRB_TYPE(TRB_DATA);
  3172. else
  3173. field = TRB_TYPE(TRB_DATA);
  3174. length_field = TRB_LEN(urb->transfer_buffer_length) |
  3175. xhci_td_remainder(urb->transfer_buffer_length) |
  3176. TRB_INTR_TARGET(0);
  3177. if (urb->transfer_buffer_length > 0) {
  3178. if (setup->bRequestType & USB_DIR_IN)
  3179. field |= TRB_DIR_IN;
  3180. queue_trb(xhci, ep_ring, true,
  3181. lower_32_bits(urb->transfer_dma),
  3182. upper_32_bits(urb->transfer_dma),
  3183. length_field,
  3184. field | ep_ring->cycle_state);
  3185. }
  3186. /* Save the DMA address of the last TRB in the TD */
  3187. td->last_trb = ep_ring->enqueue;
  3188. /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
  3189. /* If the device sent data, the status stage is an OUT transfer */
  3190. if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
  3191. field = 0;
  3192. else
  3193. field = TRB_DIR_IN;
  3194. queue_trb(xhci, ep_ring, false,
  3195. 0,
  3196. 0,
  3197. TRB_INTR_TARGET(0),
  3198. /* Event on completion */
  3199. field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
  3200. giveback_first_trb(xhci, slot_id, ep_index, 0,
  3201. start_cycle, start_trb);
  3202. return 0;
  3203. }
  3204. static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
  3205. struct urb *urb, int i)
  3206. {
  3207. int num_trbs = 0;
  3208. u64 addr, td_len;
  3209. addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
  3210. td_len = urb->iso_frame_desc[i].length;
  3211. num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
  3212. TRB_MAX_BUFF_SIZE);
  3213. if (num_trbs == 0)
  3214. num_trbs++;
  3215. return num_trbs;
  3216. }
  3217. /*
  3218. * The transfer burst count field of the isochronous TRB defines the number of
  3219. * bursts that are required to move all packets in this TD. Only SuperSpeed
  3220. * devices can burst up to bMaxBurst number of packets per service interval.
  3221. * This field is zero based, meaning a value of zero in the field means one
  3222. * burst. Basically, for everything but SuperSpeed devices, this field will be
  3223. * zero. Only xHCI 1.0 host controllers support this field.
  3224. */
  3225. static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
  3226. struct usb_device *udev,
  3227. struct urb *urb, unsigned int total_packet_count)
  3228. {
  3229. unsigned int max_burst;
  3230. if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
  3231. return 0;
  3232. max_burst = urb->ep->ss_ep_comp.bMaxBurst;
  3233. return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
  3234. }
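/*
 * Worked example (illustrative): a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 (four packets per burst) moving 7 packets in a TD needs
 * DIV_ROUND_UP(7, 4) = 2 bursts, so the zero-based TBC field is 1.
 */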
  3235. /*
  3236. * Returns the number of packets in the last "burst" of packets. This field is
  3237. * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
  3238. * the last burst packet count is equal to the total number of packets in the
  3239. * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
  3240. * must contain (bMaxBurst + 1) number of packets, but the last burst can
  3241. * contain 1 to (bMaxBurst + 1) packets.
  3242. */
  3243. static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
  3244. struct usb_device *udev,
  3245. struct urb *urb, unsigned int total_packet_count)
  3246. {
  3247. unsigned int max_burst;
  3248. unsigned int residue;
  3249. if (xhci->hci_version < 0x100)
  3250. return 0;
  3251. switch (udev->speed) {
  3252. case USB_SPEED_SUPER:
  3253. /* bMaxBurst is zero based: 0 means 1 packet per burst */
  3254. max_burst = urb->ep->ss_ep_comp.bMaxBurst;
  3255. residue = total_packet_count % (max_burst + 1);
  3256. /* If residue is zero, the last burst contains (max_burst + 1)
  3257. * number of packets, but the TLBPC field is zero-based.
  3258. */
  3259. if (residue == 0)
  3260. return max_burst;
  3261. return residue - 1;
  3262. default:
  3263. if (total_packet_count == 0)
  3264. return 0;
  3265. return total_packet_count - 1;
  3266. }
  3267. }
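/*
 * Worked example (illustrative): continuing the case above (7 packets,
 * bMaxBurst = 3), the last burst carries 7 % 4 = 3 packets, so the zero-based
 * TLBPC field is 2. With 8 packets the residue is 0 and the field becomes
 * max_burst = 3.
 */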
  3268. /*
3269. * Calculates the Frame ID field of the isochronous TRB, which identifies
3270. * the target frame that the Interval associated with this Isochronous
3271. * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec.
  3272. *
  3273. * Returns actual frame id on success, negative value on error.
  3274. */
  3275. static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
  3276. struct urb *urb, int index)
  3277. {
  3278. int start_frame, ist, ret = 0;
  3279. int start_frame_id, end_frame_id, current_frame_id;
  3280. if (urb->dev->speed == USB_SPEED_LOW ||
  3281. urb->dev->speed == USB_SPEED_FULL)
  3282. start_frame = urb->start_frame + index * urb->interval;
  3283. else
  3284. start_frame = (urb->start_frame + index * urb->interval) >> 3;
  3285. /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
  3286. *
  3287. * If bit [3] of IST is cleared to '0', software can add a TRB no
  3288. * later than IST[2:0] Microframes before that TRB is scheduled to
  3289. * be executed.
  3290. * If bit [3] of IST is set to '1', software can add a TRB no later
  3291. * than IST[2:0] Frames before that TRB is scheduled to be executed.
  3292. */
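/*
 * Illustrative example: an IST field of 0b1010 has bit [3] set and
 * IST[2:0] = 2, so the decoding below yields 2 << 3 = 16 microframes
 * (two frames).
 */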
  3293. ist = HCS_IST(xhci->hcs_params2) & 0x7;
  3294. if (HCS_IST(xhci->hcs_params2) & (1 << 3))
  3295. ist <<= 3;
  3296. /* Software shall not schedule an Isoch TD with a Frame ID value that
  3297. * is less than the Start Frame ID or greater than the End Frame ID,
  3298. * where:
  3299. *
  3300. * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
  3301. * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
  3302. *
  3303. * Both the End Frame ID and Start Frame ID values are calculated
3304. * in microframes. When software determines the valid Frame ID value,
3305. * the End Frame ID value should be rounded down to the nearest Frame
  3306. * boundary, and the Start Frame ID value should be rounded up to the
  3307. * nearest Frame boundary.
  3308. */
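/*
 * Illustrative example: with MFINDEX = 1000 microframes and ist = 16,
 * start_frame_id = roundup(1017, 8) = 1024 and
 * end_frame_id = rounddown(8160, 8) = 8160 microframes; shifting by 3 and
 * masking with 0x7ff gives a valid frame window of [128, 1020].
 */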
  3309. current_frame_id = readl(&xhci->run_regs->microframe_index);
  3310. start_frame_id = roundup(current_frame_id + ist + 1, 8);
  3311. end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
  3312. start_frame &= 0x7ff;
  3313. start_frame_id = (start_frame_id >> 3) & 0x7ff;
  3314. end_frame_id = (end_frame_id >> 3) & 0x7ff;
  3315. xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
  3316. __func__, index, readl(&xhci->run_regs->microframe_index),
  3317. start_frame_id, end_frame_id, start_frame);
  3318. if (start_frame_id < end_frame_id) {
  3319. if (start_frame > end_frame_id ||
  3320. start_frame < start_frame_id)
  3321. ret = -EINVAL;
  3322. } else if (start_frame_id > end_frame_id) {
  3323. if ((start_frame > end_frame_id &&
  3324. start_frame < start_frame_id))
  3325. ret = -EINVAL;
  3326. } else {
  3327. ret = -EINVAL;
  3328. }
  3329. if (index == 0) {
  3330. if (ret == -EINVAL || start_frame == start_frame_id) {
  3331. start_frame = start_frame_id + 1;
  3332. if (urb->dev->speed == USB_SPEED_LOW ||
  3333. urb->dev->speed == USB_SPEED_FULL)
  3334. urb->start_frame = start_frame;
  3335. else
  3336. urb->start_frame = start_frame << 3;
  3337. ret = 0;
  3338. }
  3339. }
  3340. if (ret) {
  3341. xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
  3342. start_frame, current_frame_id, index,
  3343. start_frame_id, end_frame_id);
  3344. xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
  3345. return ret;
  3346. }
  3347. return start_frame;
  3348. }
  3349. /* This is for isoc transfer */
  3350. static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3351. struct urb *urb, int slot_id, unsigned int ep_index)
  3352. {
  3353. struct xhci_ring *ep_ring;
  3354. struct urb_priv *urb_priv;
  3355. struct xhci_td *td;
  3356. int num_tds, trbs_per_td;
  3357. struct xhci_generic_trb *start_trb;
  3358. bool first_trb;
  3359. int start_cycle;
  3360. u32 field, length_field;
  3361. int running_total, trb_buff_len, td_len, td_remain_len, ret;
  3362. u64 start_addr, addr;
  3363. int i, j;
  3364. bool more_trbs_coming;
  3365. struct xhci_virt_ep *xep;
  3366. xep = &xhci->devs[slot_id]->eps[ep_index];
  3367. ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  3368. num_tds = urb->number_of_packets;
  3369. if (num_tds < 1) {
  3370. xhci_dbg(xhci, "Isoc URB with zero packets?\n");
  3371. return -EINVAL;
  3372. }
  3373. start_addr = (u64) urb->transfer_dma;
  3374. start_trb = &ep_ring->enqueue->generic;
  3375. start_cycle = ep_ring->cycle_state;
  3376. urb_priv = urb->hcpriv;
  3377. /* Queue the first TRB, even if it's zero-length */
  3378. for (i = 0; i < num_tds; i++) {
  3379. unsigned int total_packet_count;
  3380. unsigned int burst_count;
  3381. unsigned int residue;
  3382. first_trb = true;
  3383. running_total = 0;
  3384. addr = start_addr + urb->iso_frame_desc[i].offset;
  3385. td_len = urb->iso_frame_desc[i].length;
  3386. td_remain_len = td_len;
  3387. total_packet_count = DIV_ROUND_UP(td_len,
  3388. GET_MAX_PACKET(
  3389. usb_endpoint_maxp(&urb->ep->desc)));
  3390. /* A zero-length transfer still involves at least one packet. */
  3391. if (total_packet_count == 0)
  3392. total_packet_count++;
  3393. burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
  3394. total_packet_count);
  3395. residue = xhci_get_last_burst_packet_count(xhci,
  3396. urb->dev, urb, total_packet_count);
  3397. trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
  3398. ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
  3399. urb->stream_id, trbs_per_td, urb, i, mem_flags);
  3400. if (ret < 0) {
  3401. if (i == 0)
  3402. return ret;
  3403. goto cleanup;
  3404. }
  3405. td = urb_priv->td[i];
  3406. for (j = 0; j < trbs_per_td; j++) {
  3407. int frame_id = 0;
  3408. u32 remainder = 0;
  3409. field = 0;
  3410. if (first_trb) {
  3411. field = TRB_TBC(burst_count) |
  3412. TRB_TLBPC(residue);
  3413. /* Queue the isoc TRB */
  3414. field |= TRB_TYPE(TRB_ISOC);
  3415. /* Calculate Frame ID and SIA fields */
  3416. if (!(urb->transfer_flags & URB_ISO_ASAP) &&
  3417. HCC_CFC(xhci->hcc_params)) {
  3418. frame_id = xhci_get_isoc_frame_id(xhci,
  3419. urb,
  3420. i);
  3421. if (frame_id >= 0)
  3422. field |= TRB_FRAME_ID(frame_id);
  3423. else
  3424. field |= TRB_SIA;
  3425. } else
  3426. field |= TRB_SIA;
  3427. if (i == 0) {
  3428. if (start_cycle == 0)
  3429. field |= 0x1;
  3430. } else
  3431. field |= ep_ring->cycle_state;
  3432. first_trb = false;
  3433. } else {
  3434. /* Queue other normal TRBs */
  3435. field |= TRB_TYPE(TRB_NORMAL);
  3436. field |= ep_ring->cycle_state;
  3437. }
  3438. /* Only set interrupt on short packet for IN EPs */
  3439. if (usb_urb_dir_in(urb))
  3440. field |= TRB_ISP;
  3441. /* Chain all the TRBs together; clear the chain bit in
  3442. * the last TRB to indicate it's the last TRB in the
  3443. * chain.
  3444. */
  3445. if (j < trbs_per_td - 1) {
  3446. field |= TRB_CHAIN;
  3447. more_trbs_coming = true;
  3448. } else {
  3449. td->last_trb = ep_ring->enqueue;
  3450. field |= TRB_IOC;
  3451. if (xhci->hci_version == 0x100 &&
  3452. !(xhci->quirks &
  3453. XHCI_AVOID_BEI)) {
  3454. /* Set BEI bit except for the last td */
  3455. if (i < num_tds - 1)
  3456. field |= TRB_BEI;
  3457. }
  3458. more_trbs_coming = false;
  3459. }
  3460. /* Calculate TRB length */
  3461. trb_buff_len = TRB_MAX_BUFF_SIZE -
  3462. (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
  3463. if (trb_buff_len > td_remain_len)
  3464. trb_buff_len = td_remain_len;
  3465. /* Set the TRB length, TD size, & interrupter fields. */
  3466. if (xhci->hci_version < 0x100) {
  3467. remainder = xhci_td_remainder(
  3468. td_len - running_total);
  3469. } else {
  3470. remainder = xhci_v1_0_td_remainder(
  3471. running_total, trb_buff_len,
  3472. total_packet_count, urb,
  3473. (trbs_per_td - j - 1));
  3474. }
  3475. length_field = TRB_LEN(trb_buff_len) |
  3476. remainder |
  3477. TRB_INTR_TARGET(0);
  3478. queue_trb(xhci, ep_ring, more_trbs_coming,
  3479. lower_32_bits(addr),
  3480. upper_32_bits(addr),
  3481. length_field,
  3482. field);
  3483. running_total += trb_buff_len;
  3484. addr += trb_buff_len;
  3485. td_remain_len -= trb_buff_len;
  3486. }
  3487. /* Check TD length */
  3488. if (running_total != td_len) {
  3489. xhci_err(xhci, "ISOC TD length unmatch\n");
  3490. ret = -EINVAL;
  3491. goto cleanup;
  3492. }
  3493. }
  3494. /* store the next frame id */
  3495. if (HCC_CFC(xhci->hcc_params))
  3496. xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
  3497. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
  3498. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  3499. usb_amd_quirk_pll_disable();
  3500. }
  3501. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
  3502. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  3503. start_cycle, start_trb);
  3504. return 0;
  3505. cleanup:
  3506. /* Clean up a partially enqueued isoc transfer. */
  3507. for (i--; i >= 0; i--)
  3508. list_del_init(&urb_priv->td[i]->td_list);
  3509. /* Use the first TD as a temporary variable to turn the TDs we've queued
  3510. * into No-ops with a software-owned cycle bit. That way the hardware
  3511. * won't accidentally start executing bogus TDs when we partially
  3512. * overwrite them. td->first_trb and td->start_seg are already set.
  3513. */
  3514. urb_priv->td[0]->last_trb = ep_ring->enqueue;
  3515. /* Every TRB except the first & last will have its cycle bit flipped. */
  3516. td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
  3517. /* Reset the ring enqueue back to the first TRB and its cycle bit. */
  3518. ep_ring->enqueue = urb_priv->td[0]->first_trb;
  3519. ep_ring->enq_seg = urb_priv->td[0]->start_seg;
  3520. ep_ring->cycle_state = start_cycle;
  3521. ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
  3522. usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
  3523. return ret;
  3524. }
  3525. static int ep_ring_is_processing(struct xhci_hcd *xhci,
  3526. int slot_id, unsigned int ep_index)
  3527. {
  3528. struct xhci_virt_device *xdev;
  3529. struct xhci_ring *ep_ring;
  3530. struct xhci_ep_ctx *ep_ctx;
  3531. struct xhci_virt_ep *xep;
  3532. dma_addr_t hw_deq;
  3533. xdev = xhci->devs[slot_id];
  3534. xep = &xhci->devs[slot_id]->eps[ep_index];
  3535. ep_ring = xep->ring;
  3536. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  3537. if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
  3538. return 0;
  3539. hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
  3540. return (hw_deq !=
  3541. xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
  3542. }
  3543. /*
3544. * Check the transfer ring to guarantee there is enough room for the URB.
3545. * Update the ISO URB's start_frame and interval.
3546. * Update the interval as xhci_queue_intr_tx does. Use the xHC's frame index
3547. * to update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if
3548. * Contiguous Frame ID is not supported by the HC.
  3549. */
  3550. int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  3551. struct urb *urb, int slot_id, unsigned int ep_index)
  3552. {
  3553. struct xhci_virt_device *xdev;
  3554. struct xhci_ring *ep_ring;
  3555. struct xhci_ep_ctx *ep_ctx;
  3556. int start_frame;
  3557. int xhci_interval;
  3558. int ep_interval;
  3559. int num_tds, num_trbs, i;
  3560. int ret;
  3561. struct xhci_virt_ep *xep;
  3562. int ist;
  3563. xdev = xhci->devs[slot_id];
  3564. xep = &xhci->devs[slot_id]->eps[ep_index];
  3565. ep_ring = xdev->eps[ep_index].ring;
  3566. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  3567. num_trbs = 0;
  3568. num_tds = urb->number_of_packets;
  3569. for (i = 0; i < num_tds; i++)
  3570. num_trbs += count_isoc_trbs_needed(xhci, urb, i);
  3571. /* Check the ring to guarantee there is enough room for the whole urb.
3572. * Do not insert any TD of the URB into the ring if the check fails.
  3573. */
  3574. ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
  3575. num_trbs, mem_flags);
  3576. if (ret)
  3577. return ret;
  3578. /*
  3579. * Check interval value. This should be done before we start to
  3580. * calculate the start frame value.
  3581. */
  3582. xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
  3583. ep_interval = urb->interval;
  3584. /* Convert to microframes */
  3585. if (urb->dev->speed == USB_SPEED_LOW ||
  3586. urb->dev->speed == USB_SPEED_FULL)
  3587. ep_interval *= 8;
  3588. /* FIXME change this to a warning and a suggestion to use the new API
  3589. * to set the polling interval (once the API is added).
  3590. */
  3591. if (xhci_interval != ep_interval) {
  3592. dev_dbg_ratelimited(&urb->dev->dev,
  3593. "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
  3594. ep_interval, ep_interval == 1 ? "" : "s",
  3595. xhci_interval, xhci_interval == 1 ? "" : "s");
  3596. urb->interval = xhci_interval;
  3597. /* Convert back to frames for LS/FS devices */
  3598. if (urb->dev->speed == USB_SPEED_LOW ||
  3599. urb->dev->speed == USB_SPEED_FULL)
  3600. urb->interval /= 8;
  3601. }
  3602. /* Calculate the start frame and put it in urb->start_frame. */
  3603. if (HCC_CFC(xhci->hcc_params) &&
  3604. ep_ring_is_processing(xhci, slot_id, ep_index)) {
  3605. urb->start_frame = xep->next_frame_id;
  3606. goto skip_start_over;
  3607. }
  3608. start_frame = readl(&xhci->run_regs->microframe_index);
  3609. start_frame &= 0x3fff;
  3610. /*
3611. * Round up to the next frame and account for the time before the TRB
3612. * actually gets scheduled by the hardware.
  3613. */
  3614. ist = HCS_IST(xhci->hcs_params2) & 0x7;
  3615. if (HCS_IST(xhci->hcs_params2) & (1 << 3))
  3616. ist <<= 3;
  3617. start_frame += ist + XHCI_CFC_DELAY;
  3618. start_frame = roundup(start_frame, 8);
  3619. /*
  3620. * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3621. * is greater than 8 microframes.
  3622. */
  3623. if (urb->dev->speed == USB_SPEED_LOW ||
  3624. urb->dev->speed == USB_SPEED_FULL) {
  3625. start_frame = roundup(start_frame, urb->interval << 3);
  3626. urb->start_frame = start_frame >> 3;
  3627. } else {
  3628. start_frame = roundup(start_frame, urb->interval);
  3629. urb->start_frame = start_frame;
  3630. }
  3631. skip_start_over:
  3632. ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
  3633. return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
  3634. }
  3635. /**** Command Ring Operations ****/
  3636. /* Generic function for queueing a command TRB on the command ring.
  3637. * Check to make sure there's room on the command ring for one command TRB.
  3638. * Also check that there's room reserved for commands that must not fail.
  3639. * If this is a command that must not fail, meaning command_must_succeed = TRUE,
  3640. * then only check for the number of reserved spots.
  3641. * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  3642. * because the command event handler may want to resubmit a failed command.
  3643. */
  3644. static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3645. u32 field1, u32 field2,
  3646. u32 field3, u32 field4, bool command_must_succeed)
  3647. {
  3648. int reserved_trbs = xhci->cmd_ring_reserved_trbs;
  3649. int ret;
  3650. if (xhci->xhc_state) {
  3651. xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
  3652. return -ESHUTDOWN;
  3653. }
  3654. if (!command_must_succeed)
  3655. reserved_trbs++;
  3656. ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
  3657. reserved_trbs, GFP_ATOMIC);
  3658. if (ret < 0) {
  3659. xhci_err(xhci, "ERR: No room for command on command ring\n");
  3660. if (command_must_succeed)
  3661. xhci_err(xhci, "ERR: Reserved TRB counting for "
  3662. "unfailable commands failed.\n");
  3663. return ret;
  3664. }
  3665. cmd->command_trb = xhci->cmd_ring->enqueue;
  3666. list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
  3667. /* if there are no other commands queued we start the timeout timer */
  3668. if (xhci->cmd_list.next == &cmd->cmd_list &&
  3669. !timer_pending(&xhci->cmd_timer)) {
  3670. xhci->current_cmd = cmd;
  3671. mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
  3672. }
  3673. queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
  3674. field4 | xhci->cmd_ring->cycle_state);
  3675. return 0;
  3676. }
  3677. /* Queue a slot enable or disable request on the command ring */
  3678. int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3679. u32 trb_type, u32 slot_id)
  3680. {
  3681. return queue_command(xhci, cmd, 0, 0, 0,
  3682. TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
  3683. }
  3684. /* Queue an address device command TRB */
  3685. int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3686. dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
  3687. {
  3688. return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
  3689. upper_32_bits(in_ctx_ptr), 0,
  3690. TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
  3691. | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
  3692. }
  3693. int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3694. u32 field1, u32 field2, u32 field3, u32 field4)
  3695. {
  3696. return queue_command(xhci, cmd, field1, field2, field3, field4, false);
  3697. }
  3698. /* Queue a reset device command TRB */
  3699. int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3700. u32 slot_id)
  3701. {
  3702. return queue_command(xhci, cmd, 0, 0, 0,
  3703. TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
  3704. false);
  3705. }
  3706. /* Queue a configure endpoint command TRB */
  3707. int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
  3708. struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
  3709. u32 slot_id, bool command_must_succeed)
  3710. {
  3711. return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
  3712. upper_32_bits(in_ctx_ptr), 0,
  3713. TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
  3714. command_must_succeed);
  3715. }
  3716. /* Queue an evaluate context command TRB */
  3717. int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3718. dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
  3719. {
  3720. return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
  3721. upper_32_bits(in_ctx_ptr), 0,
  3722. TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
  3723. command_must_succeed);
  3724. }
  3725. /*
  3726. * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
  3727. * activity on an endpoint that is about to be suspended.
  3728. */
  3729. int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3730. int slot_id, unsigned int ep_index, int suspend)
  3731. {
  3732. u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
  3733. u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
  3734. u32 type = TRB_TYPE(TRB_STOP_RING);
  3735. u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
  3736. return queue_command(xhci, cmd, 0, 0, 0,
  3737. trb_slot_id | trb_ep_index | type | trb_suspend, false);
  3738. }
  3739. /* Set Transfer Ring Dequeue Pointer command */
  3740. void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
  3741. unsigned int slot_id, unsigned int ep_index,
  3742. unsigned int stream_id,
  3743. struct xhci_dequeue_state *deq_state)
  3744. {
  3745. dma_addr_t addr;
  3746. u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
  3747. u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
  3748. u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
  3749. u32 trb_sct = 0;
  3750. u32 type = TRB_TYPE(TRB_SET_DEQ);
  3751. struct xhci_virt_ep *ep;
  3752. struct xhci_command *cmd;
  3753. int ret;
  3754. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  3755. "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
  3756. deq_state->new_deq_seg,
  3757. (unsigned long long)deq_state->new_deq_seg->dma,
  3758. deq_state->new_deq_ptr,
  3759. (unsigned long long)xhci_trb_virt_to_dma(
  3760. deq_state->new_deq_seg, deq_state->new_deq_ptr),
  3761. deq_state->new_cycle_state);
  3762. addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
  3763. deq_state->new_deq_ptr);
  3764. if (addr == 0) {
  3765. xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
  3766. xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
  3767. deq_state->new_deq_seg, deq_state->new_deq_ptr);
  3768. return;
  3769. }
  3770. ep = &xhci->devs[slot_id]->eps[ep_index];
  3771. if ((ep->ep_state & SET_DEQ_PENDING)) {
  3772. xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
  3773. xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
  3774. return;
  3775. }
  3776. /* This function gets called from contexts where it cannot sleep */
  3777. cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
  3778. if (!cmd) {
  3779. xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
  3780. return;
  3781. }
  3782. ep->queued_deq_seg = deq_state->new_deq_seg;
  3783. ep->queued_deq_ptr = deq_state->new_deq_ptr;
  3784. if (stream_id)
  3785. trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
  3786. ret = queue_command(xhci, cmd,
  3787. lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
  3788. upper_32_bits(addr), trb_stream_id,
  3789. trb_slot_id | trb_ep_index | type, false);
  3790. if (ret < 0) {
  3791. xhci_free_command(xhci, cmd);
  3792. return;
  3793. }
  3794. /* Stop the TD queueing code from ringing the doorbell until
  3795. * this command completes. The HC won't set the dequeue pointer
  3796. * if the ring is running, and ringing the doorbell starts the
  3797. * ring running.
  3798. */
  3799. ep->ep_state |= SET_DEQ_PENDING;
  3800. }
  3801. int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3802. int slot_id, unsigned int ep_index)
  3803. {
  3804. u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
  3805. u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
  3806. u32 type = TRB_TYPE(TRB_RESET_EP);
  3807. return queue_command(xhci, cmd, 0, 0, 0,
  3808. trb_slot_id | trb_ep_index | type, false);
  3809. }