/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  If the HC is the producer for the event
 *    ring, it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you.  If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */

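/*
 * Worked example of the ring behavior rules above (editor's illustration,
 * not part of the original file): with a single segment of TRBS_PER_SEGMENT
 * (64) TRBs, the last TRB is a link TRB, leaving 63 usable TRBs.  Because
 * "full" is defined as enqueue + 1 == dequeue, at most 62 TRBs can ever be
 * queued, so one TRB is always free and can be turned into a new link TRB
 * when the ring is expanded.
 */
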
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-trace.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs; reject pointers at or past the end of the segment */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

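/*
 * Worked example (editor's illustration): with seg->dma == 0x10000000 and
 * sizeof(union xhci_trb) == 16 bytes, the TRB at segment_offset 3 maps to
 * 0x10000000 + 3 * 16 == 0x10000030.  Note the >= bound above: a pointer
 * one past the segment (segment_offset == TRBS_PER_SEGMENT) is rejected.
 */
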
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
                        (seg->next == xhci->event_ring->first_seg);
        else
                return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return trb == &seg->trbs[TRBS_PER_SEGMENT];
        else
                return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
        struct xhci_link_trb *link = &ring->enqueue->link;
        return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (last_trb(xhci, ring, *seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        ring->deq_updates++;

        /*
         * If this is not an event ring, and the dequeue pointer
         * is not on a link TRB, there is one more usable TRB.
         */
        if (ring->type != TYPE_EVENT &&
                        !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
                ring->num_trbs_free++;

        do {
                /*
                 * Update the dequeue pointer further if that was a link TRB or
                 * we're at the end of an event ring segment (which doesn't
                 * have link TRBs).
                 */
                if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
                        if (ring->type == TYPE_EVENT &&
                                        last_trb_on_last_seg(xhci, ring,
                                                ring->deq_seg, ring->dequeue)) {
                                ring->cycle_state ^= 1;
                        }
                        ring->deq_seg = ring->deq_seg->next;
                        ring->dequeue = ring->deq_seg->trbs;
                } else {
                        ring->dequeue++;
                }
        } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
}

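/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * consumer-side ownership test from the cycle bit rules above.  A TRB is
 * owned by the consumer while its cycle bit matches the ring's cycle state;
 * inc_deq() above then handles segment wrap and cycle toggling.  The helper
 * name is hypothetical.
 */
static inline bool xhci_example_trb_is_ours(struct xhci_ring *ring,
                union xhci_trb *trb)
{
        /* Cycle bit == ring cycle state means the producer has written it */
        return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
                        ring->cycle_state;
}
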
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool more_trbs_coming)
{
        u32 chain;
        union xhci_trb *next;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        /* If this is not an event ring, there is one less usable TRB */
        if (ring->type != TYPE_EVENT &&
                        !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
                ring->num_trbs_free--;
        next = ++(ring->enqueue);

        ring->enq_updates++;
        /* Update the enqueue pointer further if that was a link TRB or we're
         * at the end of an event ring segment (which doesn't have link TRBs).
         */
        while (last_trb(xhci, ring, ring->enq_seg, next)) {
                if (ring->type != TYPE_EVENT) {
                        /*
                         * If the caller doesn't plan on enqueueing more
                         * TDs before ringing the doorbell, then we
                         * don't want to give the link TRB to the
                         * hardware just yet.  We'll give the link TRB
                         * back in prepare_ring() just before we enqueue
                         * the TD at the top of the ring.
                         */
                        if (!chain && !more_trbs_coming)
                                break;

                        /* If we're not dealing with 0.95 hardware or
                         * isoc rings on AMD 0.96 host,
                         * carry over the chain bit of the previous TRB
                         * (which may mean the chain bit is cleared).
                         */
                        if (!(ring->type == TYPE_ISOC &&
                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
                                        && !xhci_link_trb_quirk(xhci)) {
                                next->link.control &=
                                        cpu_to_le32(~TRB_CHAIN);
                                next->link.control |=
                                        cpu_to_le32(chain);
                        }
                        /* Give this link TRB to the hardware */
                        wmb();
                        next->link.control ^= cpu_to_le32(TRB_CYCLE);

                        /* Toggle the cycle bit after the last ring segment. */
                        if (last_trb_on_last_seg(xhci, ring,
                                                ring->enq_seg, next)) {
                                ring->cycle_state = (ring->cycle_state ? 0 : 1);
                        }
                }
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.  See rules
 * above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int num_trbs_in_deq_seg;

        if (ring->num_trbs_free < num_trbs)
                return 0;

        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }

        return 1;
}

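/*
 * Illustrative producer-side sketch (editor's example, not part of the
 * driver): the enqueue pattern implied by room_on_ring() and the producer
 * rules above — check for room, write the TRB with the ring's cycle state
 * in its cycle bit, then advance the enqueue pointer.  The helper name is
 * hypothetical; real TRB queueing goes through prepare_ring()/queue_trb()
 * later in this file.
 */
static int xhci_example_queue_noop(struct xhci_hcd *xhci,
                struct xhci_ring *ring)
{
        union xhci_trb *trb;

        /* Producer rule 1: check if the ring is full before you enqueue */
        if (!room_on_ring(xhci, ring, 1))
                return -ENOMEM;

        trb = ring->enqueue;
        trb->generic.field[0] = 0;
        trb->generic.field[1] = 0;
        trb->generic.field[2] = 0;
        /* Producer rule 2: write the ring cycle state to the cycle bit */
        trb->generic.field[3] = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
                        ring->cycle_state);
        /* Advance enqueue (may cross a link TRB and toggle cycle state) */
        inc_enq(xhci, ring, false);
        return 0;
}
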
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
                return;

        xhci_dbg(xhci, "// Ding dong!\n");
        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
        u64 temp_64;
        int ret;

        xhci_dbg(xhci, "Abort command ring\n");

        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);

        /* Section 4.6.1.2 of xHCI 1.0 spec says software should
         * time the completion of all xHCI commands, including
         * the Command Abort operation.  If software doesn't see
         * CRR negated in a timely manner (e.g. longer than 5
         * seconds), then it should assume that there are
         * larger problems with the xHC and assert HCRST.
         */
        ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
                xhci_err(xhci, "Stopping the command ring failed, "
                                "maybe the host is dead\n");
                xhci->xhc_state |= XHCI_STATE_DYING;
                xhci_quiesce(xhci);
                xhci_halt(xhci);
                return -ESHUTDOWN;
        }

        return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         */
        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
            (ep_state & EP_HALTED))
                return;
        writel(DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush.  It'll get there soon enough.
         */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (ep->ring && !(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                        stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                        stream_id);
        }
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
                struct urb *urb)
{
        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
                xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,
                struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_virt_ep *ep = &dev->eps[ep_index];
        struct xhci_ring *ep_ring;
        struct xhci_segment *new_seg;
        union xhci_trb *new_deq;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
        bool td_last_trb_found = false;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",
                                stream_id);
                return;
        }

        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding endpoint context");
        /* 4.6.9 the css flag is written to the stream context for streams */
        if (ep->ep_state & EP_HAS_STREAMS) {
                struct xhci_stream_ctx *ctx =
                        &ep->stream_info->stream_ctx_array[stream_id];
                hw_dequeue = le64_to_cpu(ctx->stream_ring);
        } else {
                struct xhci_ep_ctx *ep_ctx
                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
                hw_dequeue = le64_to_cpu(ep_ctx->deq);
        }

        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
        state->new_cycle_state = hw_dequeue & 0x1;

        /*
         * We want to find the pointer, segment and cycle state of the new TRB
         * (the one after the current TD's last_trb).  We know the cycle state
         * at hw_dequeue, so walk the ring until both hw_dequeue and last_trb
         * are found.
         */
        do {
                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
                        cycle_found = true;
                        if (td_last_trb_found)
                                break;
                }
                if (new_deq == cur_td->last_trb)
                        td_last_trb_found = true;

                if (cycle_found &&
                    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
                    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
                        state->new_cycle_state ^= 0x1;

                next_trb(xhci, ep_ring, &new_seg, &new_deq);

                /* Search wrapped around, bail out */
                if (new_deq == ep->ring->dequeue) {
                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
                        state->new_deq_seg = NULL;
                        state->new_deq_ptr = NULL;
                        return;
                }

        } while (!cycle_found || !td_last_trb_found);

        state->new_deq_seg = new_seg;
        state->new_deq_ptr = new_deq;

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);

        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue pointer = 0x%llx (DMA)",
                        (unsigned long long) addr);
}

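/*
 * Editor's note on the walk above (an illustration, not original text): the
 * loop always exits with new_deq pointing at the TRB just past
 * cur_td->last_trb.  For example, if the xHC stopped inside the TD being
 * cancelled, hw_dequeue is matched first (cycle_found), the walk continues
 * until last_trb is seen, and one more next_trb() step yields the final
 * dequeue position, with new_cycle_state flipped for every toggle link TRB
 * crossed after the hw_dequeue match.
 */
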
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                struct xhci_td *cur_td, bool flip_cycle)
{
        struct xhci_segment *cur_seg;
        union xhci_trb *cur_trb;

        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
                        true;
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
                if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
                        /* Unchain any chained Link TRBs, but
                         * leave the pointers intact.
                         */
                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
                        /* Flip the cycle bit (link TRBs can't be the first
                         * or last TRB).
                         */
                        if (flip_cycle)
                                cur_trb->generic.field[3] ^=
                                        cpu_to_le32(TRB_CYCLE);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Cancel (unchain) link TRB");
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Address = %p (0x%llx dma); "
                                        "in seg %p (0x%llx dma)",
                                        cur_trb,
                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                } else {
                        cur_trb->generic.field[0] = 0;
                        cur_trb->generic.field[1] = 0;
                        cur_trb->generic.field[2] = 0;
                        /* Preserve only the cycle bit of this TRB */
                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
                        /* Flip the cycle bit except on the first or last TRB */
                        if (flip_cycle && cur_trb != cur_td->first_trb &&
                                        cur_trb != cur_td->last_trb)
                                cur_trb->generic.field[3] ^=
                                        cpu_to_le32(TRB_CYCLE);
                        cur_trb->generic.field[3] |= cpu_to_le32(
                                TRB_TYPE(TRB_TR_NOOP));
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "TRB to noop at offset 0x%llx",
                                        (unsigned long long)
                                        xhci_trb_virt_to_dma(cur_seg, cur_trb));
                }
                if (cur_trb == cur_td->last_trb)
                        break;
        }
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        ep->ep_state &= ~EP_HALT_PENDING;
        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
         * timer is running on another CPU, we don't decrement stop_cmds_pending
         * (since we didn't successfully stop the watchdog timer).
         */
        if (del_timer(&ep->stop_cmd_timer))
                ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                struct xhci_td *cur_td, int status)
{
        struct usb_hcd *hcd;
        struct urb *urb;
        struct urb_priv *urb_priv;

        urb = cur_td->urb;
        urb_priv = urb->hcpriv;
        urb_priv->td_cnt++;
        hcd = bus_to_hcd(urb->dev->bus);

        /* Only give back the URB when this is the last TD in the URB */
        if (urb_priv->td_cnt == urb_priv->length) {
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                        usb_amd_quirk_pll_enable();
                        }
                }
                usb_hcd_unlink_urb_from_ep(hcd, urb);

                spin_unlock(&xhci->lock);
                usb_hcd_giveback_urb(hcd, urb, status);
                xhci_urb_free_priv(xhci, urb_priv);
                spin_lock(&xhci->lock);
        }
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, struct xhci_event_cmd *event)
{
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct list_head *entry;
        struct xhci_td *cur_td = NULL;
        struct xhci_td *last_unlinked_td;

        struct xhci_dequeue_state deq_state;

        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command "
                                "completion for disabled slot %u\n",
                                slot_id);
                return;
        }

        memset(&deq_state, 0, sizeof(deq_state));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        ep = &xhci->devs[slot_id]->eps[ep_index];

        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }

        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
         * We have the xHCI lock, so nothing can modify this list until we drop
         * it.  We're also in the event handler, so we can't get re-interrupted
         * if another Stop Endpoint command completes.
         */
        list_for_each(entry, &ep->cancelled_td_list) {
                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Removing canceled TD starting at 0x%llx (dma).",
                                (unsigned long long)xhci_trb_virt_to_dma(
                                        cur_td->start_seg, cur_td->first_trb));
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (!ep_ring) {
                        /* This shouldn't happen unless a driver is mucking
                         * with the stream ID after submission.  This will
                         * leave the TD on the hardware ring, and the hardware
                         * will try to execute it, and may access a buffer
                         * that has already been freed.  In the best case, the
                         * hardware will execute it, and the event handler will
                         * ignore the completion event for that TD, since it was
                         * removed from the td_list for that endpoint.  In
                         * short, don't muck with the stream ID after
                         * submission.
                         */
                        xhci_warn(xhci, "WARN Cancelled URB %p "
                                        "has invalid stream ID %u.\n",
                                        cur_td->urb,
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;
                }
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep->stopped_td)
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                        cur_td->urb->stream_id,
                                        cur_td, &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD
                 * anymore, so remove it from the endpoint ring's TD list.
                 * Keep it in the cancelled TD list for URB completion later.
                 */
                list_del_init(&cur_td->td_list);
        }
        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
                                ep->stopped_td->urb->stream_id, &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }

        /* Clear stopped_td if endpoint is not halted */
        if (!(ep->ep_state & EP_HALTED))
                ep->stopped_td = NULL;

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list before
         * we can complete all the URBs for the TDs we already unlinked.
         * So stop when we've completed the URB for the last TD we unlinked.
         */
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                xhci_giveback_urb_in_irq(xhci, cur_td, 0);

                /* Stop processing the cancelled list if the watchdog timer is
                 * running.
                 */
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_td *cur_td;

        while (!list_empty(&ring->td_list)) {
                cur_td = list_first_entry(&ring->td_list,
                                struct xhci_td, td_list);
                list_del_init(&cur_td->td_list);
                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);
                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                int slot_id, int ep_index)
{
        struct xhci_td *cur_td;
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        if ((ep->ep_state & EP_HAS_STREAMS) ||
                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;

                for (stream_id = 0; stream_id < ep->stream_info->num_streams;
                                stream_id++) {
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
                                        slot_id, ep_index, stream_id + 1);
                        xhci_kill_ring_urbs(xhci,
                                        ep->stream_info->stream_rings[stream_id]);
                }
        } else {
                ring = ep->ring;
                if (!ring)
                        return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Killing URBs for slot ID %u, ep index %u",
                                slot_id, ep_index);
                xhci_kill_ring_urbs(xhci, ring);
        }
        while (!list_empty(&ep->cancelled_td_list)) {
                cur_td = list_first_entry(&ep->cancelled_td_list,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);
                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_ep *ep;
        int ret, i, j;
        unsigned long flags;

        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;

        spin_lock_irqsave(&xhci->lock, flags);

        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but no command pending, "
                                "exiting.");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
        /* Oops, HC is dead or dying or at least not responding to the stop
         * endpoint command.
         */
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        ret = xhci_halt(xhci);

        spin_lock_irqsave(&xhci->lock, flags);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and it's
                 * not allowing itself to be halted.  At least interrupts are
                 * disabled.  If we call usb_hc_died(), it will attempt to
                 * disconnect all device drivers under this host.  Those
                 * disconnect() methods will wait for all URBs to be unlinked,
                 * so we must complete them.
                 */
                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
                xhci_warn(xhci, "Completing active URBs anyway.\n");
                /* We could turn all TDs on the rings to no-ops.  This won't
                 * help if the host has cached part of the ring, and is slow if
                 * we want to preserve the cycle bit.  Skip it and hope the host
                 * doesn't touch the memory.
                 */
        }
        for (i = 0; i < MAX_HC_SLOTS; i++) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; j++)
                        xhci_kill_endpoint_urbs(xhci, i, j);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Calling usb_hc_died()");
        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_virt_device *dev,
                struct xhci_ring *ep_ring,
                unsigned int ep_index)
{
        union xhci_trb *dequeue_temp;
        int num_trbs_free_temp;
        bool revert = false;

        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;

        /* If we get two back-to-back stalls, and the first stalled transfer
         * ends just before a link TRB, the dequeue pointer will be left on
         * the link TRB by the code in the while loop.  So we have to update
         * the dequeue pointer one segment further, or we'll jump off
         * the segment into la-la-land.
         */
        if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
                ep_ring->dequeue++;
                if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
                                ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                                        dev->eps[ep_index].queued_deq_ptr)
                                break;
                        ep_ring->deq_seg = ep_ring->deq_seg->next;
                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
                }
                if (ep_ring->dequeue == dequeue_temp) {
                        revert = true;
                        break;
                }
        }

        if (revert) {
                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
                ep_ring->num_trbs_free = num_trbs_free_temp;
        }
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_virt_ep *ep;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        dev = xhci->devs[slot_id];
        ep = &dev->eps[ep_index];

        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                                stream_id);
                /* XXX: Harmless??? */
                goto cleanup;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

        if (cmd_comp_code != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (cmd_comp_code) {
                case COMP_TRB_ERR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
                        break;
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
                        ep_state = le32_to_cpu(ep_ctx->ep_info);
                        ep_state &= EP_STATE_MASK;
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Slot state = %u, EP state = %u",
                                        slot_state, ep_state);
                        break;
                case COMP_EBADSLT:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                                        slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                                        cmd_comp_code);
                        break;
                }
                /* OK what do we do now?  The endpoint state is hosed, and we
                 * should never get to this point if the synchronization between
                 * queueing, and endpoint state are correct.  This might happen
                 * if the device gets disconnected after we've finished
                 * cancelling URBs, which might not be an error...
                 */
        } else {
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
                        struct xhci_stream_ctx *ctx =
                                &ep->stream_info->stream_ctx_array[stream_id];
                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
                } else {
                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                         ep->queued_deq_ptr) == deq) {
                        /* Update the ring's dequeue segment and dequeue pointer
                         * to reflect the new position.
                         */
                        update_ring_for_set_deq_completion(xhci, dev,
                                ep_ring, ep_index);
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                  ep->queued_deq_seg, ep->queued_deq_ptr);
                }
        }

cleanup:
        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
        dev->eps[ep_index].queued_deq_seg = NULL;
        dev->eps[ep_index].queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, u32 cmd_comp_code)
{
        unsigned int ep_index;

        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                "Ignoring reset ep completion code of %u", cmd_comp_code);

        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used.  Queue that here
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
                struct xhci_command *command;
                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
                if (!command) {
                        xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
                        return;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Queueing configure endpoint command");
                xhci_queue_configure_endpoint(xhci, command,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Clear our internal halted state and restart the ring(s) */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                u32 cmd_comp_code)
{
        if (cmd_comp_code == COMP_SUCCESS)
                xhci->slot_id = slot_id;
        else
                xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *virt_dev;

        virt_dev = xhci->devs[slot_id];
        if (!virt_dev)
                return;
        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
        xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware. Not worth
	 * worrying about, since this is prototype hardware. Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

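/*
 * A Reset Device command completed: nothing to do beyond a sanity check,
 * since the slot may legitimately have been disabled in the meantime.
 */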
static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

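/*
 * Remove a completed command from the command list. If someone is waiting on
 * the command, record its status and wake the waiter (who then owns the
 * command structure); otherwise nobody will, so free it here.
 */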
static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

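/* Fail every command still on the command list with an "aborted" status. */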
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;

	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}

/*
 * Turn all commands on the command ring whose status is set to "aborted" into
 * no-op TRBs. If there are other commands waiting, then restart the ring and
 * kick the timer. This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {
		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * Any caller waiting for completion is woken when the
		 * completion event for the corresponding no-op command
		 * is received.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}

void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	struct xhci_command *cur_cmd = NULL;

	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		cur_cmd = xhci->current_cmd;
		cur_cmd->status = COMP_CMD_ABORT;
	}

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}
	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

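/*
 * Process one command completion event: validate that the event matches the
 * command TRB at the software dequeue pointer, then dispatch to the
 * per-command handler and complete the command.
 */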
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}
	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes. Count the number of similar
	 * speed ports before this one (i.e. up to port_id - 1).
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub. If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}

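/*
 * A Device Notification event arrived. The only notification handled here is
 * a remote-wake notification: pass it up to the hub driver for the device's
 * parent port.
 */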
static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

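/*
 * Handle a Port Status Change event: work out which roothub and port the
 * event belongs to, deal with any resume signalling, and switch the USB core
 * over to polling the roothub for the details.
 */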
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume. If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state). If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED(temp) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event. Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set. When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma,
		bool debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
					suspect_dma <= end_seg_dma) ||
					(suspect_dma >= cur_seg->dma &&
					 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

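/*
 * Recover an endpoint that an error has halted: remember the stopped TD,
 * queue a Reset Endpoint command, move the dequeue pointer past the stalled
 * TD (xhci_cleanup_stalled_ring is expected to queue the Set TR Dequeue
 * Pointer command), and ring the command doorbell.
 */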
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring. The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is. Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway. Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

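/* Completion codes 224 to 255 are reserved for vendor-defined
 * "informational" results, which are treated as success.
 */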
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			 trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing and remove the TD from its TD list.
 * Returns 1 if the URB can be given back, 0 otherwise.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD. We can't do that here because
			 * the halt condition must be cleared first. Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE. Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring);
			inc_deq(xhci, ep_ring);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned). Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Giveback the urb when all the tds are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened? I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
		/* fall through */
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

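/*
 * An isoc TD was missed by the xHC (missed service interval): report the
 * frame as -EXDEV with no data transferred and move the ring dequeue pointer
 * past the TD.
 */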
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb ||
		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
				trb_comp_code = COMP_SHORT_TX;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
		/* fall through */
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detected an incompatible device\n");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a missed service error is encountered, the xHC may
		 * have skipped one or more isoc TDs. Set the skip flag of
		 * the ep_ring; the missed TDs will be completed as short
		 * transfers when the ep_ring is processed next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}
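	/*
	 * Walk the TDs at the head of the ring: when ep->skip is set, each
	 * missed isoc TD is completed as a short transfer until we reach the
	 * TD that matches this event.
	 */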
	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended. Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
						trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
					"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma, false);

		/*
		 * Skip the Force Stopped Event. The TRB pointed to by the
		 * event (event_dma) may not be part of the current TD at
		 * ep_ring->dequeue, because the hardware dequeue pointer may
		 * still be at the TRB preceding the current TD: either a Link
		 * TRB or the last TRB of the previous TD. The command
		 * completion handler will take care of the rest.
		 */
		if (!event_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  event_dma, true);
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update the event ring dequeue pointer if ep->skip is
		 * set; we will come back to this event to finish processing
		 * the missed TDs.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);
			else
				kfree(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, there are missed TDs on the endpoint ring that
	 * still need to be taken care of. Process them as short transfers
	 * until we reach the TD pointed to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done. In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();

	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring? Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
			      &xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

  2488. irqreturn_t xhci_msi_irq(int irq, void *hcd)
  2489. {
  2490. return xhci_irq(hcd);
  2491. }
  2492. /**** Endpoint Ring Operations ****/
  2493. /*
  2494. * Generic function for queueing a TRB on a ring.
  2495. * The caller must have checked to make sure there's room on the ring.
  2496. *
  2497. * @more_trbs_coming: Will you enqueue more TRBs before calling
  2498. * prepare_transfer()?
  2499. */
  2500. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2501. bool more_trbs_coming,
  2502. u32 field1, u32 field2, u32 field3, u32 field4)
  2503. {
  2504. struct xhci_generic_trb *trb;
  2505. trb = &ring->enqueue->generic;
  2506. trb->field[0] = cpu_to_le32(field1);
  2507. trb->field[1] = cpu_to_le32(field2);
  2508. trb->field[2] = cpu_to_le32(field3);
  2509. trb->field[3] = cpu_to_le32(field4);
  2510. inc_enq(xhci, ring, more_trbs_coming);
  2511. }
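
/*
 * Usage sketch (illustrative, mirroring the bulk-queueing code further down):
 * a normal transfer TRB carries the 64-bit buffer address split across the
 * first two fields, the length/TD-size/interrupter word third, and the
 * control word (type, cycle, chain, etc.) last:
 *
 *	queue_trb(xhci, ep_ring, more_trbs_coming,
 *			lower_32_bits(addr),
 *			upper_32_bits(addr),
 *			TRB_LEN(trb_buff_len) | remainder | TRB_INTR_TARGET(0),
 *			field | TRB_TYPE(TRB_NORMAL));
 */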
/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		/* Fall through: a halted endpoint can still accept queued TRBs */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Command ring expansion is not supported\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}
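
/*
 * Worked example (illustrative): with TRB_MAX_BUFF_SIZE at 64KB, an sg entry
 * of 200KB starting 4KB before a 64KB boundary first gets one 4KB TRB for
 * the partial chunk, then four more TRBs for the remaining 196KB (three full
 * 64KB TRBs plus a final 4KB one), i.e. five TRBs for that entry.
 */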
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
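
/*
 * Illustrative note on the enqueue protocol used throughout this file: the
 * queueing paths below all follow the same three-step pattern, with the
 * first TRB's cycle bit written last so the xHC never sees a half-built TD:
 *
 *	prepare_transfer(...);		// room check, TD bookkeeping
 *	queue_trb(...);			// one or more TRBs
 *	giveback_first_trb(...);	// flip cycle bit, ring doorbell
 */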
/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
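
/*
 * Worked example (illustrative): with 70000 bytes left in the TD,
 * 70000 >> 10 = 68, which exceeds the 5-bit maximum of 31, so the field is
 * capped and the TRB carries 31 << 17.  With 8000 bytes left,
 * 8000 >> 10 = 7, so the TRB carries 7 << 17.
 */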
/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb,
		unsigned int num_trbs_left)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}
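
/*
 * Worked example (illustrative): a 3072-byte TD on a 512-byte bulk endpoint
 * has total_packet_count = DIV_ROUND_UP(3072, 512) = 6.  For a middle TRB
 * with running_total = 1024 and trb_buff_len = 1024, packets_transferred =
 * (1024 + 1024) / 512 = 4, so the TD size field is 6 - 4 = 2.  The final
 * TRB (num_trbs_left == 0) reports 0, as the spec requires.
 */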
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRB buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
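	/*
	 * Worked example (illustrative): a 128KB transfer starting 4KB before
	 * a 64KB boundary needs one TRB for the 4KB partial first chunk, then
	 * two more TRBs (running_total 4KB -> 68KB -> 132KB), three TRBs in
	 * total.  A zero-length transfer still gets exactly one TRB.
	 */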
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
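	/*
	 * Illustrative note: the 8-byte usb_ctrlrequest is carried as
	 * immediate data (TRB_IDT), packed into the two parameter words
	 * above.  For a standard GET_DESCRIPTOR(DEVICE) request
	 * (bRequestType 0x80, bRequest 0x06, wValue 0x0100, wIndex 0,
	 * wLength 18), field1 would be 0x01000680 and field2 0x00120000.
	 */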
	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 &&
			(setup->bRequestType & USB_DIR_IN))
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
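
/*
 * Worked example (illustrative): an isoc frame of 3KB starting 1KB before a
 * 64KB boundary gives DIV_ROUND_UP(3KB + 63KB, 64KB) = 2 TRBs, since the
 * buffer straddles the boundary.  A zero-length frame still needs one TRB.
 */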
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
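
/*
 * Worked example (illustrative): a SuperSpeed endpoint with bMaxBurst = 3
 * (4 packets per burst) moving 10 packets needs DIV_ROUND_UP(10, 4) = 3
 * bursts, so the zero-based TBC field is 3 - 1 = 2.
 */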
/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
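
/*
 * Worked example (illustrative): continuing the case above (bMaxBurst = 3,
 * 10 packets), residue = 10 % 4 = 2, so the last burst holds 2 packets and
 * the zero-based TLBPC field is 2 - 1 = 1.  With 8 packets, residue = 0,
 * meaning a full final burst of 4 packets, and TLBPC = max_burst = 3.
 */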
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				GET_MAX_PACKET(
					usb_endpoint_maxp(&urb->ep->desc)));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				field = TRB_TBC(burst_count) |
					TRB_TLBPC(residue);
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100 &&
						!(xhci->quirks &
							XHCI_AVOID_BEI)) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the whole urb,
 * and update the ISO URB's start_frame and interval.
 * The interval is updated the same way xhci_queue_intr_tx does it.  For now,
 * just use the xHC's frame_index to set urb->start_frame: URB_ISO_ASAP is
 * always assumed, and urb->start_frame is NEVER used as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;
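	/*
	 * Worked example (illustrative): MFINDEX counts 125us microframes, so
	 * a raw reading of 0x0808 (2056 microframes) corresponds to frame
	 * 2056 >> 3 = 257.  LS/FS devices schedule in 1ms frames, hence the
	 * shift; HS/SS devices keep the microframe-resolution value.
	 */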
	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ESHUTDOWN;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
	    !timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(
				deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}