/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified by Seth Levy, 2005, PLX Technology, Inc., to provide
 * compatibility with the 2282 chip.
 *
 * Modified by Ricardo Ribalda, Qtechnology A/S, to provide
 * compatibility with the USB 338x chip.  Based on the PLX driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>

#define DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define DRIVER_VERSION		"2005 Sept 27/v3.0"

#define EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */

static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;
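/* For the non-legacy (USB338x) parts, pciirqenb0 interrupt-enable bits
 * are not laid out 1:1 with endpoint numbers; ep_bit[] maps an endpoint
 * index to its bit position (legacy chips just use BIT(ep->num); see
 * enable_pciirqenb() below).
 */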
static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f", "ep-g", "ep-h",
};

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);

#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}
#include "net2280.h"

#define valid_bit	cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie	cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))

/*-------------------------------------------------------------------------*/

static inline void enable_pciirqenb(struct net2280_ep *ep)
{
	u32 tmp = readl(&ep->dev->regs->pciirqenb0);

	if (ep->dev->quirks & PLX_LEGACY)
		tmp |= BIT(ep->num);
	else
		tmp |= BIT(ep_bit[ep->num]);
	writel(tmp, &ep->dev->regs->pciirqenb0);
}
static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280 *dev;
	struct net2280_ep *ep;
	u32 max, tmp;
	unsigned long flags;
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	if (dev->quirks & PLX_SUPERSPEED) {
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
			return -EDOM;
		ep->is_in = !!usb_endpoint_dir_in(desc);
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
			return -EINVAL;
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc) & 0x1fff;
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
		return -ERANGE;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);

	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp <<= ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp <<= IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
			/* Not applicable to Legacy */
			tmp |= BIT(ENDPOINT_DIRECTION);
		} else {
			tmp <<= OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp */
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for the 2282: don't use NAK packets on an IN
		 * endpoint; this was ignored on the 2280.
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
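/* Typical handshake() use in this driver: spin_stop_dma() below waits
 * for the DMA engine to actually stop by polling dmactl until the
 * DMA_ENABLE bit reads back as zero, giving up after ~50 usec:
 *
 *	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
 */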
static const struct usb_ep_ops net2280_ep_ops;

static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
			BIT(SET_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
			BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}
static void ep_reset_338x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
			BIT(DMA_PAUSE_DONE_INTERRUPT) |
			BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT),
			/* | BIT(DMA_ABORT), */
			&ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(FIFO_OVERFLOW) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}
static void nuke(struct net2280_ep *);

static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_SUPERSPEED)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
		ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	if (!_ep)
		return NULL;
	ep = container_of(_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma *td;

		td = pci_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}

static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
		pci_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}
/*-------------------------------------------------------------------------*/

/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf;
	u32 tmp;
	unsigned count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;
	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
		ep->ep.name, count,
		(count != ep->ep.maxpacket) ? " (short)" : "",
		req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these.  fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE:  also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32 __iomem *statp;
	u32 tmp;

	statp = &ep->regs->ep_stat;

	tmp = readl(statp);
	if (tmp & BIT(NAK_OUT_PACKETS)) {
		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
			ep->ep.name, __func__, tmp);
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	}

	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that statp is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned usec;

		usec = 50;	/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf = req->req.buf + req->req.actual;
	unsigned count, tmp, is_short;
	unsigned cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer.  the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
		ep->ep.name, count, is_short ? " (short)" : "",
		cleanup ? " flush" : "", prevent ? " nak" : "",
		req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}
/* fill out dma descriptor to match a given request */
static void fill_dma_desc(struct net2280_ep *ep,
		struct net2280_request *req, int valid)
{
	struct net2280_dma *td = req->td;
	u32 dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
					!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32(req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	td->dmacount = cpu_to_le32(dmacount);
}
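/* The dmacount word written above packs the transfer length together
 * with per-descriptor control flags; schematically:
 *
 *	dmacount = req->req.length		(DMA_BYTE_COUNT_MASK bits)
 *		 | BIT(DMA_DIRECTION)		(IN endpoints only)
 *		 | BIT(END_OF_CHAIN)		(per the tests above)
 *		 | BIT(VALID_BIT)		(descriptor ready for hw)
 *		 | BIT(DMA_DONE_INTERRUPT_ENABLE);
 */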
static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);

static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}

static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}

static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs __iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_SUPERSPEED)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32 tmp;
	struct net2280_dma_regs __iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
				&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
				req->req.zero)) {
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32(ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma *end;
	dma_addr_t tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32(ep->td_dma);

	fill_dma_desc(ep, req, valid);
}
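/* Why the swap above: the hardware may still be walking the old chain,
 * so the new request adopts the endpoint's trailing dummy descriptor
 * (already linked into that chain) and donates its own td as the next
 * dummy, keeping the chain unbroken while it is extended.
 */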
static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280 *dev;
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/

static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request *req;
	struct net2280_ep *ep;
	struct net2280 *dev;
	unsigned long flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of(_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf ||
				!list_empty(&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped &&
		!((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {

		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma(ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status(ep);
				done(ep, req, 0);
				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo(ep, _req);
			else if (list_empty(&ep->queue)) {
				u32 s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl(&ep->regs->ep_stat);
				if ((s & BIT(FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo(ep, req) &&
							ep->num == 0) {
						done(ep, req, 0);
						allow_status(ep);
						/* don't queue it */
						req = NULL;
					} else if (read_fifo(ep, req) &&
							ep->num != 0) {
						done(ep, req, 0);
						req = NULL;
					} else
						s = readl(&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & BIT(NAK_OUT_PACKETS)))
					writel(BIT(CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int valid = 1;

		if (ep->is_in) {
			int expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket));
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma(ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail(&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
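/* For context, net2280_queue() is reached through the standard gadget
 * API.  A minimal sketch of the caller side (generic usb_ep_*() calls,
 * not part of this driver; error handling trimmed, "my_complete" is a
 * hypothetical completion callback, invoked from done() above):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */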
static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}

static void scan_dma_completions(struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty(&ep->queue)) {
		struct net2280_request *req;
		u32 tmp;

		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb();
		tmp = le32_to_cpup(&req->td->dmacount);
		if ((tmp & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely(req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl(&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done(ep, req, tmp, 0);
			break;
		} else if (!ep->is_in &&
				(req->req.length % ep->ep.maxpacket) &&
				!(ep->dev->quirks & PLX_SUPERSPEED)) {

			tmp = readl(&ep->regs->ep_stat);
			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
				ep_warn(ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				tmp = readl(&ep->regs->ep_avail);
				if (tmp) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					ep_dbg(ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		dma_done(ep, req, tmp, 0);
	}
}
static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request *req;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	start_dma(ep, req);
}

static void abort_dma(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}

/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
	struct net2280_request *req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma(ep);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2280_request,
				queue);
		done(ep, req, -ESHUTDOWN);
	}
}
/* dequeue JUST ONE request */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req;
	unsigned long flags;
	u32 dmactl;
	int stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode */
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;
	}
	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

static int net2280_fifo_status(struct usb_ep *_ep);

static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep *ep;
	unsigned long flags;
	int retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
		retval = -EAGAIN;
	else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			if (ep->dev->quirks & PLX_SUPERSPEED &&
					!list_empty(&ep->queue) && ep->td_dma)
				restart_dma(ep);
			ep->wedged = 0;
		}
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;
}

static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}

static int net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}
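/* Gadget drivers reach these through usb_ep_set_halt() and
 * usb_ep_set_wedge().  The ep->wedged flag set above is meant to keep a
 * wedged endpoint halted until the gadget driver itself clears it
 * (value == 0 above); the flag is consumed elsewhere in the driver's
 * CLEAR_FEATURE handling.
 */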
static int net2280_fifo_status(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	u32 avail;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep *ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl(&ep->regs->ep_rsp);
}
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
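/* The gadget core dispatches function drivers' usb_ep_*() calls through
 * this table: usb_ep_queue() lands in net2280_queue(), usb_ep_dequeue()
 * in net2280_dequeue(), and so on.
 */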
/*-------------------------------------------------------------------------*/

static int net2280_get_frame(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;
	u16 retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore(&dev->lock, flags);
	return retval;
}

static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (value) {
		tmp |= BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 1;
	} else {
		tmp &= ~BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 0;
	}
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= BIT(USB_DETECT_ENABLE);
	else
		tmp &= ~BIT(USB_DETECT_ENABLE);
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);

static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
		char *buf)
{
	struct net2280	*dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
			strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);

static ssize_t registers_show(struct device *_dev,
		struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep[i];
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf(next, size,
				" dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl(&ep->dma->dmactl),
				readl(&ep->dma->dmastat),
				readl(&ep->dma->dmacount),
				readl(&ep->dma->dmaaddr),
				readl(&ep->dma->dmadesc));
		size -= t;
		next += t;
	}

	/* Indexed Registers (none yet) */

	/* Statistics */
	t = scnprintf(next, size, "\nirqs: ");
	size -= t;
	next += t;
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep[i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;
	}
	t = scnprintf(next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);

static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
		char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep		*ep = &dev->ep[i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf(next, size, "\t td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);

#else

#define device_create_file(a, b)	(0)
#define device_remove_file(a, b)	do { } while (0)

#endif
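/* Example (hypothetical userspace sketch, not part of this file): with
 * CONFIG_USB_GADGET_DEBUG_FILES set, the attributes above appear under the
 * controller's PCI device in sysfs.  A quick dump could look like
 *
 *	char buf[4096];		// each *_show() emits at most PAGE_SIZE
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/registers",
 *			O_RDONLY);	// PCI address made up for illustration
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	if (n > 0) {
 *		buf[n] = 0;
 *		fputs(buf, stdout);
 *	}
 *
 * A single read suffices because the show methods never write more than
 * one page.
 */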
/*-------------------------------------------------------------------------*/

/* another driver-specific mode might be a request type doing dma
 * to/from another device fifo instead of to/from memory.
 */

static void set_fifo_mode(struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 1:
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 2048;
		dev->ep[2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
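/* Summary of the trade-off set_fifo_mode() implements on 228x parts
 * (ep-e and ep-f are always available with fixed small FIFOs):
 *
 *	fifo_mode 0: ep-a..ep-d usable; ep-a and ep-b get 1KB FIFOs
 *	fifo_mode 1: only ep-a and ep-b usable, each with a 2KB FIFO
 *	fifo_mode 2: ep-a (2KB), ep-b (1KB), ep-c usable; no ep-d
 *
 * Selected at module load, e.g. "modprobe net2280 fifo_mode=1" (assuming
 * the fifo_mode module parameter declared earlier in this file, consumed
 * by usb_reset_228x() below): more FIFO depth buys fewer endpoints.
 */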
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *    returning ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(0, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
					ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
		== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operating Defect 7374 workaround in software this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/* GPEPs: */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
			BIT(IN_ENDPOINT_ENABLE));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/* Implemented for development and debug.
	 * Can be refined/tuned later.
	 */
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			tmp = (readl(&dev->plregs->pl_ep_ctrl) |
					BIT(CLEAR_ACK_ERROR_CODE));
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18 || ep_sel == 20)
			continue;

		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR));
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);
	}

	/* Set FSM to focus on the first Control Read:
	 * - Tip: Connection speed is known upon the first
	 *   setup request.
	 */
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);
}
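/* For orientation, the Defect 7374 state kept in the SCRATCH index register
 * moves through the states used in this file roughly as:
 *
 *	(cold boot / SS connect)
 *	  -> DEFECT7374_FSM_WAITING_FOR_CONTROL_READ	(set just above)
 *	  -> DEFECT7374_FSM_SS_CONTROL_READ	(SS host ACKed the data phase)
 *	  or DEFECT7374_FSM_NON_SS_CONTROL_READ	(connection turned out FS/HS)
 *
 * The transition out of WAITING happens in defect7374_workaround() further
 * below, which also restores the data endpoints via
 * defect7374_disable_data_eps().
 */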
/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 *
 * most of the work to support multiple net2280 controllers would
 * be to associate this gadget driver (yes?) with all of them, or
 * perhaps to bind specific drivers to specific devices.
 */

static void usb_reset_228x(struct net2280 *dev)
{
	u32	tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	/* disable automatic responses, and irqs */
	writel(0, &dev->usb->stdrsp);
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep	*ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	writel(~0, &dev->regs->irqstat0);
	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);

	/* reset, and enable pci */
	tmp = readl(&dev->regs->devinit) |
		BIT(PCI_ENABLE) |
		BIT(FIFO_SOFT_RESET) |
		BIT(USB_SOFT_RESET) |
		BIT(M8051_RESET);
	writel(tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	if (dev->bug7734_patched) {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	writel(~0, &dev->regs->irqstat0);
	writel(~0, &dev->regs->irqstat1);

	if (dev->bug7734_patched) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
}

static void usb_reset(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		return usb_reset_228x(dev);
	return usb_reset_338x(dev);
}
static void usb_reinit_228x(struct net2280 *dev)
{
	u32	tmp;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep	*ep = &dev->ep[tmp];

		ep->ep.name = ep_name[tmp];
		ep->dev = dev;
		ep->num = tmp;

		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			ep->dma = &dev->dma[tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs[tmp];
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
}
static void usb_reinit_338x(struct net2280 *dev)
{
	int i;
	u32 tmp, val;
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;

		if (i > 0 && i <= 4)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void __iomem *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
			ep->fiforegs = &dev->fiforegs[i];
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
			ep->fiforegs = &dev->fiforegs[i];
		}

		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	if (dev->bug7734_patched) {
		tmp = readl(&dev->usb_ext->usbctl2) &
		    ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}

static void usb_reinit(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		return usb_reinit_228x(dev);
	return usb_reinit_338x(dev);
}
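/* The 228x/338x splits above all key off dev->quirks, which is seeded from
 * the PCI id table's driver_data at probe time (see net2280_probe() below).
 * A minimal sketch of one such table entry, with formatting made up here
 * but the real NET2280 ids, would be:
 *
 *	static const struct pci_device_id pci_ids[] = { {
 *		.vendor		= PCI_VENDOR_ID_PLX_LEGACY,	// 0x17cc
 *		.device		= 0x2280,
 *		.driver_data	= PLX_LEGACY | PLX_2280,
 *	}, ... };
 *
 * so each "quirks & PLX_LEGACY" test is really asking "is this a 228x?".
 */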
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS),
		&dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl(&dev->usb->usbctl);
}
static void ep0_start_338x(struct net2280 *dev)
{
	if (dev->bug7734_patched)
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		       BIT(SET_EP_HIDE_STATUS_PHASE),
		       &dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
	       &dev->usb->stdrsp);
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}

static void ep0_start(struct net2280 *dev)
{
	if (dev->quirks & PLX_LEGACY)
		return ep0_start_228x(dev);
	return ep0_start_338x(dev);
}
/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
			!driver->setup)
		return -EINVAL;

	dev = container_of(_gadget, struct net2280, gadget);

	for (i = 0; i < dev->n_ep; i++)
		dev->ep[i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;

	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
	if (retval)
		goto err_unbind;
	retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
	if (retval)
		goto err_func;

	/* enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active(dev, 1);

	if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
		defect7374_enable_data_eps_zero(dev);

	ep0_start(dev);

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file(&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int			i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset(dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit(dev);
}

static int net2280_stop(struct usb_gadget *_gadget)
{
	struct net2280	*dev;
	unsigned long	flags;

	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2280_led_active(dev, 0);

	device_remove_file(&dev->pdev->dev, &dev_attr_function);
	device_remove_file(&dev->pdev->dev, &dev_attr_queues);

	dev->driver = NULL;

	return 0;
}
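/* Gadget drivers never call net2280_start()/net2280_stop() directly; the
 * UDC core does, when a function driver registers or unregisters.  A rough
 * sketch of the sequence (my_driver, my_setup, my_disconnect hypothetical):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.max_speed	= USB_SPEED_HIGH,	// >= HIGH, see above
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		// ...
 *	};
 *
 *	usb_gadget_probe_driver(&my_driver);	  // core -> .udc_start()
 *	// ... host enumerates, traffic flows ...
 *	usb_gadget_unregister_driver(&my_driver); // core -> .udc_stop()
 */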
/*-------------------------------------------------------------------------*/

/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely(ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				if (!req)
					allow_status(ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo(ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
					req &&
					req->req.actual == req->req.length) ||
					(ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt(ep);
				ep->stopped = 1;
				if (req)
					done(ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely(!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely(ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions(ep);
				if (unlikely(list_empty(&ep->queue) ||
						ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl(&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl(&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma(ep->dma);

			if (likely(req)) {
				req->td->dmacount = 0;
				t = readl(&ep->regs->ep_avail);
				dma_done(ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely(ep->out_overflow ||
					(ep->dev->chiprev == 0x0100 &&
					ep->dev->gadget.speed
					== USB_SPEED_FULL))) {
				out_flush(ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty(&ep->queue))
				restart_dma(ep);
		} else
			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo(ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		/* send zlps until the status stage */
		if ((req->req.actual == req->req.length) &&
			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
			mode = 2;

	/* there was nothing to do ... */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done(ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status(ep);
			req = NULL;
		} else {
			if (!list_empty(&ep->queue) && !ep->stopped)
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking(ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo(ep, &req->req);
	}
}
static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
{
	struct net2280_ep	*ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];
	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8	bEndpointAddress;

		if (!ep->desc)
			continue;
		bEndpointAddress = ep->desc->bEndpointAddress;
		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
			continue;
		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
			return ep;
	}
	return NULL;
}
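/* Worked example of the matching logic above: for a control request with
 * wIndex = 0x81 (IN endpoint 1), an endpoint whose descriptor carries
 * bEndpointAddress = 0x81 passes both tests:
 *
 *	(0x81 ^ 0x81) & USB_DIR_IN  == 0	// same direction
 *	(0x81 & 0x0f) == (0x81 & 0x0f)		// same endpoint number
 *
 * while wIndex = 0x01 (OUT endpoint 1) fails the direction test and is
 * skipped.
 */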
static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
{
	u32 scratch, fsmvalue;
	u32 ack_wait_timeout, state;

	/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
	scratch = get_idx_reg(dev->regs, SCRATCH);
	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
				(r.bRequestType & USB_DIR_IN)))
		return;

	/* This is the first Control Read for this connection: */
	if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
		/*
		 * Connection is NOT SS:
		 * - Connection must be FS or HS.
		 * - This FSM state should allow workaround software to
		 *   run after the next USB connection.
		 */
		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
		dev->bug7734_patched = 1;
		goto restore_data_eps;
	}

	/* Connection is SS: */
	for (ack_wait_timeout = 0;
			ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
			ack_wait_timeout++) {

		state = readl(&dev->plregs->pl_ep_status_1)
			& (0xff << STATE);
		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
			(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
			dev->bug7734_patched = 1;
			break;
		}

		/*
		 * We have not yet received host's Data Phase ACK
		 * - Wait and try again.
		 */
		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
	}

	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
		ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
			"to detect SS host's data phase ACK.");
		ep_err(dev, "PL_EP_STATUS_1(23:16): expected from 0x11 to 0x16, "
			"got 0x%2.2x.\n", state >> STATE);
	} else {
		ep_warn(dev, "INFO: Defect 7374 workaround waited about "
			"%d uSec for Control Read Data Phase ACK\n",
			DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
	}

restore_data_eps:
	/*
	 * Restore data EPs to their pre-workaround settings (disabled,
	 * initialized, and other details).
	 */
	defect7374_disable_data_eps(dev);

	set_idx_reg(dev->regs, SCRATCH, scratch);
}
static void ep_clear_seqnum(struct net2280_ep *ep)
{
	struct net2280 *dev = ep->dev;
	u32 val;
	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };

	val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
	val |= ep_pl[ep->num];
	writel(val, &dev->plregs->pl_ep_ctrl);
	val |= BIT(SEQUENCE_NUMBER_RESET);
	writel(val, &dev->plregs->pl_ep_ctrl);
}
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->gadget.is_selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
					dev->ltm_enable << 4);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			status = readl(&e->regs->ep_rsp) &
				BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			ep_clear_seqnum(e);
			clear_halt(e);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			ep->stopped = 1;
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else {
				if (ep->dma)
					abort_dma(ep);
				set_halt(ep);
			}
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	default:

usb3_delegate:
		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
do_stall3:
	if (tmp < 0) {
		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		set_halt(ep);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
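/* Worked example for the USB3 GET_STATUS (device) reply assembled above:
 * a self-powered device with remote wakeup armed and U1/U2 accepted but
 * LTM off reports
 *
 *	status = BIT(0)			// self-powered
 *	       | 0x02			// wakeup_enable
 *	       | (1 << 2) | (1 << 3)	// u1_enable, u2_enable
 *	       = 0x000f
 *
 * matching the bit layout the USB 3.0 spec defines for the device status
 * word.
 */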
static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat0 %04x\n", stat); */

	/* starting a control request? */
	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw[2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			u32 val = readl(&dev->usb->usbstat);
			if (val & BIT(SUPER_SPEED)) {
				dev->gadget.speed = USB_SPEED_SUPER;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_SS_MAX_PACKET_SIZE);
			} else if (val & BIT(HIGH_SPEED)) {
				dev->gadget.speed = USB_SPEED_HIGH;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			} else {
				dev->gadget.speed = USB_SPEED_FULL;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			}
			net2280_led_speed(dev, dev->gadget.speed);
			ep_dbg(dev, "%s\n",
					usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			done(ep, req, (req->req.actual == req->req.length)
						? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		if (!(dev->quirks & PLX_SUPERSPEED)) {
			if (ep->dev->quirks & PLX_2280)
				tmp = BIT(FIFO_OVERFLOW) |
				    BIT(FIFO_UNDERFLOW);
			else
				tmp = 0;

			writel(tmp | BIT(TIMEOUT) |
				   BIT(USB_STALL_SENT) |
				   BIT(USB_IN_NAK_SENT) |
				   BIT(USB_IN_ACK_RCVD) |
				   BIT(USB_OUT_PING_NAK_SENT) |
				   BIT(USB_OUT_ACK_SENT) |
				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				   BIT(DATA_IN_TOKEN_INTERRUPT),
				   &ep->regs->ep_stat);
		}
		u.raw[0] = readl(&dev->usb->setup0123);
		u.raw[1] = readl(&dev->usb->setup4567);

		cpu_to_le32s(&u.raw[0]);
		cpu_to_le32s(&u.raw[1]);

		if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
			defect7374_workaround(dev, u.r);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
		stat ^= BIT(SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking(ep);
		} else
			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
		writel(scratch, &dev->epregs[0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;

		if (dev->gadget.speed == USB_SPEED_SUPER) {
			handle_stat0_irqs_superspeed(dev, ep, u.r);
			goto next_endpoints;
		}

		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			e = get_ep_by_addr(dev, w_index);
			if (!e || w_length > 2)
				goto do_stall;

			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
				status = cpu_to_le32(1);
			else
				status = cpu_to_le32(0);

			/* don't bother with a request object! */
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, w_length);
			writel((__force u32)status, &dev->epregs[0].ep_data);
			allow_status(ep);
			ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				ep_vdbg(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				ep_vdbg(dev, "%s clear halt\n", e->ep.name);
				clear_halt(e);
				if ((ep->dev->quirks & PLX_SUPERSPEED) &&
					!list_empty(&e->queue) && e->td_dma)
						restart_dma(e);
			}
			allow_status(ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt(e);
			if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
				abort_dma(e);
			allow_status(ep);
			ep_vdbg(dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));
			ep->responded = 0;
			spin_unlock(&dev->lock);
			tmp = dev->driver->setup(&dev->gadget, &u.r);
			spin_lock(&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	/* endpoint data irq ? */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = BIT(num);
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep[num];
		handle_ep_small(ep);
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
}
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
		BIT(DMA_C_INTERRUPT) | \
		BIT(DMA_B_INTERRUPT) | \
		BIT(DMA_A_INTERRUPT))
#define	PCI_ERROR_INTERRUPTS ( \
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		bool	reset = false;
		bool	disconnect = false;

		/*
		 * Ignore disconnects and resets if the speed hasn't been set.
		 * VBUS can bounce and there's always an initial reset.
		 */
		writel(tmp, &dev->regs->irqstat1);
		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
			if ((stat & BIT(VBUS_INTERRUPT)) &&
					(readl(&dev->usb->usbctl) &
						BIT(VBUS_PIN)) == 0) {
				disconnect = true;
				ep_dbg(dev, "disconnect %s\n",
						dev->driver->driver.name);
			} else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
					(readl(&dev->usb->usbstat) & mask)
						== 0) {
				reset = true;
				ep_dbg(dev, "reset %s\n",
						dev->driver->driver.name);
			}

			if (disconnect || reset) {
				stop_activity(dev, dev->driver);
				ep0_start(dev);
				spin_unlock(&dev->lock);
				if (reset)
					usb_gadget_udc_reset
						(&dev->gadget, dev->driver);
				else
					(dev->driver->disconnect)
						(&dev->gadget);
				spin_lock(&dev->lock);
				return;
			}
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel(tmp, &dev->regs->irqstat1);
		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			if (!enable_suspend)
				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume(&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel(stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->quirks & PLX_2280)
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(SUSPEND_REQUEST_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_INTERRUPT));
	else
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_DOWN_INTERRUPT) |
			  BIT(SOF_INTERRUPT));

	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat1 %08x\n", stat);*/

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = BIT(num);
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		ep = &dev->ep[num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl(&dma->dmastat);
		writel(tmp, &dma->dmastat);

		/* dma sync */
		if (dev->quirks & PLX_SUPERSPEED) {
			u32 r_dmacount = readl(&dma->dmacount);
			if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
				continue;
		}

		if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
			ep_dbg(ep->dev, "%s no xact done? %08x\n",
				ep->ep.name, tmp);
			continue;
		}
		stop_dma(ep->dma);

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions(ep);

		/* disable dma on inactive queues; else maybe restart */
		if (!list_empty(&ep->queue)) {
			tmp = readl(&dma->dmactl);
			restart_dma(ep);
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ep_err(dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity(dev, dev->driver);
		ep0_start(dev);
		stat = 0;
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
}
static irqreturn_t net2280_irq(int irq, void *_dev)
{
	struct net2280		*dev = _dev;

	/* shared interrupt, not ours */
	if ((dev->quirks & PLX_LEGACY) &&
		(!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
		return IRQ_NONE;

	spin_lock(&dev->lock);

	/* handle disconnect, dma, and more */
	handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));

	/* control requests and PIO */
	handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));

	if (dev->quirks & PLX_SUPERSPEED) {
		/* re-enable interrupt to trigger any possible new interrupt */
		u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
		writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
		writel(pciirqenb1, &dev->regs->pciirqenb1);
	}

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
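/* The INTA_ASSERTED check above is what makes a shared PCI interrupt line
 * safe: the handler returns IRQ_NONE when the 228x didn't raise the line.
 * That only matters because the irq is requested shared; a sketch of the
 * registration (the real request_irq() call lives later in net2280_probe()):
 *
 *	if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
 *			driver_name, dev) != 0)
 *		// fail the probe
 *
 * Shown here for orientation only; see the probe path for the exact call.
 */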
  2776. /*-------------------------------------------------------------------------*/
  2777. static void gadget_release(struct device *_dev)
  2778. {
  2779. struct net2280 *dev = dev_get_drvdata(_dev);
  2780. kfree(dev);
  2781. }

/* tear down the binding between this driver and the pci device */

static void net2280_remove(struct pci_dev *pdev)
{
	struct net2280 *dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&dev->gadget);

	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe() */
	net2280_led_shutdown(dev);
	if (dev->requests) {
		int i;

		for (i = 1; i < 5; i++) {
			if (!dev->ep[i].dummy)
				continue;
			pci_pool_free(dev->requests, dev->ep[i].dummy,
					dev->ep[i].td_dma);
		}
		pci_pool_destroy(dev->requests);
	}
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	if (dev->quirks & PLX_SUPERSPEED)
		pci_disable_msi(pdev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->enabled)
		pci_disable_device(pdev);
	device_remove_file(&pdev->dev, &dev_attr_registers);

	ep_info(dev, "unbind\n");
}
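
/* Teardown order above matters: usb_del_gadget_udc() runs first so a
 * bound gadget driver detaches while the IRQ and registers are still
 * live.  Each later step is guarded by a flag (got_irq, region, enabled,
 * ...) because probe() reuses this function as its error-unwind path,
 * so a partially completed probe releases only what it acquired.
 */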

/* wrap this driver around the specified device, but
 * don't respond over USB until a gadget driver binds to us.
 */
static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280 *dev;
	unsigned long resource, len;
	void __iomem *base = NULL;
	int retval, i;

	/* alloc, and start init */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&dev->lock);
	dev->quirks = id->driver_data;
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
				USB_SPEED_SUPER : USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device(pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		ep_dbg(dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */

	base = ioremap_nocache(resource, len);
	if (base == NULL) {
		ep_dbg(dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	if (dev->quirks & PLX_SUPERSPEED) {
		u32 fsmvalue;
		u32 usbstat;

		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
							(base + 0x00b4);
		dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
							(base + 0x0500);
		dev->llregs = (struct usb338x_ll_regs __iomem *)
							(base + 0x0700);
		dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
							(base + 0x0748);
		dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
							(base + 0x077c);
		dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
							(base + 0x079c);
		dev->plregs = (struct usb338x_pl_regs __iomem *)
							(base + 0x0800);
		usbstat = readl(&dev->usb->usbstat);
		dev->enhanced_mode = !!(usbstat & BIT(11));
		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;

		/* put into initial config, link up all endpoints */
		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
					(0xf << DEFECT7374_FSM_FIELD);

		/* See if firmware needs to set up for workaround: */
		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
			dev->bug7734_patched = 1;
			writel(0, &dev->usb->usbctl);
		} else
			dev->bug7734_patched = 0;
	} else {
		dev->enhanced_mode = 0;
		dev->n_ep = 7;

		/* put into initial config, link up all endpoints */
		writel(0, &dev->usb->usbctl);
	}

	usb_reset(dev);
	usb_reinit(dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ep_err(dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (dev->quirks & PLX_SUPERSPEED)
		if (pci_enable_msi(pdev))
			ep_err(dev, "Failed to enable MSI mode\n");

	if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
			driver_name, dev)) {
		ep_err(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create("requests", pdev,
		sizeof(struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		ep_dbg(dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	for (i = 1; i < 5; i++) {
		struct net2280_dma *td;

		td = pci_pool_alloc(dev->requests, GFP_KERNEL,
				&dev->ep[i].td_dma);
		if (!td) {
			ep_dbg(dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		dev->ep[i].dummy = td;
	}
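
	/* A sketch of what the loop above set up, as I read it: each
	 * dma-capable endpoint (ep-a..ep-d, dev->ep[1..4]) now owns one
	 * "dummy" descriptor whose dmadesc points at its own bus address
	 * and whose dmacount is not VALID, so it can safely terminate a
	 * descriptor chain.  Per the NOTE above, no 64-bit dma mask is
	 * set: the chip only drives the low 32 address bits.
	 */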

	/* enable lower-overhead pci memory bursts during DMA */
	if (dev->quirks & PLX_LEGACY)
		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
			/*
			 * 256 write retries may not be enough...
			 * BIT(PCI_RETRY_ABORT_ENABLE) |
			 */
			BIT(DMA_READ_MULTIPLE_ENABLE) |
			BIT(DMA_READ_LINE_ENABLE),
			&dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	ep_info(dev, "%s\n", driver_desc);
	ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
			dev->enhanced_mode ? "enhanced mode" : "legacy mode");

	retval = device_create_file(&pdev->dev, &dev_attr_registers);
	if (retval)
		goto done;

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto done;
	return 0;

done:
	if (dev)
		net2280_remove(pdev);
	return retval;
}
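
/* After a successful probe() the UDC is registered but keeps quiet on
 * the bus until a gadget driver binds.  A minimal sketch of that
 * binding, where my_driver, my_bind, my_unbind, and my_setup are all
 * hypothetical names shown only for illustration:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function  = "example",
 *		.max_speed = USB_SPEED_SUPER,
 *		.bind      = my_bind,
 *		.unbind    = my_unbind,
 *		.setup     = my_setup,
 *	};
 *
 *	err = usb_gadget_probe_driver(&my_driver);
 *
 * The UDC core then routes the driver through our net2280_ops hooks,
 * which ultimately enable the pullup so the host can enumerate us.
 */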

/* make sure the board is quiescent; otherwise it will continue
 * generating IRQs across the upcoming reboot.
 */
static void net2280_shutdown(struct pci_dev *pdev)
{
	struct net2280 *dev = pci_get_drvdata(pdev);

	/* disable IRQs */
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel(0, &dev->usb->usbctl);
}
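
/* Writing 0 to usbctl clears the soft-connect bit (USB_DETECT_ENABLE)
 * along with everything else, releasing the D+ pullup: the host sees a
 * clean disconnect instead of a device that goes silent mid-reboot.
 */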

/*-------------------------------------------------------------------------*/

static const struct pci_device_id pci_ids[] = { {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX_LEGACY,
	.device = 0x2280,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_LEGACY | PLX_2280,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX_LEGACY,
	.device = 0x2282,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_LEGACY,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX,
	.device = 0x3380,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_SUPERSPEED,
}, {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_PLX,
	.device = 0x3382,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.driver_data = PLX_SUPERSPEED,
},
{ /* end: all zeroes */ }
};
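
/* The class match ((PCI_CLASS_SERIAL_USB << 8) | 0xfe) selects base
 * class 0x0c03 ("serial bus: USB") with programming interface 0xfe,
 * i.e. a USB peripheral/device function rather than a host controller,
 * and class_mask = ~0 makes every class bit significant.  driver_data
 * carries the quirk flags that probe() reads back as id->driver_data.
 */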

MODULE_DEVICE_TABLE(pci, pci_ids);

/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name = (char *) driver_name,
	.id_table = pci_ids,

	.probe = net2280_probe,
	.remove = net2280_remove,
	.shutdown = net2280_shutdown,

	/* FIXME add power management support */
};

module_pci_driver(net2280_pci_driver);
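
/* module_pci_driver() is shorthand for the usual registration
 * boilerplate; it expands to roughly:
 *
 *	static int __init net2280_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&net2280_pci_driver);
 *	}
 *	module_init(net2280_pci_driver_init);
 *
 *	static void __exit net2280_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&net2280_pci_driver);
 *	}
 *	module_exit(net2280_pci_driver_exit);
 */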

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");