
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG (NETIF_MSG_DRV | \
             NETIF_MSG_PROBE | \
             NETIF_MSG_LINK)

#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
            SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
            SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
            SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME    "sungem"
#define DRV_VERSION "1.0"
#define DRV_AUTHOR  "David S. Miller <davem@redhat.com>"

static char version[] =
    DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME "gem"

static const struct pci_device_id gem_pci_tbl[] = {
    { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

    /* These models only differ from the original GEM in
     * that their tx/rx fifos are of a different size and
     * they only support 10/100 speeds. -DaveM
     *
     * Apple's GMAC does support gigabit on machines with
     * the BCM54xx PHYs. -BenH
     */
    { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    {0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
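/* Low-level MII access goes through the MIF frame register: bit 30
 * is the start-of-frame marker, bits 29:28 encode the operation
 * (2 = read, 1 = write), the PHY and register addresses go in the
 * PHYAD/REGAD fields, and TAMSB/TALSB form the turnaround handshake.
 * The helpers below busy-wait (up to 10000 * 10us) for the chip to
 * flip the turnaround LSB; a read that times out returns 0xffff,
 * which callers treat as "no PHY answered".
 */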
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
    u32 cmd;
    int limit = 10000;

    cmd = (1 << 30);
    cmd |= (2 << 28);
    cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
    cmd |= (reg << 18) & MIF_FRAME_REGAD;
    cmd |= (MIF_FRAME_TAMSB);
    writel(cmd, gp->regs + MIF_FRAME);

    while (--limit) {
        cmd = readl(gp->regs + MIF_FRAME);
        if (cmd & MIF_FRAME_TALSB)
            break;
        udelay(10);
    }

    if (!limit)
        cmd = 0xffff;

    return cmd & MIF_FRAME_DATA;
}

static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
    struct gem *gp = netdev_priv(dev);
    return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
    return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
    u32 cmd;
    int limit = 10000;

    cmd = (1 << 30);
    cmd |= (1 << 28);
    cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
    cmd |= (reg << 18) & MIF_FRAME_REGAD;
    cmd |= (MIF_FRAME_TAMSB);
    cmd |= (val & MIF_FRAME_DATA);
    writel(cmd, gp->regs + MIF_FRAME);

    while (limit--) {
        cmd = readl(gp->regs + MIF_FRAME);
        if (cmd & MIF_FRAME_TALSB)
            break;
        udelay(10);
    }
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
    struct gem *gp = netdev_priv(dev);
    __sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
    __sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
    /* Enable all interrupts but TXDONE */
    writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
    /* Disable all interrupts, including TXDONE */
    writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
    (void)readl(gp->regs + GREG_IMASK); /* write posting */
}
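/* Turn on the chip's clock. The "cell" is the GMAC clock cell on
 * PowerMacs; gem_get_cell()/gem_put_cell() refcount it so nested
 * users cannot power the cell down under each other. On non-PMAC
 * builds these degenerate to pure refcount bookkeeping.
 */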
static void gem_get_cell(struct gem *gp)
{
    BUG_ON(gp->cell_enabled < 0);
    gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
    if (gp->cell_enabled == 1) {
        mb();
        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
        udelay(10);
    }
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
    BUG_ON(gp->cell_enabled <= 0);
    gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
    if (gp->cell_enabled == 0) {
        mb();
        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
        udelay(10);
    }
#endif /* CONFIG_PPC_PMAC */
}

static inline void gem_netif_stop(struct gem *gp)
{
    netif_trans_update(gp->dev); /* prevent tx timeout */
    napi_disable(&gp->napi);
    netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
    /* NOTE: unconditional netif_wake_queue is only
     * appropriate so long as all callers are assured to
     * have free tx slots.
     */
    netif_wake_queue(gp->dev);
    napi_enable(&gp->napi);
}

static void gem_schedule_reset(struct gem *gp)
{
    gp->reset_task_pending = 1;
    schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
    if (netif_msg_intr(gp))
        printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
    u32 pcs_miistat;

    if (netif_msg_intr(gp))
        printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
               gp->dev->name, pcs_istat);

    if (!(pcs_istat & PCS_ISTAT_LSC)) {
        netdev_err(dev, "PCS irq but no link status change???\n");
        return 0;
    }

    /* The link status bit latches on zero, so you must
     * read it twice in such a case to see a transition
     * to the link being up.
     */
    pcs_miistat = readl(gp->regs + PCS_MIISTAT);
    if (!(pcs_miistat & PCS_MIISTAT_LS))
        pcs_miistat |=
            (readl(gp->regs + PCS_MIISTAT) &
             PCS_MIISTAT_LS);

    if (pcs_miistat & PCS_MIISTAT_ANC) {
        /* The remote-fault indication is only valid
         * when autoneg has completed.
         */
        if (pcs_miistat & PCS_MIISTAT_RF)
            netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
        else
            netdev_info(dev, "PCS AutoNEG complete\n");
    }

    if (pcs_miistat & PCS_MIISTAT_LS) {
        netdev_info(dev, "PCS link is now up\n");
        netif_carrier_on(gp->dev);
    } else {
        netdev_info(dev, "PCS link is now down\n");
        netif_carrier_off(gp->dev);
        /* If this happens and the link timer is not running,
         * reset so we re-negotiate.
         */
        if (!timer_pending(&gp->link_timer))
            return 1;
    }

    return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

    if (netif_msg_intr(gp))
        printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
               gp->dev->name, txmac_stat);

    /* Defer timer expiration is quite normal,
     * don't even log the event.
     */
    if ((txmac_stat & MAC_TXSTAT_DTE) &&
        !(txmac_stat & ~MAC_TXSTAT_DTE))
        return 0;

    if (txmac_stat & MAC_TXSTAT_URUN) {
        netdev_err(dev, "TX MAC xmit underrun\n");
        dev->stats.tx_fifo_errors++;
    }

    if (txmac_stat & MAC_TXSTAT_MPE) {
        netdev_err(dev, "TX MAC max packet size error\n");
        dev->stats.tx_errors++;
    }

    /* The rest are all cases of one of the 16-bit TX
     * counters expiring.
     */
    if (txmac_stat & MAC_TXSTAT_NCE)
        dev->stats.collisions += 0x10000;

    if (txmac_stat & MAC_TXSTAT_ECE) {
        dev->stats.tx_aborted_errors += 0x10000;
        dev->stats.collisions += 0x10000;
    }

    if (txmac_stat & MAC_TXSTAT_LCE) {
        dev->stats.tx_aborted_errors += 0x10000;
        dev->stats.collisions += 0x10000;
    }

    /* We do not keep track of MAC_TXSTAT_FCE and
     * MAC_TXSTAT_PCE events.
     */
    return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
    struct net_device *dev = gp->dev;
    int limit, i;
    u64 desc_dma;
    u32 val;

    /* First, reset & disable MAC RX. */
    writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
    for (limit = 0; limit < 5000; limit++) {
        if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
            break;
        udelay(10);
    }
    if (limit == 5000) {
        netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
        return 1;
    }

    writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
           gp->regs + MAC_RXCFG);
    for (limit = 0; limit < 5000; limit++) {
        if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
            break;
        udelay(10);
    }
    if (limit == 5000) {
        netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
        return 1;
    }

    /* Second, disable RX DMA. */
    writel(0, gp->regs + RXDMA_CFG);
    for (limit = 0; limit < 5000; limit++) {
        if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
            break;
        udelay(10);
    }
    if (limit == 5000) {
        netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
        return 1;
    }

    mdelay(5);

    /* Execute RX reset command. */
    writel(gp->swrst_base | GREG_SWRST_RXRST,
           gp->regs + GREG_SWRST);
    for (limit = 0; limit < 5000; limit++) {
        if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
            break;
        udelay(10);
    }
    if (limit == 5000) {
        netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
        return 1;
    }

    /* Refresh the RX ring. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct gem_rxd *rxd = &gp->init_block->rxd[i];

        if (gp->rx_skbs[i] == NULL) {
            netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
            return 1;
        }

        rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
    }
    gp->rx_new = gp->rx_old = 0;

    /* Now we must reprogram the rest of RX unit. */
    desc_dma = (u64) gp->gblock_dvma;
    desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
    writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
    writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
    writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
    val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
           ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
    writel(val, gp->regs + RXDMA_CFG);
    if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
        writel(((5 & RXDMA_BLANK_IPKTS) |
                ((8 << 12) & RXDMA_BLANK_ITIME)),
               gp->regs + RXDMA_BLANK);
    else
        writel(((5 & RXDMA_BLANK_IPKTS) |
                ((4 << 12) & RXDMA_BLANK_ITIME)),
               gp->regs + RXDMA_BLANK);
    val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
    val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
    writel(val, gp->regs + RXDMA_PTHRESH);
    val = readl(gp->regs + RXDMA_CFG);
    writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
    writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
    val = readl(gp->regs + MAC_RXCFG);
    writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

    return 0;
}
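/* RX MAC interrupt. An RX fifo overflow triggers the partial reset
 * in gem_rxmac_reset() above; if that fails, the non-zero return
 * value propagates up and the whole chip gets reset instead. The
 * remaining bits are 16-bit hardware counters expiring, accounted
 * for in increments of 0x10000.
 */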
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
    int ret = 0;

    if (netif_msg_intr(gp))
        printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
               gp->dev->name, rxmac_stat);

    if (rxmac_stat & MAC_RXSTAT_OFLW) {
        u32 smac = readl(gp->regs + MAC_SMACHINE);

        netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
        dev->stats.rx_over_errors++;
        dev->stats.rx_fifo_errors++;

        ret = gem_rxmac_reset(gp);
    }

    if (rxmac_stat & MAC_RXSTAT_ACE)
        dev->stats.rx_frame_errors += 0x10000;

    if (rxmac_stat & MAC_RXSTAT_CCE)
        dev->stats.rx_crc_errors += 0x10000;

    if (rxmac_stat & MAC_RXSTAT_LCE)
        dev->stats.rx_length_errors += 0x10000;

    /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
     * events.
     */
    return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

    if (netif_msg_intr(gp))
        printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
               gp->dev->name, mac_cstat);

    /* This interrupt is just for pause frame and pause
     * tracking. It is useful for diagnostics and debug
     * but probably by default we will mask these events.
     */
    if (mac_cstat & MAC_CSTAT_PS)
        gp->pause_entered++;

    if (mac_cstat & MAC_CSTAT_PRCV)
        gp->pause_last_time_recvd = (mac_cstat >> 16);

    return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 mif_status = readl(gp->regs + MIF_STATUS);
    u32 reg_val, changed_bits;

    reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
    changed_bits = (mif_status & MIF_STATUS_STAT);

    gem_handle_mif_event(gp, reg_val, changed_bits);

    return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

    if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
        gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
        netdev_err(dev, "PCI error [%04x]", pci_estat);

        if (pci_estat & GREG_PCIESTAT_BADACK)
            pr_cont(" <No ACK64# during ABS64 cycle>");
        if (pci_estat & GREG_PCIESTAT_DTRTO)
            pr_cont(" <Delayed transaction timeout>");
        if (pci_estat & GREG_PCIESTAT_OTHER)
            pr_cont(" <other>");
        pr_cont("\n");
    } else {
        pci_estat |= GREG_PCIESTAT_OTHER;
        netdev_err(dev, "PCI error\n");
    }

    if (pci_estat & GREG_PCIESTAT_OTHER) {
        u16 pci_cfg_stat;

        /* Interrogate PCI config space for the
         * true cause.
         */
        pci_read_config_word(gp->pdev, PCI_STATUS,
                             &pci_cfg_stat);
        netdev_err(dev, "Read PCI cfg space status [%04x]\n",
                   pci_cfg_stat);
        if (pci_cfg_stat & PCI_STATUS_PARITY)
            netdev_err(dev, "PCI parity error detected\n");
        if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
            netdev_err(dev, "PCI target abort\n");
        if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
            netdev_err(dev, "PCI master acks target abort\n");
        if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
            netdev_err(dev, "PCI master abort\n");
        if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
            netdev_err(dev, "PCI system error SERR#\n");
        if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
            netdev_err(dev, "PCI parity error\n");

        /* Write the error bits back to clear them. */
        pci_cfg_stat &= (PCI_STATUS_PARITY |
                         PCI_STATUS_SIG_TARGET_ABORT |
                         PCI_STATUS_REC_TARGET_ABORT |
                         PCI_STATUS_REC_MASTER_ABORT |
                         PCI_STATUS_SIG_SYSTEM_ERROR |
                         PCI_STATUS_DETECTED_PARITY);
        pci_write_config_word(gp->pdev,
                              PCI_STATUS, pci_cfg_stat);
    }

    /* For all PCI errors, we should reset the chip. */
    return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    if (gem_status & GREG_STAT_RXNOBUF) {
        /* Frame arrived, no free RX buffers available. */
        if (netif_msg_rx_err(gp))
            printk(KERN_DEBUG "%s: no buffer for rx frame\n",
                   gp->dev->name);
        dev->stats.rx_dropped++;
    }

    if (gem_status & GREG_STAT_RXTAGERR) {
        /* corrupt RX tag framing */
        if (netif_msg_rx_err(gp))
            printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                   gp->dev->name);
        dev->stats.rx_errors++;

        return 1;
    }

    if (gem_status & GREG_STAT_PCS) {
        if (gem_pcs_interrupt(dev, gp, gem_status))
            return 1;
    }

    if (gem_status & GREG_STAT_TXMAC) {
        if (gem_txmac_interrupt(dev, gp, gem_status))
            return 1;
    }

    if (gem_status & GREG_STAT_RXMAC) {
        if (gem_rxmac_interrupt(dev, gp, gem_status))
            return 1;
    }

    if (gem_status & GREG_STAT_MAC) {
        if (gem_mac_interrupt(dev, gp, gem_status))
            return 1;
    }

    if (gem_status & GREG_STAT_MIF) {
        if (gem_mif_interrupt(dev, gp, gem_status))
            return 1;
    }

    if (gem_status & GREG_STAT_PCIERR) {
        if (gem_pci_interrupt(dev, gp, gem_status))
            return 1;
    }

    return 0;
}

static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
    int entry, limit;

    entry = gp->tx_old;
    limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
    while (entry != limit) {
        struct sk_buff *skb;
        struct gem_txd *txd;
        dma_addr_t dma_addr;
        u32 dma_len;
        int frag;

        if (netif_msg_tx_done(gp))
            printk(KERN_DEBUG "%s: tx done, slot %d\n",
                   gp->dev->name, entry);
        skb = gp->tx_skbs[entry];
        if (skb_shinfo(skb)->nr_frags) {
            int last = entry + skb_shinfo(skb)->nr_frags;
            int walk = entry;
            int incomplete = 0;

            last &= (TX_RING_SIZE - 1);
            for (;;) {
                walk = NEXT_TX(walk);
                if (walk == limit)
                    incomplete = 1;
                if (walk == last)
                    break;
            }
            if (incomplete)
                break;
        }
        gp->tx_skbs[entry] = NULL;
        dev->stats.tx_bytes += skb->len;

        for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
            txd = &gp->init_block->txd[entry];
            dma_addr = le64_to_cpu(txd->buffer);
            dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

            pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
            entry = NEXT_TX(entry);
        }

        dev->stats.tx_packets++;
        dev_consume_skb_any(skb);
    }
    gp->tx_old = entry;

    /* Need to make the tx_old update visible to gem_start_xmit()
     * before checking for netif_queue_stopped(). Without the
     * memory barrier, there is a small possibility that gem_start_xmit()
     * will miss it and cause the queue to be stopped forever.
     */
    smp_mb();

    if (unlikely(netif_queue_stopped(dev) &&
                 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        __netif_tx_lock(txq, smp_processor_id());
        if (netif_queue_stopped(dev) &&
            TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
            netif_wake_queue(dev);
        __netif_tx_unlock(txq);
    }
}
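/* Return RX descriptors to the chip in clusters of 4. Refreshing
 * only whole aligned clusters (rx_new rounded down to a multiple
 * of 4) lets us do a single RXDMA_KICK for the batch instead of
 * one register write per descriptor.
 */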
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
    int cluster_start, curr, count, kick;

    cluster_start = curr = (gp->rx_new & ~(4 - 1));
    count = 0;
    kick = -1;
    dma_wmb();
    while (curr != limit) {
        curr = NEXT_RX(curr);
        if (++count == 4) {
            struct gem_rxd *rxd =
                &gp->init_block->rxd[cluster_start];
            for (;;) {
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
                rxd++;
                cluster_start = NEXT_RX(cluster_start);
                if (cluster_start == curr)
                    break;
            }
            kick = curr;
            count = 0;
        }
    }
    if (kick >= 0) {
        mb();
        writel(kick, gp->regs + RXDMA_KICK);
    }
}
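/* Padding needed to push an skb data pointer up to the next 64-byte
 * boundary, e.g. an address ending in 0x08 yields an offset of 0x38.
 * gem_alloc_skb() over-allocates by 64 bytes and skb_reserve()s the
 * result so the RX buffer handed to the chip is cache-line aligned.
 */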
#define ALIGNED_RX_SKB_ADDR(addr) \
    ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))

static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
                                                gfp_t gfp_flags)
{
    struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

    if (likely(skb)) {
        unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
        skb_reserve(skb, offset);
    }

    return skb;
}
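/* RX processing. Small frames (up to RX_COPY_THRESHOLD) are copied
 * into a fresh skb so the original DMA buffer can be recycled in
 * place; larger frames are passed up whole and replaced with a
 * newly allocated buffer. Either allocation failure just drops the
 * frame and reuses the ring slot.
 */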
static int gem_rx(struct gem *gp, int work_to_do)
{
    struct net_device *dev = gp->dev;
    int entry, drops, work_done = 0;
    u32 done;
    __sum16 csum;

    if (netif_msg_rx_status(gp))
        printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
               gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

    entry = gp->rx_new;
    drops = 0;
    done = readl(gp->regs + RXDMA_DONE);
    for (;;) {
        struct gem_rxd *rxd = &gp->init_block->rxd[entry];
        struct sk_buff *skb;
        u64 status = le64_to_cpu(rxd->status_word);
        dma_addr_t dma_addr;
        int len;

        if ((status & RXDCTRL_OWN) != 0)
            break;

        if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
            break;

        /* When writing back RX descriptor, GEM writes status
         * then buffer address, possibly in separate transactions.
         * If we don't wait for the chip to write both, we could
         * post a new buffer to this descriptor then have GEM spam
         * on the buffer address. We sync on the RX completion
         * register to prevent this from happening.
         */
        if (entry == done) {
            done = readl(gp->regs + RXDMA_DONE);
            if (entry == done)
                break;
        }

        /* We can now account for the work we're about to do */
        work_done++;

        skb = gp->rx_skbs[entry];

        len = (status & RXDCTRL_BUFSZ) >> 16;
        if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
            dev->stats.rx_errors++;
            if (len < ETH_ZLEN)
                dev->stats.rx_length_errors++;
            if (status & RXDCTRL_BAD)
                dev->stats.rx_crc_errors++;

            /* We'll just return it to GEM. */
        drop_it:
            dev->stats.rx_dropped++;
            goto next;
        }

        dma_addr = le64_to_cpu(rxd->buffer);
        if (len > RX_COPY_THRESHOLD) {
            struct sk_buff *new_skb;

            new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
            if (new_skb == NULL) {
                drops++;
                goto drop_it;
            }
            pci_unmap_page(gp->pdev, dma_addr,
                           RX_BUF_ALLOC_SIZE(gp),
                           PCI_DMA_FROMDEVICE);
            gp->rx_skbs[entry] = new_skb;
            skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
            rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
                                                   virt_to_page(new_skb->data),
                                                   offset_in_page(new_skb->data),
                                                   RX_BUF_ALLOC_SIZE(gp),
                                                   PCI_DMA_FROMDEVICE));
            skb_reserve(new_skb, RX_OFFSET);

            /* Trim the original skb for the netif. */
            skb_trim(skb, len);
        } else {
            struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

            if (copy_skb == NULL) {
                drops++;
                goto drop_it;
            }

            skb_reserve(copy_skb, 2);
            skb_put(copy_skb, len);
            pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
            skb_copy_from_linear_data(skb, copy_skb->data, len);
            pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

            /* We'll reuse the original ring buffer. */
            skb = copy_skb;
        }

        csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
        skb->csum = csum_unfold(csum);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->protocol = eth_type_trans(skb, gp->dev);

        napi_gro_receive(&gp->napi, skb);

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;

    next:
        entry = NEXT_RX(entry);
    }

    gem_post_rxds(gp, entry);

    gp->rx_new = entry;
    if (drops)
        netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

    return work_done;
}
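/* NAPI poll. Note the outer loop: after draining RX we re-read
 * GREG_STAT and go around again if more NAPI-relevant events came
 * in, only re-enabling interrupts once the status is clean and we
 * are still under budget.
 */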
static int gem_poll(struct napi_struct *napi, int budget)
{
    struct gem *gp = container_of(napi, struct gem, napi);
    struct net_device *dev = gp->dev;
    int work_done;

    work_done = 0;
    do {
        /* Handle anomalies */
        if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
            int reset;

            /* We run the abnormal interrupt handling code with
             * the Tx lock. It only resets the Rx portion of the
             * chip, but we need to guard it against DMA being
             * restarted by the link poll timer
             */
            __netif_tx_lock(txq, smp_processor_id());
            reset = gem_abnormal_irq(dev, gp, gp->status);
            __netif_tx_unlock(txq);
            if (reset) {
                gem_schedule_reset(gp);
                napi_complete(napi);
                return work_done;
            }
        }

        /* Run TX completion thread */
        gem_tx(dev, gp, gp->status);

        /* Run RX thread. We don't use any locking here,
         * code willing to do bad things - like cleaning the
         * rx ring - must call napi_disable(), which
         * schedule_timeout()'s if polling is already disabled.
         */
        work_done += gem_rx(gp, budget - work_done);

        if (work_done >= budget)
            return work_done;

        gp->status = readl(gp->regs + GREG_STAT);
    } while (gp->status & GREG_STAT_NAPI);

    napi_complete_done(napi, work_done);
    gem_enable_ints(gp);

    return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct gem *gp = netdev_priv(dev);

    if (napi_schedule_prep(&gp->napi)) {
        u32 gem_status = readl(gp->regs + GREG_STAT);

        if (unlikely(gem_status == 0)) {
            napi_enable(&gp->napi);
            return IRQ_NONE;
        }
        if (netif_msg_intr(gp))
            printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
                   gp->dev->name, gem_status);

        gp->status = gem_status;
        gem_disable_ints(gp);
        __napi_schedule(&gp->napi);
    }

    /* If polling was disabled at the time we received that
     * interrupt, we may return IRQ_HANDLED here while we
     * should return IRQ_NONE. No big deal...
     */
    return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
    struct gem *gp = netdev_priv(dev);

    disable_irq(gp->pdev->irq);
    gem_interrupt(gp->pdev->irq, dev);
    enable_irq(gp->pdev->irq);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
    struct gem *gp = netdev_priv(dev);

    netdev_err(dev, "transmit timed out, resetting\n");

    netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
               readl(gp->regs + TXDMA_CFG),
               readl(gp->regs + MAC_TXSTAT),
               readl(gp->regs + MAC_TXCFG));
    netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
               readl(gp->regs + RXDMA_CFG),
               readl(gp->regs + MAC_RXSTAT),
               readl(gp->regs + MAC_RXCFG));

    gem_schedule_reset(gp);
}

static __inline__ int gem_intme(int entry)
{
    /* Algorithm: IRQ every 1/2 of descriptors. */
    if (!(entry & ((TX_RING_SIZE>>1)-1)))
        return 1;

    return 0;
}
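/* Hard transmit path. For CHECKSUM_PARTIAL skbs the descriptor
 * carries the checksum start/stuff offsets (shifted into place per
 * the TXDCTRL layout) so the MAC computes and inserts the checksum.
 * For fragmented skbs the first (SOF) descriptor is published last,
 * after a dma_wmb(), so the chip can never see a valid SOF before
 * the rest of the chain is in memory.
 */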
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
    struct gem *gp = netdev_priv(dev);
    int entry;
    u64 ctrl;

    ctrl = 0;
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        const u64 csum_start_off = skb_checksum_start_offset(skb);
        const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

        ctrl = (TXDCTRL_CENAB |
                (csum_start_off << 15) |
                (csum_stuff_off << 21));
    }

    if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
        /* This is a hard error, log it. */
        if (!netif_queue_stopped(dev)) {
            netif_stop_queue(dev);
            netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
        }
        return NETDEV_TX_BUSY;
    }

    entry = gp->tx_new;
    gp->tx_skbs[entry] = skb;

    if (skb_shinfo(skb)->nr_frags == 0) {
        struct gem_txd *txd = &gp->init_block->txd[entry];
        dma_addr_t mapping;
        u32 len;

        len = skb->len;
        mapping = pci_map_page(gp->pdev,
                               virt_to_page(skb->data),
                               offset_in_page(skb->data),
                               len, PCI_DMA_TODEVICE);
        ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
        if (gem_intme(entry))
            ctrl |= TXDCTRL_INTME;
        txd->buffer = cpu_to_le64(mapping);
        dma_wmb();
        txd->control_word = cpu_to_le64(ctrl);
        entry = NEXT_TX(entry);
    } else {
        struct gem_txd *txd;
        u32 first_len;
        u64 intme;
        dma_addr_t first_mapping;
        int frag, first_entry = entry;

        intme = 0;
        if (gem_intme(entry))
            intme |= TXDCTRL_INTME;

        /* We must give this initial chunk to the device last.
         * Otherwise we could race with the device.
         */
        first_len = skb_headlen(skb);
        first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
                                     offset_in_page(skb->data),
                                     first_len, PCI_DMA_TODEVICE);
        entry = NEXT_TX(entry);

        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
            const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
            u32 len;
            dma_addr_t mapping;
            u64 this_ctrl;

            len = skb_frag_size(this_frag);
            mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
                                       0, len, DMA_TO_DEVICE);
            this_ctrl = ctrl;
            if (frag == skb_shinfo(skb)->nr_frags - 1)
                this_ctrl |= TXDCTRL_EOF;

            txd = &gp->init_block->txd[entry];
            txd->buffer = cpu_to_le64(mapping);
            dma_wmb();
            txd->control_word = cpu_to_le64(this_ctrl | len);

            if (gem_intme(entry))
                intme |= TXDCTRL_INTME;

            entry = NEXT_TX(entry);
        }
        txd = &gp->init_block->txd[first_entry];
        txd->buffer = cpu_to_le64(first_mapping);
        dma_wmb();
        txd->control_word =
            cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
    }

    gp->tx_new = entry;
    if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
        netif_stop_queue(dev);

        /* netif_stop_queue() must be done before checking
         * tx index in TX_BUFFS_AVAIL() below, because
         * in gem_tx(), we update tx_old before checking for
         * netif_queue_stopped().
         */
        smp_mb();
        if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
            netif_wake_queue(dev);
    }
    if (netif_msg_tx_queued(gp))
        printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
               dev->name, entry, skb->len);
    mb();
    writel(gp->tx_new, gp->regs + TXDMA_KICK);

    return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
    int limit;
    u32 val;

    /* Reset PCS unit. */
    val = readl(gp->regs + PCS_MIICTRL);
    val |= PCS_MIICTRL_RST;
    writel(val, gp->regs + PCS_MIICTRL);

    limit = 32;
    while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
        udelay(100);
        if (limit-- <= 0)
            break;
    }
    if (limit < 0)
        netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
    u32 val;

    /* Make sure PCS is disabled while changing advertisement
     * configuration.
     */
    val = readl(gp->regs + PCS_CFG);
    val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
    writel(val, gp->regs + PCS_CFG);

    /* Advertise all capabilities except asymmetric
     * pause.
     */
    val = readl(gp->regs + PCS_MIIADV);
    val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
            PCS_MIIADV_SP | PCS_MIIADV_AP);
    writel(val, gp->regs + PCS_MIIADV);

    /* Enable and restart auto-negotiation, disable wrapback/loopback,
     * and re-enable PCS.
     */
    val = readl(gp->regs + PCS_MIICTRL);
    val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
    val &= ~PCS_MIICTRL_WB;
    writel(val, gp->regs + PCS_MIICTRL);

    val = readl(gp->regs + PCS_CFG);
    val |= PCS_CFG_ENABLE;
    writel(val, gp->regs + PCS_CFG);

    /* Make sure serialink loopback is off. The meaning
     * of this bit is logically inverted based upon whether
     * you are in Serialink or SERDES mode.
     */
    val = readl(gp->regs + PCS_SCTRL);
    if (gp->phy_type == phy_serialink)
        val &= ~PCS_SCTRL_LOOP;
    else
        val |= PCS_SCTRL_LOOP;
    writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

static void gem_reset(struct gem *gp)
{
    int limit;
    u32 val;

    /* Make sure we won't get any more interrupts */
    writel(0xffffffff, gp->regs + GREG_IMASK);

    /* Reset the chip */
    writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
           gp->regs + GREG_SWRST);

    limit = STOP_TRIES;

    do {
        udelay(20);
        val = readl(gp->regs + GREG_SWRST);
        if (limit-- <= 0)
            break;
    } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

    if (limit < 0)
        netdev_err(gp->dev, "SW reset is ghetto\n");

    if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
        gem_pcs_reinit_adv(gp);
}

static void gem_start_dma(struct gem *gp)
{
    u32 val;

    /* We are ready to rock, turn everything on. */
    val = readl(gp->regs + TXDMA_CFG);
    writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
    val = readl(gp->regs + RXDMA_CFG);
    writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
    val = readl(gp->regs + MAC_TXCFG);
    writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
    val = readl(gp->regs + MAC_RXCFG);
    writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

    (void) readl(gp->regs + MAC_RXCFG);
    udelay(100);

    gem_enable_ints(gp);

    writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
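/* Note the initial RX kick of RX_RING_SIZE - 4 above: it leaves the
 * chip one whole 4-descriptor cluster short of the ring end, which
 * matches the cluster-of-4 refresh done in gem_post_rxds().
 */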
/* DMA won't be actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
    u32 val;

    /* We are done rocking, turn everything off. */
    val = readl(gp->regs + TXDMA_CFG);
    writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
    val = readl(gp->regs + RXDMA_CFG);
    writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
    val = readl(gp->regs + MAC_TXCFG);
    writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
    val = readl(gp->regs + MAC_RXCFG);
    writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

    (void) readl(gp->regs + MAC_RXCFG);

    /* Need to wait a bit ... done by the caller */
}

// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp,
                                       const struct ethtool_link_ksettings *ep)
{
    u32 advertise, features;
    int autoneg;
    int speed;
    int duplex;
    u32 advertising;

    if (ep)
        ethtool_convert_link_mode_to_legacy_u32(
            &advertising, ep->link_modes.advertising);

    if (gp->phy_type != phy_mii_mdio0 &&
        gp->phy_type != phy_mii_mdio1)
        goto non_mii;

    /* Setup advertise */
    if (found_mii_phy(gp))
        features = gp->phy_mii.def->features;
    else
        features = 0;

    advertise = features & ADVERTISE_MASK;
    if (gp->phy_mii.advertising != 0)
        advertise &= gp->phy_mii.advertising;

    autoneg = gp->want_autoneg;
    speed = gp->phy_mii.speed;
    duplex = gp->phy_mii.duplex;

    /* Setup link parameters */
    if (!ep)
        goto start_aneg;
    if (ep->base.autoneg == AUTONEG_ENABLE) {
        advertise = advertising;
        autoneg = 1;
    } else {
        autoneg = 0;
        speed = ep->base.speed;
        duplex = ep->base.duplex;
    }

start_aneg:
    /* Sanitize settings based on PHY capabilities */
    if ((features & SUPPORTED_Autoneg) == 0)
        autoneg = 0;
    if (speed == SPEED_1000 &&
        !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
        speed = SPEED_100;
    if (speed == SPEED_100 &&
        !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
        speed = SPEED_10;
    if (duplex == DUPLEX_FULL &&
        !(features & (SUPPORTED_1000baseT_Full |
                      SUPPORTED_100baseT_Full |
                      SUPPORTED_10baseT_Full)))
        duplex = DUPLEX_HALF;
    if (speed == 0)
        speed = SPEED_10;

    /* If we are asleep, we don't try to actually setup the PHY, we
     * just store the settings
     */
    if (!netif_device_present(gp->dev)) {
        gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
        gp->phy_mii.speed = speed;
        gp->phy_mii.duplex = duplex;
        return;
    }

    /* Configure PHY & start aneg */
    gp->want_autoneg = autoneg;
    if (autoneg) {
        if (found_mii_phy(gp))
            gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
        gp->lstate = link_aneg;
    } else {
        if (found_mii_phy(gp))
            gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
        gp->lstate = link_force_ok;
    }

non_mii:
    gp->timer_ticks = 0;
    mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static int gem_set_link_modes(struct gem *gp)
{
    struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
    int full_duplex, speed, pause;
    u32 val;

    full_duplex = 0;
    speed = SPEED_10;
    pause = 0;

    if (found_mii_phy(gp)) {
        if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
            return 1;
        full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
        speed = gp->phy_mii.speed;
        pause = gp->phy_mii.pause;
    } else if (gp->phy_type == phy_serialink ||
               gp->phy_type == phy_serdes) {
        u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

        if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
            full_duplex = 1;
        speed = SPEED_1000;
    }

    netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
               speed, (full_duplex ? "full" : "half"));

    /* We take the tx queue lock to avoid collisions between
     * this code, the tx path and the NAPI-driven error path
     */
    __netif_tx_lock(txq, smp_processor_id());

    val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
    if (full_duplex) {
        val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
    } else {
        /* MAC_TXCFG_NBO must be zero. */
    }
    writel(val, gp->regs + MAC_TXCFG);

    val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
    if (!full_duplex &&
        (gp->phy_type == phy_mii_mdio0 ||
         gp->phy_type == phy_mii_mdio1)) {
        val |= MAC_XIFCFG_DISE;
    } else if (full_duplex) {
        val |= MAC_XIFCFG_FLED;
    }

    if (speed == SPEED_1000)
        val |= (MAC_XIFCFG_GMII);

    writel(val, gp->regs + MAC_XIFCFG);

    /* If gigabit and half-duplex, enable carrier extension
     * mode. Else, disable it.
     */
    if (speed == SPEED_1000 && !full_duplex) {
        val = readl(gp->regs + MAC_TXCFG);
        writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

        val = readl(gp->regs + MAC_RXCFG);
        writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
    } else {
        val = readl(gp->regs + MAC_TXCFG);
        writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

        val = readl(gp->regs + MAC_RXCFG);
        writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
    }

    if (gp->phy_type == phy_serialink ||
        gp->phy_type == phy_serdes) {
        u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

        if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
            pause = 1;
    }

    if (!full_duplex)
        writel(512, gp->regs + MAC_STIME);
    else
        writel(64, gp->regs + MAC_STIME);
    val = readl(gp->regs + MAC_MCCFG);
    if (pause)
        val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
    else
        val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
    writel(val, gp->regs + MAC_MCCFG);

    gem_start_dma(gp);

    __netif_tx_unlock(txq);

    if (netif_msg_link(gp)) {
        if (pause) {
            netdev_info(gp->dev,
                        "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
                        gp->rx_fifo_sz,
                        gp->rx_pause_off,
                        gp->rx_pause_on);
        } else {
            netdev_info(gp->dev, "Pause is disabled\n");
        }
    }

    return 0;
}
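/* Autoneg fallback state machine: a failed aneg first forces
 * 100Mbit/half (link_force_try), then drops to 10Mbit/half, and if
 * a forced link later comes up while autoneg was wanted we retry
 * one short aneg pass (link_force_ret). PHYs flagged magic_aneg
 * handle the forced-mode dance internally, so we just restart aneg.
 */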
static int gem_mdio_link_not_up(struct gem *gp)
{
    switch (gp->lstate) {
    case link_force_ret:
        netif_info(gp, link, gp->dev,
                   "Autoneg failed again, keeping forced mode\n");
        gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
                                           gp->last_forced_speed, DUPLEX_HALF);
        gp->timer_ticks = 5;
        gp->lstate = link_force_ok;
        return 0;
    case link_aneg:
        /* We try forced modes after a failed aneg only on PHYs that don't
         * have the "magic_aneg" bit set, which means they internally do the
         * whole forced-mode thingy. On these, we just restart aneg
         */
        if (gp->phy_mii.def->magic_aneg)
            return 1;
        netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
        /* Try forced modes. */
        gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
                                           DUPLEX_HALF);
        gp->timer_ticks = 5;
        gp->lstate = link_force_try;
        return 0;
    case link_force_try:
        /* Downgrade from 100 to 10 Mbps if necessary.
         * If already at 10Mbps, warn user about the
         * situation every 10 ticks.
         */
        if (gp->phy_mii.speed == SPEED_100) {
            gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
                                               DUPLEX_HALF);
            gp->timer_ticks = 5;
            netif_info(gp, link, gp->dev,
                       "switching to forced 10bt\n");
            return 0;
        } else
            return 1;
    default:
        return 0;
    }
}
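/* Link poll timer, re-armed every 1.2 seconds ((12 * HZ) / 10).
 * It watches either the PCS link-status bit (serialink/serdes) or
 * the MII PHY, promotes link-up transitions via gem_set_link_modes()
 * and falls back to gem_mdio_link_not_up() after ~10 ticks without
 * link.
 */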
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}

	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			/* The reset task will restart the timer */
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

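/* Release every buffer attached to the RX and TX rings: unmap the DMA
 * pages, free the skbs and clear the descriptors. Note the TX loop
 * walks one descriptor per fragment of a multi-fragment skb, advancing
 * the outer index as it goes.
 */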
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		dma_wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

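/* (Re)populate both rings. For RX we allocate fresh buffers and publish
 * each descriptor in two steps: the buffer address is written first,
 * then, after a dma_wmb() barrier, the status word that hands ownership
 * to the chip, so the hardware never sees a "fresh" descriptor with a
 * stale buffer pointer.
 */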
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		dma_wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		dma_wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, the HW seems to love them though, I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have problems getting back to us,
			 * we do an additional reset here
			 */
			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		/* Reset and detect MII PHY */
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Print things out */
	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	gem_begin_auto_negotiation(gp, NULL);
}

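/* Program the TX and RX DMA engines: descriptor ring bases are written
 * as a 32-bit high/low pair, the RX kick register is primed with
 * RX_RING_SIZE - 4 descriptors, and the interrupt blanking values
 * depend on whether the bus runs at 66MHz (GREG_BIFCFG_M66EN).
 */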
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}

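/* Compute the RX configuration bits for the current multicast state.
 * For a moderate number of groups we build the 256-bit hash filter:
 * the top 8 bits of the little-endian CRC of each address select one
 * filter bit, with crc >> 4 picking one of the 16 registers and the
 * low nibble the bit within it (MSB first).
 */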
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(gp->dev) > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct netdev_hw_addr *ha;
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, gp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}

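/* Bring the MAC to a known state: inter-packet gaps, frame size limits,
 * the station address (loaded 16 bits at a time into MAC_ADDR0..2,
 * while MAC_ADDR6..8 hold the fixed 01:80:c2:00:00:01 pause-frame
 * match address), address filters, and cleared statistics counters.
 */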
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif

	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}

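/* Derive the RX FIFO occupancy levels (programmed in 64-byte units by
 * gem_init_dma) at which the MAC sends pause frames: XOFF once less
 * than two max-sized frames of space remain, XON again one frame
 * lower. Small-FIFO 10/100 parts get both thresholds set to the full
 * FIFO size, which effectively disables pause generation.
 */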
static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

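/* Sanity-check the chip variant at probe time: pick the PHY interface
 * (MDIO0/MDIO1/Serialink/SERDES), find a responding MII PHY address
 * where applicable, and verify the TX/RX FIFO sizes match what the
 * variant is known to ship with. Returns -1 when the hardware looks
 * bogus.
 */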
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT | MIF_CFG_POLL | MIF_CFG_BBMODE | MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}

static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}

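/* Power the PHY/MAC down for suspend or interface close. With WOL
 * requested and supported, the MAC is left receiving with the station
 * address loaded into the WOL match registers so a magic packet can
 * wake the machine; otherwise everything is quiesced and the MDIO pins
 * are parked in bit-bang mode as Apple recommends.
 */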
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}

static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume(), this has
	 * no effect if we weren't detached and needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc... this will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled. We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Cell not needed either if there is no WOL */
	if (!wol)
		gem_put_cell(gp);
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend)
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	/* We are back ! */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing,
	 * the chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);

	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this. Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
bail:
	return &dev->stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);

	/* We'll just catch it later when the device is brought up or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	ETH_MIN_MTU
#if 1
#define GEM_MAX_MTU	ETH_DATA_LEN
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is brought up or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

static int gem_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 supported, advertising;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			supported = gp->phy_mii.def->features;
		else
			supported = (SUPPORTED_10baseT_Half |
				     SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->base.port = PORT_MII;
		cmd->base.phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->base.autoneg = gp->want_autoneg;
		cmd->base.speed = gp->phy_mii.speed;
		cmd->base.duplex = gp->phy_mii.duplex;
		advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (advertising == 0)
			advertising = supported;
	} else { // XXX PCS ?
		supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		advertising = supported;
		cmd->base.speed = 0;
		cmd->base.duplex = 0;
		cmd->base.port = 0;
		cmd->base.phy_address = 0;
		cmd->base.autoneg = 0;

		/* serdes usually means a fibre connector, with most
		 * settings fixed
		 */
		if (gp->phy_type == phy_serdes) {
			cmd->base.port = PORT_FIBRE;
			supported = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				     SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			advertising = supported;
			if (gp->lstate == link_up)
				cmd->base.speed = SPEED_1000;
			cmd->base.duplex = DUPLEX_FULL;
			cmd->base.autoneg = 1;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int gem_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* Verify the settings we care about. */
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_ENABLE &&
	    advertising == 0)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);

	gp->msg_enable = value;
}

/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
	.get_link_ksettings	= gem_get_link_ksettings,
	.set_link_ksettings	= gem_set_link_ksettings,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
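/* The six-byte signature scanned for below appears to be the PCI VPD
 * encoding of a network address: a VPD-R resource descriptor tag (0x90)
 * with its length bytes, then the "NA" keyword (0x4e 0x41) with a data
 * length of 6, so the MAC address itself follows immediately after.
 */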
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */

static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Ensure reset task is truly gone */
		cancel_work_sync(&gp->reset_task);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

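/* Probe path. Roughly: enable the PCI device, pick a DMA mask (64-bit
 * only on genuine Sun GEMs, 32-bit otherwise), map BAR 0, allocate the
 * netdev, sanity-check the chip with gem_check_invariants(), allocate
 * the coherent init block holding both descriptor rings, fetch the MAC
 * address and register with the networking core. The cell is powered
 * down again at the end; gem_do_start() powers it back up on open.
 */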
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine.  However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _sungem_phy_read;
	gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	err = gem_get_device_address(gp);
	if (err)
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* We can do scatter/gather and HW checksum */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 1500 (Jumbo mode is broken) */
	dev->min_mtu = GEM_MIN_MTU;
	dev->max_mtu = GEM_MAX_MTU;

	/* Register with kernel */
	if (register_netdev(dev)) {
		pr_err("Cannot register net device, aborting\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	/* Undo the get_cell with appropriate locking (we could use
	 * ndo_init/uninit but that would be even more clumsy imho)
	 */
	rtnl_lock();
	gem_put_cell(gp);
	rtnl_unlock();

	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
		    dev->dev_addr);
	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;

}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

module_pci_driver(gem_driver);