// SPDX-License-Identifier: GPL-2.0
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG (NETIF_MSG_DRV | \
		     NETIF_MSG_PROBE | \
		     NETIF_MSG_LINK)

#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
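
/* The MIF speaks standard IEEE 802.3 clause 22 MDIO frames: bits 31:30
 * carry the start pattern (01), bits 29:28 the opcode (10 = read,
 * 01 = write), followed by the PHY address, register address, turnaround
 * and data fields. The chip raises MIF_FRAME_TALSB once the frame has
 * completed on the wire, so the helpers below poll for that bit.
 */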
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}
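
/* Interrupt-mask convention: bits set in GREG_IMASK are *disabled*.
 * "Enabling" interrupts thus means masking only TXDONE (TX reclaim is
 * handled from the NAPI poll loop instead), while "disabling" masks
 * the whole NAPI source set plus TXDONE.
 */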
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}
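
/* Turn on the chip's clock. The cell enable is reference counted, so
 * nested get/put pairs are safe; on PowerMacs the actual switch goes
 * through the platform feature call.
 */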
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static inline void gem_netif_stop(struct gem *gp)
{
	netif_trans_update(gp->dev);	/* prevent tx timeout */
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
	/* NOTE: unconditional netif_wake_queue is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots.
	 */
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}

static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;
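	/* The RX descriptor ring lives in the same init block as the TX
	 * ring, immediately after it: its bus address is the block's DMA
	 * handle plus the size of the TX ring.
	 */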
	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
			   pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}

static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
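		/* For a fragmented skb, every descriptor belonging to it
		 * must have completed before we may unmap and free it.
		 * Walk forward to the last fragment's slot and bail out
		 * if hardware completion has not reached it yet.
		 */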
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

	/* Need to make the tx_old update visible to gem_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that gem_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}
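
/* Refresh RX descriptors in clusters of four and kick the chip once
 * with the index of the last full cluster, rather than after every
 * descriptor; batching keeps the MMIO kick writes (and descriptor
 * writebacks) to a minimum, which is the likely reason for the
 * four-descriptor grouping.
 */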
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	dma_wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
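
/* RX buffers must be 64-byte aligned for the chip's DMA. The macro
 * below yields the padding needed to round an address up to the next
 * 64-byte boundary; gem_alloc_skb() over-allocates by 64 bytes and
 * reserves that padding.
 */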
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))

static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}

	return skb;
}

static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				dev->stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			dev->stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
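		/* Copybreak: large packets get a fresh DMA buffer posted
		 * to the ring and the original full-size skb is passed up
		 * (buffer flip); small packets are copied into a new skb
		 * so the original buffer goes straight back to the chip.
		 */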
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}
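		/* The status word carries a 16-bit hardware checksum over
		 * the packet (RXDCTRL_TCPCSUM); the XOR with 0xffff undoes
		 * the chip's inversion, and CHECKSUM_COMPLETE lets the
		 * stack verify any L4 checksum from that raw sum.
		 */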
		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		napi_gro_receive(&gp->napi, skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

	return work_done;
}

static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	int work_done;

	work_done = 0;
	do {
		/* Handle anomalies */
		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			int reset;

			/* We run the abnormal interrupt handling code with
			 * the Tx lock. It only resets the Rx portion of the
			 * chip, but we need to guard it against DMA being
			 * restarted by the link poll timer
			 */
			__netif_tx_lock(txq, smp_processor_id());
			reset = gem_abnormal_irq(dev, gp, gp->status);
			__netif_tx_unlock(txq);
			if (reset) {
				gem_schedule_reset(gp);
				napi_complete(napi);
				return work_done;
			}
		}

		/* Run TX completion thread */
		gem_tx(dev, gp, gp->status);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	napi_complete_done(napi, work_done);
	gem_enable_ints(gp);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (unlikely(gem_status == 0)) {
			napi_enable(&gp->napi);
			return IRQ_NONE;
		}
		if (netif_msg_intr(gp))
			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
			       gp->dev->name, gem_status);

		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	disable_irq(gp->pdev->irq);
	gem_interrupt(gp->pdev->irq, dev);
	enable_irq(gp->pdev->irq);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + TXDMA_CFG),
		   readl(gp->regs + MAC_TXSTAT),
		   readl(gp->regs + MAC_TXCFG));
	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + RXDMA_CFG),
		   readl(gp->regs + MAC_RXSTAT),
		   readl(gp->regs + MAC_RXCFG));

	gem_schedule_reset(gp);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;

	ctrl = 0;
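	/* Hardware TX checksum offload: TXDCTRL_CENAB turns it on, and
	 * the control word carries the byte offset where summing starts
	 * (bits 15+) and the offset where the result is stuffed back
	 * into the packet (bits 21+).
	 */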
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		/* This is a hard error, log it. */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		dma_wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = skb_frag_size(this_frag);
			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
						   0, len, DMA_TO_DEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			dma_wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		dma_wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

		/* netif_stop_queue() must be done before checking
		 * tx index in TX_BUFFS_AVAIL() below, because
		 * in gem_tx(), we update tx_old before checking for
		 * netif_queue_stopped().
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		netdev_err(gp->dev, "SW reset is ghetto\n");

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* DMA won't be actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}

// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp,
				       const struct ethtool_link_ksettings *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;
	u32 advertising;

	if (ep)
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ep->link_modes.advertising);

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		advertise = advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->base.speed;
		duplex = ep->base.duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static int gem_set_link_modes(struct gem *gp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
	int full_duplex, speed, pause;
	u32 val;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, (full_duplex ? "full" : "half"));

	/* We take the tx queue lock to avoid collisions between
	 * this code, the tx path and the NAPI-driven error path
	 */
	__netif_tx_lock(txq, smp_processor_id());

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}
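	/* Slot time: half duplex programs 512, the extended slot time
	 * needed for gigabit half-duplex collision handling (presumably
	 * harmless at lower speeds); full duplex has no collisions and
	 * uses the minimal 64.
	 */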
	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	__netif_tx_unlock(txq);

	if (netif_msg_link(gp)) {
		if (pause) {
			netdev_info(gp->dev,
				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    gp->rx_fifo_sz,
				    gp->rx_pause_off,
				    gp->rx_pause_on);
		} else {
			netdev_info(gp->dev, "Pause is disabled\n");
		}
	}

	return 0;
}
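
/* Autoneg fallback ladder: when autonegotiation fails we step down to
 * forced 100Mbit half duplex, then forced 10Mbit half duplex; if a
 * forced link later comes up while autoneg was wanted, gem_link_timer()
 * retries one more aneg pass (link_force_ret).
 */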
  1218. static int gem_mdio_link_not_up(struct gem *gp)
  1219. {
  1220. switch (gp->lstate) {
  1221. case link_force_ret:
  1222. netif_info(gp, link, gp->dev,
  1223. "Autoneg failed again, keeping forced mode\n");
  1224. gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
  1225. gp->last_forced_speed, DUPLEX_HALF);
  1226. gp->timer_ticks = 5;
  1227. gp->lstate = link_force_ok;
  1228. return 0;
  1229. case link_aneg:
  1230. /* We try forced modes after a failed aneg only on PHYs that don't
  1231. * have "magic_aneg" bit set, which means they internally do the
  1232. * while forced-mode thingy. On these, we just restart aneg
  1233. */
  1234. if (gp->phy_mii.def->magic_aneg)
  1235. return 1;
  1236. netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
  1237. /* Try forced modes. */
  1238. gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
  1239. DUPLEX_HALF);
  1240. gp->timer_ticks = 5;
  1241. gp->lstate = link_force_try;
  1242. return 0;
  1243. case link_force_try:
  1244. /* Downgrade from 100 to 10 Mbps if necessary.
  1245. * If already at 10Mbps, warn user about the
  1246. * situation every 10 ticks.
  1247. */
  1248. if (gp->phy_mii.speed == SPEED_100) {
  1249. gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
  1250. DUPLEX_HALF);
  1251. gp->timer_ticks = 5;
  1252. netif_info(gp, link, gp->dev,
  1253. "switching to forced 10bt\n");
  1254. return 0;
  1255. } else
  1256. return 1;
  1257. default:
  1258. return 0;
  1259. }
  1260. }
static void gem_link_timer(struct timer_list *t)
{
	struct gem *gp = from_timer(gp, t, link_timer);
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}

	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			/* The reset task will restart the timer */
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
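
/* Unmap and free any skbs still attached to the RX and TX rings,
 * leaving all descriptors zeroed. Called with the chip stopped.
 */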
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		dma_wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}
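
/* (Re)build the rings: allocate and map one receive skb per RX
 * descriptor and mark it fresh for the chip, then zero out the TX
 * descriptors. The dma_wmb() publishes the buffer address before the
 * status word hands the descriptor to the hardware.
 */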
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		dma_wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		dma_wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* These delays suck, but the HW seems to love them, so I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead.
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have problems getting back
			 * to us, so we do an additional reset here.
			 */
			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		/* Reset and detect MII PHY */
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Print things out */
	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	gem_begin_auto_negotiation(gp, NULL);
}
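
/* Program the TX and RX DMA engines: descriptor ring base addresses,
 * burst/threshold configuration, the pause thresholds computed in
 * gem_init_pause_thresholds(), and the RX interrupt blanking
 * (coalescing) settings, which differ for a 66MHz bus.
 */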
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
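
/* Compute the RX filter configuration for the current multicast list.
 * ALLMULTI or more than 256 addresses: accept all multicast by filling
 * the hash table. Otherwise the top 8 bits of the little-endian CRC of
 * each address select one of 256 hash bits, spread across sixteen
 * 16-bit MAC_HASH registers. Returns the MAC_RXCFG bits to enable.
 */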
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(gp->dev) > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct netdev_hw_addr *ha;
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, gp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}
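
/* Bring the MAC to a known state: program inter-packet gaps and frame
 * size limits, the station address and the 802.3x pause destination
 * address (01:80:c2:00:00:01), the multicast filter, then clear the
 * statistics counters and mask everything except the counter
 * expiration interrupts.
 */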
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif

	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts. We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}

static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
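
/* Sanity-check the chip variant we are driving: determine the PHY
 * interface type (MDIO0/MDIO1/serialink/serdes), probe for the MII
 * PHY address and validate the FIFO sizes. Returns 0 on success,
 * -1 if the hardware doesn't look like a GEM we can handle.
 */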
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT | MIF_CFG_POLL | MIF_CFG_BBMODE | MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess. MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}
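
/* Full chip re-initialization sequence, used on open, MTU change and
 * by the reset task: reset, mask interrupts, rebuild the rings, then
 * reprogram the DMA and MAC engines.
 */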
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
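
/* Power the link interface down for suspend or close. If WOL is
 * requested and available, arm the wake-up logic for MAGIC packets
 * instead of fully resetting the MAC and suspending the PHY.
 */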
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this
		 * benign state or we may 1) eat more current, 2) damage
		 * some PHYs.
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
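
/* Bring the interface fully up: power the cell, enable PCI access,
 * reinitialize the chip, grab the interrupt and kick off PHY init
 * and autonegotiation. Called from open and resume.
 */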
static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume(), this has
	 * no effect if we weren't detached and needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc... this will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}
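
/* Tear the interface down, the mirror image of gem_do_start(): stop
 * NAPI and DMA, free the rings and the interrupt, then power down the
 * PHY, optionally leaving the WOL logic armed.
 */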
static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled. We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* No need to keep the cell enabled either if there's no WOL */
	if (!wol)
		gem_put_cell(gp);
}
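
/* Deferred chip reset, scheduled through gem_schedule_reset() (for
 * instance when the link timer sees the link drop). Runs under
 * rtnl_lock so it cannot race open, close or suspend.
 */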
static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend)
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	/* We are back ! */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing,
	 * the chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);
	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this. Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
bail:
	return &dev->stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	ETH_MIN_MTU
#if 1
#define GEM_MAX_MTU	ETH_DATA_LEN
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

static int gem_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 supported, advertising;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			supported = gp->phy_mii.def->features;
		else
			supported = (SUPPORTED_10baseT_Half |
				     SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->base.port = PORT_MII;
		cmd->base.phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->base.autoneg = gp->want_autoneg;
		cmd->base.speed = gp->phy_mii.speed;
		cmd->base.duplex = gp->phy_mii.duplex;
		advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (advertising == 0)
			advertising = supported;
	} else { // XXX PCS ?
		supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		advertising = supported;
		cmd->base.speed = 0;
		cmd->base.duplex = 0;
		cmd->base.port = 0;
		cmd->base.phy_address = 0;
		cmd->base.autoneg = 0;

		/* serdes usually means a Fibre connector, with most
		 * settings fixed.
		 */
		if (gp->phy_type == phy_serdes) {
			cmd->base.port = PORT_FIBRE;
			supported = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				     SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			advertising = supported;
			if (gp->lstate == link_up)
				cmd->base.speed = SPEED_1000;
			cmd->base.duplex = DUPLEX_FULL;
			cmd->base.autoneg = 1;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int gem_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* Verify the settings we care about. */
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_ENABLE &&
	    advertising == 0)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);

	gp->msg_enable = value;
}

/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
	.get_link_ksettings	= gem_get_link_ksettings,
	.set_link_ksettings	= gem_set_link_ksettings,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */

static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Ensure reset task is truly gone */
		cancel_work_sync(&gp->reset_task);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine. However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	timer_setup(&gp->link_timer, gem_link_timer, 0);

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _sungem_phy_read;
	gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	err = gem_get_device_address(gp);
	if (err)
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* We can do scatter/gather and HW checksum */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 1500 (Jumbo mode is broken) */
	dev->min_mtu = GEM_MIN_MTU;
	dev->max_mtu = GEM_MAX_MTU;

	/* Register with kernel */
	if (register_netdev(dev)) {
		pr_err("Cannot register net device, aborting\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	/* Undo the get_cell with appropriate locking (we could use
	 * ndo_init/uninit but that would be even more clumsy imho)
	 */
	rtnl_lock();
	gem_put_cell(gp);
	rtnl_unlock();

	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
		    dev->dev_addr);
	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

module_pci_driver(gem_driver);