/*
 * smc911x.c
 * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
 *
 * Copyright (C) 2005 Sensoria Corp
 * Derived from the unified SMC91x driver by Nicolas Pitre
 * and the smsc911x.c reference driver by SMSC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Arguments:
 *	watchdog   = TX watchdog timeout
 *	tx_fifo_kb = Size of TX FIFO in KB
 *
 * History:
 *	04/16/05  Dustin McIntire  Initial version
 */
static const char version[] =
	"smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";

/* Debugging options */
#define ENABLE_SMC_DEBUG_RX	0
#define ENABLE_SMC_DEBUG_TX	0
#define ENABLE_SMC_DEBUG_DMA	0
#define ENABLE_SMC_DEBUG_PKTS	0
#define ENABLE_SMC_DEBUG_MISC	0
#define ENABLE_SMC_DEBUG_FUNC	0

#define SMC_DEBUG_RX	((ENABLE_SMC_DEBUG_RX   ? 1 : 0) << 0)
#define SMC_DEBUG_TX	((ENABLE_SMC_DEBUG_TX   ? 1 : 0) << 1)
#define SMC_DEBUG_DMA	((ENABLE_SMC_DEBUG_DMA  ? 1 : 0) << 2)
#define SMC_DEBUG_PKTS	((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
#define SMC_DEBUG_MISC	((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
#define SMC_DEBUG_FUNC	((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)

#ifndef SMC_DEBUG
#define SMC_DEBUG	( SMC_DEBUG_RX	 | \
			  SMC_DEBUG_TX	 | \
			  SMC_DEBUG_DMA	 | \
			  SMC_DEBUG_PKTS | \
			  SMC_DEBUG_MISC | \
			  SMC_DEBUG_FUNC   \
			)
#endif
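
/*
 * Note (added): to get verbose output from the DBG() macros below, set the
 * relevant ENABLE_SMC_DEBUG_* flags above to 1 (or define SMC_DEBUG before
 * this point). With all flags at 0, SMC_DEBUG evaluates to 0 and DBG()
 * compiles away to an empty statement.
 */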
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/io.h>

#include "smc911x.h"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

static int tx_fifo_kb = 8;
module_param(tx_fifo_kb, int, 0400);
MODULE_PARM_DESC(tx_fifo_kb, "transmit FIFO size in KB (1<x<15)(default=8)");

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc911x");
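
/*
 * Note (added): both values above are ordinary module parameters, so they
 * can be set at load time, e.g. (illustrative invocation):
 *
 *	modprobe smc911x watchdog=5000 tx_fifo_kb=8
 *
 * The 0400 permission makes them visible read-only under
 * /sys/module/smc911x/parameters once the module is loaded.
 */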
/*
 * The internal workings of the driver. If you are changing anything
 * here with the SMC stuff, you should have the datasheet and know
 * what you are doing.
 */
#define CARDNAME "smc911x"

/*
 * Use power-down feature of the chip
 */
#define POWER_DOWN	1

#if SMC_DEBUG > 0
#define DBG(n, dev, args...)			\
	do {					\
		if (SMC_DEBUG & (n))		\
			netdev_dbg(dev, args);	\
	} while (0)

#define PRINTK(dev, args...)  netdev_info(dev, args)
#else
#define DBG(n, dev, args...)  do { } while (0)
#define PRINTK(dev, args...)  netdev_dbg(dev, args)
#endif

#if SMC_DEBUG_PKTS > 0
static void PRINT_PKT(u_char *buf, int length)
{
	int i;
	int remainder;
	int lines;

	lines = length / 16;
	remainder = length % 16;

	for (i = 0; i < lines ; i ++) {
		int cur;
		printk(KERN_DEBUG);
		for (cur = 0; cur < 8; cur++) {
			u_char a, b;
			a = *buf++;
			b = *buf++;
			pr_cont("%02x%02x ", a, b);
		}
		pr_cont("\n");
	}
	printk(KERN_DEBUG);
	for (i = 0; i < remainder/2 ; i++) {
		u_char a, b;
		a = *buf++;
		b = *buf++;
		pr_cont("%02x%02x ", a, b);
	}
	pr_cont("\n");
}
#else
#define PRINT_PKT(x...)  do { } while (0)
#endif
/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do {			\
	unsigned int  __mask;				\
	__mask = SMC_GET_INT_EN((lp));			\
	__mask |= (x);					\
	SMC_SET_INT_EN((lp), __mask);			\
} while (0)

/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do {			\
	unsigned int  __mask;				\
	__mask = SMC_GET_INT_EN((lp));			\
	__mask &= ~(x);					\
	SMC_SET_INT_EN((lp), __mask);			\
} while (0)
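
/*
 * Note (added): both macros are a plain read-modify-write of the INT_EN
 * register, so they assume the caller already serializes against other
 * interrupt-mask updates (most call sites in this driver hold lp->lock).
 */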
/*
 * this does a soft reset on the device
 */
static void smc911x_reset(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int reg, timeout=0, resets=1, irq_cfg;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* Take out of PM setting first */
	if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
		/* Write to the bytetest will take out of powerdown */
		SMC_SET_BYTE_TEST(lp, 0);
		timeout=10;
		do {
			udelay(10);
			reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
		} while (--timeout && !reg);
		if (timeout == 0) {
			PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
			return;
		}
	}

	/* Disable all interrupts */
	spin_lock_irqsave(&lp->lock, flags);
	SMC_SET_INT_EN(lp, 0);
	spin_unlock_irqrestore(&lp->lock, flags);

	while (resets--) {
		SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
		timeout=10;
		do {
			udelay(10);
			reg = SMC_GET_HW_CFG(lp);
			/* If chip indicates reset timeout then try again */
			if (reg & HW_CFG_SRST_TO_) {
				PRINTK(dev, "chip reset timeout, retrying...\n");
				resets++;
				break;
			}
		} while (--timeout && (reg & HW_CFG_SRST_));
	}
	if (timeout == 0) {
		PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
		return;
	}

	/* make sure EEPROM has finished loading before setting GPIO_CFG */
	timeout=1000;
	while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
		udelay(10);

	if (timeout == 0) {
		PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
		return;
	}

	/* Initialize interrupts */
	SMC_SET_INT_EN(lp, 0);
	SMC_ACK_INT(lp, -1);

	/* Reset the FIFO level and flow control settings */
	SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
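	/*
	 * Note (added): the value written above lands in the TX FIFO size
	 * field of HW_CFG (bits 19:16), i.e. the FIFO size in KB as set by
	 * the tx_fifo_kb module parameter or platform configuration.
	 */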
	//TODO: Figure out what appropriate pause time is
	SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
	SMC_SET_AFC_CFG(lp, lp->afc_cfg);

	/* Set to LED outputs */
	SMC_SET_GPIO_CFG(lp, 0x70070000);

	/*
	 * Deassert IRQ for 1*10us for edge type interrupts
	 * and drive IRQ pin push-pull
	 */
	irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	if (lp->cfg.irq_polarity)
		irq_cfg |= INT_CFG_IRQ_POL_;
#endif
	SMC_SET_IRQ_CFG(lp, irq_cfg);

	/* clear anything saved */
	if (lp->pending_tx_skb != NULL) {
		dev_kfree_skb (lp->pending_tx_skb);
		lp->pending_tx_skb = NULL;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
}
/*
 * Enable Interrupts, Receive, and Transmit
 */
static void smc911x_enable(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned mask, cfg, cr;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);

	SMC_SET_MAC_ADDR(lp, dev->dev_addr);

	/* Enable TX */
	cfg = SMC_GET_HW_CFG(lp);
	cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
	cfg |= HW_CFG_SF_;
	SMC_SET_HW_CFG(lp, cfg);
	SMC_SET_FIFO_TDA(lp, 0xFF);
	/* Update TX stats on every 64 packets received or every 1 sec */
	SMC_SET_FIFO_TSL(lp, 64);
	SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);

	SMC_GET_MAC_CR(lp, cr);
	cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
	SMC_SET_MAC_CR(lp, cr);
	SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);

	/* Add 2 byte padding to start of packets */
	SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
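	/*
	 * Note (added): the 2-byte RX data offset above means the 14-byte
	 * Ethernet header lands at offset 2, putting the IP header on a
	 * 4-byte boundary; smc911x_rcv() relies on this when it does
	 * skb_reserve(skb, 2).
	 */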
	/* Turn on receiver and enable RX */
	if (cr & MAC_CR_RXEN_)
		DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");

	SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);

	/* Interrupt on every received packet */
	SMC_SET_FIFO_RSA(lp, 0x01);
	SMC_SET_FIFO_RSL(lp, 0x00);

	/* now, enable interrupts */
	mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
		INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
		INT_EN_PHY_INT_EN_;
	if (IS_REV_A(lp->revision))
		mask|=INT_EN_RDFL_EN_;
	else {
		mask|=INT_EN_RDFO_EN_;
	}
	SMC_ENABLE_INT(lp, mask);

	spin_unlock_irqrestore(&lp->lock, flags);
}
/*
 * this puts the device in an inactive state
 */
static void smc911x_shutdown(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned cr;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);

	/* Disable IRQ's */
	SMC_SET_INT_EN(lp, 0);

	/* Turn off RX and TX */
	spin_lock_irqsave(&lp->lock, flags);
	SMC_GET_MAC_CR(lp, cr);
	cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
	SMC_SET_MAC_CR(lp, cr);
	SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
	spin_unlock_irqrestore(&lp->lock, flags);
}
static inline void smc911x_drop_pkt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int fifo_count, timeout, reg;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
	    CARDNAME, __func__);
	fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
	if (fifo_count <= 4) {
		/* Manually dump the packet data */
		while (fifo_count--)
			SMC_GET_RX_FIFO(lp);
	} else {
		/* Fast forward through the bad packet */
		SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
		timeout=50;
		do {
			udelay(10);
			reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
		} while (--timeout && reg);
		if (timeout == 0) {
			PRINTK(dev, "timeout waiting for RX fast forward\n");
		}
	}
}
/*
 * This is the procedure to handle the receipt of a packet.
 * It should be called after checking for packet presence in
 * the RX status FIFO. It must be called with the spin lock
 * already held.
 */
static inline void smc911x_rcv(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int pkt_len, status;
	struct sk_buff *skb;
	unsigned char *data;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
	    __func__);
	status = SMC_GET_RX_STS_FIFO(lp);
	DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
	    (status & 0x3fff0000) >> 16, status & 0xc000ffff);
	pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
	if (status & RX_STS_ES_) {
		/* Deal with a bad packet */
		dev->stats.rx_errors++;
		if (status & RX_STS_CRC_ERR_)
			dev->stats.rx_crc_errors++;
		else {
			if (status & RX_STS_LEN_ERR_)
				dev->stats.rx_length_errors++;
			if (status & RX_STS_MCAST_)
				dev->stats.multicast++;
		}
		/* Remove the bad packet data from the RX FIFO */
		smc911x_drop_pkt(dev);
	} else {
		/* Receive a valid packet */
		/* Alloc a buffer with extra room for DMA alignment */
		skb = netdev_alloc_skb(dev, pkt_len+32);
		if (unlikely(skb == NULL)) {
			PRINTK(dev, "Low memory, rcvd packet dropped.\n");
			dev->stats.rx_dropped++;
			smc911x_drop_pkt(dev);
			return;
		}
		/* Align IP header to 32 bits
		 * Note that the device is configured to add a 2
		 * byte padding to the packet start, so we really
		 * want to write to the original data pointer */
		data = skb->data;
		skb_reserve(skb, 2);
		skb_put(skb,pkt_len-4);
#ifdef SMC_USE_DMA
		{
		unsigned int fifo;
		/* Lower the FIFO threshold if possible */
		fifo = SMC_GET_FIFO_INT(lp);
		if (fifo & 0xFF) fifo--;
		DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
		    fifo & 0xff);
		SMC_SET_FIFO_INT(lp, fifo);
		/* Setup RX DMA */
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
		lp->rxdma_active = 1;
		lp->current_rx_skb = skb;
		SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
		/* Packet processing deferred to DMA RX interrupt */
		}
#else
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
		SMC_PULL_DATA(lp, data, pkt_len+2+3);

		DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len-4;
#endif
	}
}
/*
 * This is called to actually send a packet to the chip.
 */
static void smc911x_hardware_send_pkt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int cmdA, cmdB, len;
	unsigned char *buf;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
	BUG_ON(lp->pending_tx_skb == NULL);

	skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;

	/* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
	/* cmdB [31:16] pkt tag [10:0] length */
#ifdef SMC_USE_DMA
	/* 16 byte buffer alignment mode */
	buf = (char*)((u32)(skb->data) & ~0xF);
	len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
	cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
			skb->len;
#else
	buf = (char*)((u32)skb->data & ~0x3);
	len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
	cmdA = (((u32)skb->data & 0x3) << 16) |
			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
			skb->len;
#endif
	/* tag is packet length so we can use this in stats update later */
	cmdB = (skb->len << 16) | (skb->len & 0x7FF);
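	/*
	 * Illustrative example (added, PIO case): for a 60-byte skb whose
	 * data starts 2 bytes past a word boundary, buf is rounded down to
	 * that word boundary, len becomes 64, and
	 *	cmdA = (2 << 16) | TX_CMD_A_INT_FIRST_SEG_ |
	 *	       TX_CMD_A_INT_LAST_SEG_ | 60;
	 *	cmdB = (60 << 16) | 60;
	 * i.e. the start offset tells the chip to skip the 2 leading bytes,
	 * and the tag in cmdB comes back in the TX status word for the
	 * stats update in smc911x_tx().
	 */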
	DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
	    len, len, buf, cmdA, cmdB);
	SMC_SET_TX_FIFO(lp, cmdA);
	SMC_SET_TX_FIFO(lp, cmdB);

	DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
	PRINT_PKT(buf, len <= 64 ? len : 64);

	/* Send pkt via PIO or DMA */
#ifdef SMC_USE_DMA
	lp->current_tx_skb = skb;
	SMC_PUSH_DATA(lp, buf, len);
	/* DMA complete IRQ will free buffer and set jiffies */
#else
	SMC_PUSH_DATA(lp, buf, len);
	dev->trans_start = jiffies;
	dev_kfree_skb_irq(skb);
#endif
	if (!lp->tx_throttle) {
		netif_wake_queue(dev);
	}
	SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
}
/*
 * Since I am not sure if I will have enough room in the chip's ram
 * to store the packet, I call this routine which either sends it
 * now, or sets the card to generate an interrupt when ready
 * for the packet.
 */
static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int free;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
	    __func__);

	spin_lock_irqsave(&lp->lock, flags);

	BUG_ON(lp->pending_tx_skb != NULL);

	free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
	DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);

	/* Turn off the flow when running out of space in FIFO */
	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
		DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
		    free);
		/* Reenable when at least 1 packet of size MTU present */
		SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
		lp->tx_throttle = 1;
		netif_stop_queue(dev);
	}

	/* Drop packets when we run out of space in TX FIFO
	 * Account for overhead required for:
	 *
	 *	Tx command words	 8 bytes
	 *	Start offset		15 bytes
	 *	End padding		15 bytes
	 */
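	/*
	 * Worked example (added): a full-size 1514-byte frame therefore
	 * needs up to 1514 + 8 + 15 + 15 = 1552 bytes of TX data FIFO in
	 * the worst case before it is safe to queue it.
	 */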
	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
		netdev_warn(dev, "No Tx free space %d < %d\n",
			    free, skb->len);
		lp->pending_tx_skb = NULL;
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		spin_unlock_irqrestore(&lp->lock, flags);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

#ifdef SMC_USE_DMA
	{
	/* If the DMA is already running then defer this packet Tx until
	 * the DMA IRQ starts it
	 */
	if (lp->txdma_active) {
		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
		lp->pending_tx_skb = skb;
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&lp->lock, flags);
		return NETDEV_TX_OK;
	} else {
		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
		lp->txdma_active = 1;
	}
	}
#endif
	lp->pending_tx_skb = skb;
	smc911x_hardware_send_pkt(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
/*
 * This handles a TX status interrupt, which is only called when:
 * - a TX error occurred, or
 * - TX of a packet completed.
 */
static void smc911x_tx(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int tx_status;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
	    __func__);

	/* Collect the TX status */
	while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
		DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
		    (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
		tx_status = SMC_GET_TX_STS_FIFO(lp);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes+=tx_status>>16;
		DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
		    (tx_status & 0xffff0000) >> 16,
		    tx_status & 0x0000ffff);
		/* count Tx errors, but ignore lost carrier errors when in
		 * full-duplex mode */
		if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
		    !(tx_status & 0x00000306))) {
			dev->stats.tx_errors++;
		}
		if (tx_status & TX_STS_MANY_COLL_) {
			dev->stats.collisions+=16;
			dev->stats.tx_aborted_errors++;
		} else {
			dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
		}
		/* carrier error only has meaning for half-duplex communication */
		if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
		    !lp->ctl_rfduplx) {
			dev->stats.tx_carrier_errors++;
		}
		if (tx_status & TX_STS_LATE_COLL_) {
			dev->stats.collisions++;
			dev->stats.tx_aborted_errors++;
		}
	}
}
/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
/*
 * Reads a register from the MII Management serial interface
 */
static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int phydata;

	SMC_GET_MII(lp, phyreg, phyaddr, phydata);

	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
	    __func__, phyaddr, phyreg, phydata);
	return phydata;
}

/*
 * Writes a register to the MII Management serial interface
 */
static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
			      int phydata)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}
/*
 * Finds and reports the PHY address (115 and 117 have an external
 * PHY interface, 118 has internal only)
 */
static void smc911x_phy_detect(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr;
	unsigned int cfg, id1, id2;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	lp->phy_type = 0;

	/*
	 * Scan all 32 PHY addresses if necessary, starting at
	 * PHY#1 to PHY#31, and then PHY#0 last.
	 */
	switch(lp->version) {
		case CHIP_9115:
		case CHIP_9117:
		case CHIP_9215:
		case CHIP_9217:
			cfg = SMC_GET_HW_CFG(lp);
			if (cfg & HW_CFG_EXT_PHY_DET_) {
				cfg &= ~HW_CFG_PHY_CLK_SEL_;
				cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
				SMC_SET_HW_CFG(lp, cfg);
				udelay(10); /* Wait for clocks to stop */

				cfg |= HW_CFG_EXT_PHY_EN_;
				SMC_SET_HW_CFG(lp, cfg);
				udelay(10); /* Wait for clocks to stop */

				cfg &= ~HW_CFG_PHY_CLK_SEL_;
				cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
				SMC_SET_HW_CFG(lp, cfg);
				udelay(10); /* Wait for clocks to stop */

				cfg |= HW_CFG_SMI_SEL_;
				SMC_SET_HW_CFG(lp, cfg);

				for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
					/* Read the PHY identifiers */
					SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
					SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);

					/* Make sure it is a valid identifier */
					if (id1 != 0x0000 && id1 != 0xffff &&
					    id1 != 0x8000 && id2 != 0x0000 &&
					    id2 != 0xffff && id2 != 0x8000) {
						/* Save the PHY's address */
						lp->mii.phy_id = phyaddr & 31;
						lp->phy_type = id1 << 16 | id2;
						break;
					}
				}
				if (phyaddr < 32)
					/* Found an external PHY */
					break;
			}
		default:
			/* Internal media only */
			SMC_GET_PHY_ID1(lp, 1, id1);
			SMC_GET_PHY_ID2(lp, 1, id2);
			/* Save the PHY's address */
			lp->mii.phy_id = 1;
			lp->phy_type = id1 << 16 | id2;
	}

	DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
	    id1, id2, lp->mii.phy_id);
}
/*
 * Sets the PHY to a configuration as determined by the user.
 * Called with spin_lock held.
 */
static int smc911x_phy_fixed(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int bmcr;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* Enter Link Disable state */
	SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
	bmcr |= BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	/*
	 * Set our fixed capabilities
	 * Disable auto-negotiation
	 */
	bmcr &= ~BMCR_ANENABLE;
	if (lp->ctl_rfduplx)
		bmcr |= BMCR_FULLDPLX;

	if (lp->ctl_rspeed == 100)
		bmcr |= BMCR_SPEED100;

	/* Write our capabilities to the phy control register */
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	/* Re-Configure the Receive/Phy Control register */
	bmcr &= ~BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	return 1;
}
/**
 * smc911x_phy_reset - reset the phy
 * @dev: net device
 * @phy: phy address
 *
 * Issue a software reset for the specified PHY and
 * wait up to 100ms for the reset to complete. We should
 * not access the PHY for 50ms after issuing the reset.
 *
 * The time to wait appears to be dependent on the PHY.
 */
static int smc911x_phy_reset(struct net_device *dev, int phy)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int timeout;
	unsigned long flags;
	unsigned int reg;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	reg = SMC_GET_PMT_CTRL(lp);
	reg &= ~0xfffff030;
	reg |= PMT_CTRL_PHY_RST_;
	SMC_SET_PMT_CTRL(lp, reg);
	spin_unlock_irqrestore(&lp->lock, flags);
	for (timeout = 2; timeout; timeout--) {
		msleep(50);
		spin_lock_irqsave(&lp->lock, flags);
		reg = SMC_GET_PMT_CTRL(lp);
		spin_unlock_irqrestore(&lp->lock, flags);
		if (!(reg & PMT_CTRL_PHY_RST_)) {
			/* extra delay required because the phy may
			 * not be completed with its reset
			 * when PHY_BCR_RESET_ is cleared. 256us
			 * should suffice, but use 500us to be safe
			 */
			udelay(500);
			break;
		}
	}

	return reg & PMT_CTRL_PHY_RST_;
}
/**
 * smc911x_phy_powerdown - powerdown phy
 * @dev: net device
 * @phy: phy address
 *
 * Power down the specified PHY
 */
static void smc911x_phy_powerdown(struct net_device *dev, int phy)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int bmcr;

	/* Enter Link Disable state */
	SMC_GET_PHY_BMCR(lp, phy, bmcr);
	bmcr |= BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
/**
 * smc911x_phy_check_media - check the media status and adjust BMCR
 * @dev: net device
 * @init: set true for initialisation
 *
 * Select duplex mode depending on negotiation state. This
 * also updates our carrier state.
 */
static void smc911x_phy_check_media(struct net_device *dev, int init)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	unsigned int bmcr, cr;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
		/* duplex state has changed */
		SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_GET_MAC_CR(lp, cr);
		if (lp->mii.full_duplex) {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
			bmcr |= BMCR_FULLDPLX;
			cr |= MAC_CR_RCVOWN_;
		} else {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
			bmcr &= ~BMCR_FULLDPLX;
			cr &= ~MAC_CR_RCVOWN_;
		}
		SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_SET_MAC_CR(lp, cr);
	}
}
/*
 * Configures the specified PHY through the MII management interface
 * using Autonegotiation.
 * Calls smc911x_phy_fixed() if the user has requested a certain config.
 * If RPC ANEG bit is set, the media selection is dependent purely on
 * the selection by the MII (either in the MII BMCR reg or the result
 * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
 * is controlled by the RPC SPEED and RPC DPLX bits.
 */
static void smc911x_phy_configure(struct work_struct *work)
{
	struct smc911x_local *lp = container_of(work, struct smc911x_local,
						phy_configure);
	struct net_device *dev = lp->netdev;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps;	/* My PHY capabilities */
	int my_ad_caps;		/* My Advertised capabilities */
	int status;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);

	/*
	 * We should not be called if phy_type is zero.
	 */
	if (lp->phy_type == 0)
		return;

	if (smc911x_phy_reset(dev, phyaddr)) {
		netdev_info(dev, "PHY reset timed out\n");
		return;
	}
	spin_lock_irqsave(&lp->lock, flags);

	/*
	 * Enable PHY Interrupts (for register 18)
	 * Interrupts listed here are enabled
	 */
	SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
			     PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
			     PHY_INT_MASK_LINK_DOWN_);

	/* If the user requested no auto neg, then go set his request */
	if (lp->mii.force_media) {
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
	SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		netdev_info(dev, "Auto negotiation NOT supported\n");
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* CSMA capable w/ both pauses */
	my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* Disable capabilities not selected by our user */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);

	/* Update our Auto-Neg Advertisement Register */
	SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/*
	 * Read the register back. Without this, it appears that when
	 * auto-negotiation is restarted, sometimes it isn't ready and
	 * the link does not come up.
	 */
	udelay(10);
	SMC_GET_PHY_MII_ADV(lp, phyaddr, status);

	DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
	DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);

	smc911x_phy_check_media(dev, 1);

smc911x_phy_configure_exit:
	spin_unlock_irqrestore(&lp->lock, flags);
}
/*
 * smc911x_phy_interrupt
 *
 * Purpose: Handle interrupts relating to PHY register 18. This is
 * called from the "hard" interrupt handler under our private spinlock.
 */
static void smc911x_phy_interrupt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int status;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (lp->phy_type == 0)
		return;

	smc911x_phy_check_media(dev, 0);
	/* read to clear status bits */
	SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
	DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
	    status & 0xffff);
	DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
	    SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
/*
 * This is the main routine of the driver, to handle the device when
 * it needs some attention.
 */
static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int status, mask, timeout;
	unsigned int rx_overrun=0, cr, pkts;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);

	/* Spurious interrupt check */
	if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
	    (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
		spin_unlock_irqrestore(&lp->lock, flags);
		return IRQ_NONE;
	}

	mask = SMC_GET_INT_EN(lp);
	SMC_SET_INT_EN(lp, 0);

	/* set a timeout value, so I don't stay here forever */
	timeout = 8;

	do {
		status = SMC_GET_INT(lp);

		DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
		    status, mask, status & ~mask);

		status &= mask;
		if (!status)
			break;

		/* Handle SW interrupt condition */
		if (status & INT_STS_SW_INT_) {
			SMC_ACK_INT(lp, INT_STS_SW_INT_);
			mask &= ~INT_EN_SW_INT_EN_;
		}
		/* Handle various error conditions */
		if (status & INT_STS_RXE_) {
			SMC_ACK_INT(lp, INT_STS_RXE_);
			dev->stats.rx_errors++;
		}
		if (status & INT_STS_RXDFH_INT_) {
			SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
			dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
		}
		/* Undocumented interrupt - what is the right thing to do here? */
		if (status & INT_STS_RXDF_INT_) {
			SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
		}

		/* Rx Data FIFO exceeds set level */
		if (status & INT_STS_RDFL_) {
			if (IS_REV_A(lp->revision)) {
				rx_overrun=1;
				SMC_GET_MAC_CR(lp, cr);
				cr &= ~MAC_CR_RXEN_;
				SMC_SET_MAC_CR(lp, cr);
				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
			}
			SMC_ACK_INT(lp, INT_STS_RDFL_);
		}
		if (status & INT_STS_RDFO_) {
			if (!IS_REV_A(lp->revision)) {
				SMC_GET_MAC_CR(lp, cr);
				cr &= ~MAC_CR_RXEN_;
				SMC_SET_MAC_CR(lp, cr);
				rx_overrun=1;
				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
			}
			SMC_ACK_INT(lp, INT_STS_RDFO_);
		}
		/* Handle receive condition */
		if ((status & INT_STS_RSFL_) || rx_overrun) {
			unsigned int fifo;
			DBG(SMC_DEBUG_RX, dev, "RX irq\n");
			fifo = SMC_GET_RX_FIFO_INF(lp);
			pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
			DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
			    pkts, fifo & 0xFFFF);
			if (pkts != 0) {
#ifdef SMC_USE_DMA
				unsigned int fifo;
				if (lp->rxdma_active){
					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
					    "RX DMA active\n");
					/* The DMA is already running so up the IRQ threshold */
					fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
					fifo |= pkts & 0xFF;
					DBG(SMC_DEBUG_RX, dev,
					    "Setting RX stat FIFO threshold to %d\n",
					    fifo & 0xff);
					SMC_SET_FIFO_INT(lp, fifo);
				} else
#endif
				smc911x_rcv(dev);
			}
			SMC_ACK_INT(lp, INT_STS_RSFL_);
		}
		/* Handle transmit FIFO available */
		if (status & INT_STS_TDFA_) {
			DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
			SMC_SET_FIFO_TDA(lp, 0xFF);
			lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
			if (!lp->txdma_active)
#endif
				netif_wake_queue(dev);
			SMC_ACK_INT(lp, INT_STS_TDFA_);
		}
		/* Handle transmit done condition */
#if 1
		if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
			    "Tx stat FIFO limit (%d) /GPT irq\n",
			    (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
			smc911x_tx(dev);
			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
			SMC_ACK_INT(lp, INT_STS_TSFL_);
			SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
		}
#else
		if (status & INT_STS_TSFL_) {
			DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
			smc911x_tx(dev);
			SMC_ACK_INT(lp, INT_STS_TSFL_);
		}

		if (status & INT_STS_GPT_INT_) {
			DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
			    SMC_GET_IRQ_CFG(lp),
			    SMC_GET_FIFO_INT(lp),
			    SMC_GET_RX_CFG(lp));
			DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
			    (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
			    SMC_GET_RX_FIFO_INF(lp) & 0xffff,
			    SMC_GET_RX_STS_FIFO_PEEK(lp));
			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
			SMC_ACK_INT(lp, INT_STS_GPT_INT_);
		}
#endif

		/* Handle PHY interrupt condition */
		if (status & INT_STS_PHY_INT_) {
			DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
			smc911x_phy_interrupt(dev);
			SMC_ACK_INT(lp, INT_STS_PHY_INT_);
		}
	} while (--timeout);

	/* restore mask state */
	SMC_SET_INT_EN(lp, mask);

	DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
	    8-timeout);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}
#ifdef SMC_USE_DMA
static void
smc911x_tx_dma_irq(int dma, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb = lp->current_tx_skb;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
	/* Clear the DMA interrupt sources */
	SMC_DMA_ACK_IRQ(dev, dma);
	BUG_ON(skb == NULL);
	dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
	dev->trans_start = jiffies;
	dev_kfree_skb_irq(skb);
	lp->current_tx_skb = NULL;
	if (lp->pending_tx_skb != NULL)
		smc911x_hardware_send_pkt(dev);
	else {
		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
		    "No pending Tx packets. DMA disabled\n");
		spin_lock_irqsave(&lp->lock, flags);
		lp->txdma_active = 0;
		if (!lp->tx_throttle) {
			netif_wake_queue(dev);
		}
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
	    "TX DMA irq completed\n");
}

static void
smc911x_rx_dma_irq(int dma, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	unsigned long ioaddr = dev->base_addr;
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb = lp->current_rx_skb;
	unsigned long flags;
	unsigned int pkts;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
	/* Clear the DMA interrupt sources */
	SMC_DMA_ACK_IRQ(dev, dma);
	dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
	BUG_ON(skb == NULL);
	lp->current_rx_skb = NULL;
	PRINT_PKT(skb->data, skb->len);
	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);

	spin_lock_irqsave(&lp->lock, flags);
	pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
	if (pkts != 0) {
		smc911x_rcv(dev);
	} else {
		lp->rxdma_active = 0;
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
	    "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
	    pkts);
}
#endif	 /* SMC_USE_DMA */
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void smc911x_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	smc911x_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
/* Our watchdog timed out. Called by the networking layer */
static void smc911x_timeout(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int status, mask;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_EN(lp);
	spin_unlock_irqrestore(&lp->lock, flags);
	DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
	    status, mask);

	/* Dump the current TX FIFO contents and restart */
	mask = SMC_GET_TX_CFG(lp);
	SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
	/*
	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
	 * which calls schedule(). Hence we use a work queue.
	 */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void smc911x_set_multicast_list(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int multicast_table[2];
	unsigned int mcr, update_multicast = 0;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	SMC_GET_MAC_CR(lp, mcr);
	spin_unlock_irqrestore(&lp->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
		mcr |= MAC_CR_PRMS_;
	}
	/*
	 * Here, I am setting this to accept all multicast packets.
	 * I don't need to zero the multicast table, because the flag is
	 * checked before the table is used.
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
		mcr |= MAC_CR_MCPAS_;
	}

	/*
	 * This sets the internal hardware table to filter out unwanted
	 * multicast packets before they take up memory.
	 *
	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
	 * the address are the offset into the table. If that bit is 1, then the
	 * multicast packet is accepted. Otherwise, it's dropped silently.
	 *
	 * To use the 6 bits as an offset into the table, the highest bit is
	 * the number of the 32 bit register, while the low 5 bits are the bit
	 * within that register.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Set the Hash perfect mode */
		mcr |= MAC_CR_HPFILT_;

		/* start with a table of all zeros: reject all */
		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			u32 position;

			/* upper 6 bits are used as hash index */
			position = ether_crc(ETH_ALEN, ha->addr)>>26;

			multicast_table[position>>5] |= 1 << (position&0x1f);
		}
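
		/*
		 * Illustrative mapping (added): with a 6-bit hash index,
		 * bit 5 selects the 32-bit word (multicast_table[1] goes to
		 * HASHH, multicast_table[0] to HASHL) and bits 4:0 select
		 * the bit within that word; e.g. index 0x23 (35) sets bit 3
		 * of the high register.
		 */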
		/* be sure I get rid of flags I might have set */
		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);

		/* now, the table can be loaded into the chipset */
		update_multicast = 1;
	} else {
		DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);

		/*
		 * since I'm disabling all multicast entirely, I need to
		 * clear the multicast list
		 */
		memset(multicast_table, 0, sizeof(multicast_table));
		update_multicast = 1;
	}

	spin_lock_irqsave(&lp->lock, flags);
	SMC_SET_MAC_CR(lp, mcr);
	if (update_multicast) {
		DBG(SMC_DEBUG_MISC, dev,
		    "update mcast hash table 0x%08x 0x%08x\n",
		    multicast_table[0], multicast_table[1]);
		SMC_SET_HASHL(lp, multicast_table[0]);
		SMC_SET_HASHH(lp, multicast_table[1]);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
}
/*
 * Open and Initialize the board
 *
 * Set up everything, reset the card, etc..
 */
static int
smc911x_open(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* reset the hardware */
	smc911x_reset(dev);

	/* Configure the PHY, initialize the link state */
	smc911x_phy_configure(&lp->phy_configure);

	/* Turn on Tx + Rx */
	smc911x_enable(dev);

	netif_start_queue(dev);

	return 0;
}

/*
 * smc911x_close
 *
 * this makes the board clean up everything that it can
 * and not talk to the outside world. Caused by
 * an 'ifconfig ethX down'
 */
static int smc911x_close(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* clear everything */
	smc911x_shutdown(dev);

	if (lp->phy_type != 0) {
		/* We need to ensure that no calls to
		 * smc911x_phy_configure are pending.
		 */
		cancel_work_sync(&lp->phy_configure);
		smc911x_phy_powerdown(dev, lp->mii.phy_id);
	}

	if (lp->pending_tx_skb) {
		dev_kfree_skb(lp->pending_tx_skb);
		lp->pending_tx_skb = NULL;
	}

	return 0;
}
/*
 * Ethtool support
 */
static int
smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret, status;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	if (lp->phy_type != 0) {
		spin_lock_irqsave(&lp->lock, flags);
		ret = mii_ethtool_gset(&lp->mii, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		cmd->supported = SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_TP | SUPPORTED_AUI;

		if (lp->ctl_rspeed == 10)
			ethtool_cmd_speed_set(cmd, SPEED_10);
		else if (lp->ctl_rspeed == 100)
			ethtool_cmd_speed_set(cmd, SPEED_100);

		cmd->autoneg = AUTONEG_DISABLE;
		if (lp->mii.phy_id==1)
			cmd->transceiver = XCVR_INTERNAL;
		else
			cmd->transceiver = XCVR_EXTERNAL;
		cmd->port = 0;
		SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
		cmd->duplex =
			(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
				DUPLEX_FULL : DUPLEX_HALF;
		ret = 0;
	}

	return ret;
}

static int
smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;
	unsigned long flags;

	if (lp->phy_type != 0) {
		spin_lock_irqsave(&lp->lock, flags);
		ret = mii_ethtool_sset(&lp->mii, cmd);
		spin_unlock_irqrestore(&lp->lock, flags);
	} else {
		if (cmd->autoneg != AUTONEG_DISABLE ||
		    cmd->speed != SPEED_10 ||
		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
			return -EINVAL;

		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;

		ret = 0;
	}

	return ret;
}
  1313. static void
  1314. smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1315. {
  1316. strlcpy(info->driver, CARDNAME, sizeof(info->driver));
  1317. strlcpy(info->version, version, sizeof(info->version));
  1318. strlcpy(info->bus_info, dev_name(dev->dev.parent),
  1319. sizeof(info->bus_info));
  1320. }
  1321. static int smc911x_ethtool_nwayreset(struct net_device *dev)
  1322. {
  1323. struct smc911x_local *lp = netdev_priv(dev);
  1324. int ret = -EINVAL;
  1325. unsigned long flags;
  1326. if (lp->phy_type != 0) {
  1327. spin_lock_irqsave(&lp->lock, flags);
  1328. ret = mii_nway_restart(&lp->mii);
  1329. spin_unlock_irqrestore(&lp->lock, flags);
  1330. }
  1331. return ret;
  1332. }
static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct smc911x_local *lp = netdev_priv(dev);
	lp->msg_enable = level;
}

static int smc911x_ethtool_getregslen(struct net_device *dev)
{
	/* System regs + MAC regs + PHY regs */
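	/*
	 * ID_REV..E2P_CMD are 32-bit system CSRs at byte offsets (hence the
	 * divide by 4), MAC_CR..WUCSR are MAC CSR indices read one at a
	 * time, and the PHY adds 32 MII registers -- this mirrors the three
	 * read loops in smc911x_ethtool_getregs() below.
	 */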
	return (((E2P_CMD - ID_REV) / 4 + 1) +
		(WUCSR - MAC_CR) + 1 + 32) * sizeof(u32);
}

static void smc911x_ethtool_getregs(struct net_device *dev,
				    struct ethtool_regs *regs, void *buf)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned long flags;
	u32 reg, i, j = 0;
	u32 *data = (u32 *)buf;

	regs->version = lp->version;
	for (i = ID_REV; i <= E2P_CMD; i += 4) {
		data[j++] = SMC_inl(lp, i);
	}
	for (i = MAC_CR; i <= WUCSR; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MAC_CSR(lp, i, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg;
	}
	for (i = 0; i <= 31; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg & 0xFFFF;
	}
}
static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int timeout;
	int e2p_cmd;

	e2p_cmd = SMC_GET_E2P_CMD(lp);
	for (timeout = 10; (e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
			PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
			       __func__);
			return -EFAULT;
		}
		mdelay(1);
		e2p_cmd = SMC_GET_E2P_CMD(lp);
	}
	if (timeout == 0) {
		PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
		       __func__);
		return -ETIMEDOUT;
	}
	return 0;
}
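/*
 * E2P_CMD layout as used below: the EPC command goes in bits 30:28 (hence
 * the 0x7 << 28 mask), the EEPROM byte address in bits 7:0, and
 * E2P_CMD_EPC_BUSY_ is set to kick off the access and is the same busy flag
 * polled by smc911x_ethtool_wait_eeprom_ready() above.
 */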
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
						   int cmd, int addr)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
			((cmd) & (0x7 << 28)) |
			((addr) & 0xFF));
	return 0;
}

static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
						   u8 *data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	*data = SMC_GET_E2P_DATA(lp);
	return 0;
}

static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
						    u8 data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_DATA(lp, data);
	return 0;
}
static int smc911x_ethtool_geteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	u8 eebuf[SMC911X_EEPROM_LEN];
	int i, ret;

	for (i = 0; i < SMC911X_EEPROM_LEN; i++) {
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i])) != 0)
			return ret;
	}
	memcpy(data, eebuf + eeprom->offset, eeprom->len);
	return 0;
}
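/*
 * EEPROM update sequence: EWEN enables erase/write once up front, then for
 * each target location the byte is erased, the next byte from the caller's
 * buffer is loaded into the data register, and a WRITE command commits it.
 */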
static int smc911x_ethtool_seteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	int i, ret;

	/* Enable erase */
	if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0)) != 0)
		return ret;
	for (i = eeprom->offset; i < (eeprom->offset + eeprom->len); i++) {
		/* erase byte */
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i)) != 0)
			return ret;
		/* write the next byte from the caller's buffer */
		if ((ret = smc911x_ethtool_write_eeprom_byte(dev, *data++)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i)) != 0)
			return ret;
	}
	return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
{
	return SMC911X_EEPROM_LEN;
}

static const struct ethtool_ops smc911x_ethtool_ops = {
	.get_settings	= smc911x_ethtool_getsettings,
	.set_settings	= smc911x_ethtool_setsettings,
	.get_drvinfo	= smc911x_ethtool_getdrvinfo,
	.get_msglevel	= smc911x_ethtool_getmsglevel,
	.set_msglevel	= smc911x_ethtool_setmsglevel,
	.nway_reset	= smc911x_ethtool_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_regs_len	= smc911x_ethtool_getregslen,
	.get_regs	= smc911x_ethtool_getregs,
	.get_eeprom_len	= smc911x_ethtool_geteeprom_len,
	.get_eeprom	= smc911x_ethtool_geteeprom,
	.set_eeprom	= smc911x_ethtool_seteeprom,
};
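/*
 * For reference, these ops back the usual ethtool(8) requests: a bare
 * "ethtool ethX" maps to get_settings, "ethtool -i" to get_drvinfo,
 * "ethtool -r" to nway_reset, "ethtool -d" to get_regs and "ethtool -e"/"-E"
 * to the EEPROM accessors ("ethX" is only an example interface name).
 */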
/*
 * smc911x_findirq
 *
 * This routine has a simple purpose -- make the SMC chip generate an
 * interrupt, so an auto-detect routine can detect it, and find the IRQ.
 */
static int smc911x_findirq(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int timeout = 20;
	unsigned long cookie;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	cookie = probe_irq_on();

	/*
	 * Force a SW interrupt
	 */
	SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);

	/*
	 * Wait until positive that the interrupt has been generated
	 */
	do {
		int int_status;
		udelay(10);
		int_status = SMC_GET_INT_EN(lp);
		if (int_status & INT_EN_SW_INT_EN_)
			break;	/* got the interrupt */
	} while (--timeout);

	/*
	 * there is really nothing that I can do here if timeout fails,
	 * as autoirq_report will return a 0 anyway, which is what I
	 * want in this case. Plus, the clean up is needed in both
	 * cases.
	 */

	/* and disable all interrupts again */
	SMC_SET_INT_EN(lp, 0);

	/* and return what I found */
	return probe_irq_off(cookie);
}
static const struct net_device_ops smc911x_netdev_ops = {
	.ndo_open		= smc911x_open,
	.ndo_stop		= smc911x_close,
	.ndo_start_xmit		= smc911x_hard_start_xmit,
	.ndo_tx_timeout		= smc911x_timeout,
	.ndo_set_rx_mode	= smc911x_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smc911x_poll_controller,
#endif
};
/*
 * Function: smc911x_probe(unsigned long ioaddr)
 *
 * Purpose:
 *	Tests to see if a given ioaddr points to an SMC911x chip.
 *	Returns a 0 on success.
 *
 * Algorithm:
 *	(1) see if the endian word is OK
 *	(2) see if I recognize the chip ID in the appropriate register
 *
 * Here I do typical initialization tasks.
 *
 * o Initialize the structure if needed
 * o print out my vanity message if not done so already
 * o print out what type of hardware is detected
 * o print out the ethernet address
 * o find the IRQ
 * o set up my private data
 * o configure the dev structure with my subroutines
 * o actually GRAB the irq.
 * o GRAB the region
 */
static int smc911x_probe(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int i, retval;
	unsigned int val, chip_id, revision;
	const char *version_string;
	unsigned long irq_flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* First, see if the endian word is recognized */
	val = SMC_GET_BYTE_TEST(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
	    CARDNAME, val);
	if (val != 0x87654321) {
		netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
		retval = -ENODEV;
		goto err_out;
	}

	/*
	 * check if the revision register is something that I
	 * recognize. These might need to be added to later,
	 * as future revisions could be added.
	 */
	chip_id = SMC_GET_PN(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
	    CARDNAME, chip_id);
	for (i = 0; chip_ids[i].id != 0; i++) {
		if (chip_ids[i].id == chip_id)
			break;
	}
	if (!chip_ids[i].id) {
		netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
		retval = -ENODEV;
		goto err_out;
	}
	version_string = chip_ids[i].name;

	revision = SMC_GET_REV(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);

	/* At this point I'll assume that the chip is an SMC911x. */
	DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
	    CARDNAME, chip_ids[i].name);

	/* Validate the TX FIFO size requested */
	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
		netdev_err(dev, "Invalid TX FIFO size requested %d\n",
			   tx_fifo_kb);
		retval = -EINVAL;
		goto err_out;
	}

	/* fill in some of the fields */
	lp->version = chip_ids[i].id;
	lp->revision = revision;
	lp->tx_fifo_kb = tx_fifo_kb;
	/* Reverse calculate the RX FIFO size from the TX */
	lp->tx_fifo_size = (lp->tx_fifo_kb << 10) - 512;
	lp->rx_fifo_size = ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
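	/*
	 * Worked example: tx_fifo_kb = 2 gives tx_fifo_size = (2 << 10) - 512
	 * = 1536 bytes and rx_fifo_size = ((0x4000 - 512 - 1536) / 16) * 15
	 * = 13440 bytes, matching the "13440 Rx Data Fifo Size" case below.
	 */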
	/* Set the automatic flow control values */
	switch (lp->tx_fifo_kb) {
	/*
	 * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
	 * AFC_LO is AFC_HI/2
	 * BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
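	/*
	 * Sanity check for the 2 KB case (13440-byte RX FIFO), assuming the
	 * usual LAN911x AFC_CFG layout with AFC_HI in bits 23:16 and AFC_LO
	 * in bits 15:8: (13440 * 2 / 3) / 64 = 140 = 0x8C and 140 / 2 = 70 =
	 * 0x46, which are exactly the upper fields of 0x008C46AF below.
	 */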
	case 2:		/* 13440 Rx Data Fifo Size */
		lp->afc_cfg = 0x008C46AF; break;
	case 3:		/* 12480 Rx Data Fifo Size */
		lp->afc_cfg = 0x0082419F; break;
	case 4:		/* 11520 Rx Data Fifo Size */
		lp->afc_cfg = 0x00783C9F; break;
	case 5:		/* 10560 Rx Data Fifo Size */
		lp->afc_cfg = 0x006E374F; break;
	case 6:		/* 9600 Rx Data Fifo Size */
		lp->afc_cfg = 0x0064328F; break;
	case 7:		/* 8640 Rx Data Fifo Size */
		lp->afc_cfg = 0x005A2D7F; break;
	case 8:		/* 7680 Rx Data Fifo Size */
		lp->afc_cfg = 0x0050287F; break;
	case 9:		/* 6720 Rx Data Fifo Size */
		lp->afc_cfg = 0x0046236F; break;
	case 10:	/* 5760 Rx Data Fifo Size */
		lp->afc_cfg = 0x003C1E6F; break;
	case 11:	/* 4800 Rx Data Fifo Size */
		lp->afc_cfg = 0x0032195F; break;
	/*
	 * AFC_HI is ~1520 bytes less than RX Data Fifo Size
	 * AFC_LO is AFC_HI/2
	 * BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
	case 12:	/* 3840 Rx Data Fifo Size */
		lp->afc_cfg = 0x0024124F; break;
	case 13:	/* 2880 Rx Data Fifo Size */
		lp->afc_cfg = 0x0015073F; break;
	case 14:	/* 1920 Rx Data Fifo Size */
		lp->afc_cfg = 0x0006032F; break;
	default:
		PRINTK(dev, "ERROR -- no AFC_CFG setting found");
		break;
	}
	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
	    "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
	    lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);

	spin_lock_init(&lp->lock);

	/* Get the MAC address */
	SMC_GET_MAC_ADDR(lp, dev->dev_addr);

	/* now, reset the chip, and put it into a known state */
	smc911x_reset(dev);

	/*
	 * If dev->irq is 0, then the device has to be banged on to see
	 * what the IRQ is.
	 *
	 * Specifying an IRQ is done with the assumption that the user knows
	 * what (s)he is doing. No checking is done!!!!
	 */
	if (dev->irq < 1) {
		int trials;

		trials = 3;
		while (trials--) {
			dev->irq = smc911x_findirq(dev);
			if (dev->irq)
				break;
			/* kick the card and try again */
			smc911x_reset(dev);
		}
	}
	if (dev->irq == 0) {
		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
		retval = -ENODEV;
		goto err_out;
	}
	dev->irq = irq_canonicalize(dev->irq);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	dev->netdev_ops = &smc911x_netdev_ops;
	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
	dev->ethtool_ops = &smc911x_ethtool_ops;

	INIT_WORK(&lp->phy_configure, smc911x_phy_configure);

	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;
	lp->mii.force_media = 0;
	lp->mii.full_duplex = 0;
	lp->mii.dev = dev;
	lp->mii.mdio_read = smc911x_phy_read;
	lp->mii.mdio_write = smc911x_phy_write;

	/*
	 * Locate the phy, if any.
	 */
	smc911x_phy_detect(dev);

	/* Set default parameters */
	lp->msg_enable = NETIF_MSG_LINK;
	lp->ctl_rfduplx = 1;
	lp->ctl_rspeed = 100;

#ifdef SMC_DYNAMIC_BUS_CONFIG
	irq_flags = lp->cfg.irq_flags;
#else
	irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
#endif

	/* Grab the IRQ */
	retval = request_irq(dev->irq, smc911x_interrupt,
			     irq_flags, dev->name, dev);
	if (retval)
		goto err_out;

#ifdef SMC_USE_DMA
	lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
	lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
	lp->rxdma_active = 0;
	lp->txdma_active = 0;
	dev->dma = lp->rxdma;
#endif
	retval = register_netdev(dev);
	if (retval == 0) {
		/* now, print out the card info, in a short format.. */
		netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
			    version_string, lp->revision,
			    dev->base_addr, dev->irq);

#ifdef SMC_USE_DMA
		if (lp->rxdma != -1)
			pr_cont(" RXDMA %d", lp->rxdma);

		if (lp->txdma != -1)
			pr_cont(" TXDMA %d", lp->txdma);
#endif
		pr_cont("\n");
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
		} else {
			/* Print the Ethernet address */
			netdev_info(dev, "Ethernet addr: %pM\n",
				    dev->dev_addr);
		}

		if (lp->phy_type == 0) {
			PRINTK(dev, "No PHY found\n");
		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
			PRINTK(dev, "LAN911x Internal PHY\n");
		} else {
			PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
		}
	}

err_out:
#ifdef SMC_USE_DMA
	if (retval) {
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	return retval;
}
/*
 * smc911x_drv_probe(struct platform_device *pdev)
 *
 * Output:
 *	0 --> there is a device
 *	anything else, error
 */
static int smc911x_drv_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct resource *res;
	struct smc911x_local *lp;
	void __iomem *addr;
	int ret;

	/* ndev is not valid yet, so avoid passing it in. */
	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
		ret = -EBUSY;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct smc911x_local));
	if (!ndev) {
		ret = -ENOMEM;
		goto release_1;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->dma = (unsigned char)-1;
	ndev->irq = platform_get_irq(pdev, 0);
	lp = netdev_priv(ndev);
	lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	{
		struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);

		if (!pd) {
			ret = -EINVAL;
			goto release_both;
		}
		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
	}
#endif

	addr = ioremap(res->start, SMC911X_IO_EXTENT);
	if (!addr) {
		ret = -ENOMEM;
		goto release_both;
	}

	platform_set_drvdata(pdev, ndev);
	lp->base = addr;
	ndev->base_addr = res->start;
	ret = smc911x_probe(ndev);
	if (ret != 0) {
		iounmap(addr);
release_both:
		free_netdev(ndev);
release_1:
		release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
		pr_info("%s: not found (%d).\n", CARDNAME, ret);
	}
#ifdef SMC_USE_DMA
	else {
		lp->physaddr = res->start;
		lp->dev = &pdev->dev;
	}
#endif

	return ret;
}
static int smc911x_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct smc911x_local *lp = netdev_priv(ndev);
	struct resource *res;

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);

	unregister_netdev(ndev);

	free_irq(ndev->irq, ndev);

#ifdef SMC_USE_DMA
	{
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	iounmap(lp->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, SMC911X_IO_EXTENT);

	free_netdev(ndev);
	return 0;
}
static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct smc911x_local *lp = netdev_priv(ndev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			smc911x_shutdown(ndev);
#if POWER_DOWN
			/* Set D2 - Energy detect only setting */
			SMC_SET_PMT_CTRL(lp, 2 << 12);
#endif
		}
	}
	return 0;
}
static int smc911x_drv_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		struct smc911x_local *lp = netdev_priv(ndev);

		if (netif_running(ndev)) {
			smc911x_reset(ndev);
			if (lp->phy_type != 0)
				smc911x_phy_configure(&lp->phy_configure);
			smc911x_enable(ndev);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver smc911x_driver = {
	.probe		= smc911x_drv_probe,
	.remove		= smc911x_drv_remove,
	.suspend	= smc911x_drv_suspend,
	.resume		= smc911x_drv_resume,
	.driver		= {
		.name	= CARDNAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(smc911x_driver);