
/*
 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
 *
 * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
 *
 * Thanks to Essential Communication for providing us with hardware
 * and very comprehensive documentation without which I would not have
 * been able to write this driver. A special thank you to John Gibbon
 * for sorting out the legal issues, with the NDA, allowing the code to
 * be released under the GPL.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
 * stupid bugs in my code.
 *
 * Softnet support and various other patches from Val Henson of
 * ODS/Essential.
 *
 * PCI DMA mapping code partly based on work by Francois Romieu.
 */
#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#define rr_if_busy(dev)     netif_queue_stopped(dev)
#define rr_if_running(dev)  netif_running(dev)

#include "rrunner.h"

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");

static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";

static const struct net_device_ops rr_netdev_ops = {
	.ndo_open		= rr_open,
	.ndo_stop		= rr_close,
	.ndo_do_ioctl		= rr_ioctl,
	.ndo_start_xmit		= rr_start_xmit,
	.ndo_set_mac_address	= hippi_mac_addr,
};
/*
 * Implementation notes:
 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, as the data block
 * is always the first part of the skb and skbs are 2^n aligned, we
 * are guaranteed to get the whole block within one 64KB-aligned 64KB
 * chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
 */
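
/*
 * Illustrative sketch only (not referenced by the driver): under the
 * alignment guarantee above, a receive buffer never straddles a 64KB
 * physical boundary. A paranoia check for that property could look
 * like this; the helper name is made up for illustration.
 */
#if 0
static inline int rr_crosses_64kb(unsigned long addr, unsigned long len)
{
	/* Non-zero if [addr, addr + len) spans more than one 64KB chunk. */
	return ((addr ^ (addr + len - 1)) & ~0xffffUL) != 0;
}
#endif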
static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	static int version_disp;
	u8 pci_latency;
	struct rr_private *rrpriv;
	void *tmpptr;
	dma_addr_t ring_dma;
	int ret = -ENOMEM;

	dev = alloc_hippi_dev(sizeof(struct rr_private));
	if (!dev)
		goto out3;

	ret = pci_enable_device(pdev);
	if (ret) {
		ret = -ENODEV;
		goto out2;
	}

	rrpriv = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = pci_request_regions(pdev, "rrunner");
	if (ret < 0)
		goto out;

	pci_set_drvdata(pdev, dev);

	rrpriv->pci_dev = pdev;

	spin_lock_init(&rrpriv->lock);

	dev->netdev_ops = &rr_netdev_ops;

	/* display version info if adapter is found */
	if (!version_disp) {
		/* set display flag to TRUE so that */
		/* we only display this string ONCE */
		version_disp = 1;
		printk(version);
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency <= 0x58){
		pci_latency = 0x58;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
	}

	pci_set_master(pdev);

	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
	       "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
	       (unsigned long long)pci_resource_start(pdev, 0),
	       pdev->irq, pci_latency);

	/*
	 * Remap the MMIO regs into kernel space.
	 */
	rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
	if (!rrpriv->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "RoadRunner will be disabled.\n", dev->name);
		ret = -EIO;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	rrpriv->tx_ring = tmpptr;
	rrpriv->tx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	rrpriv->rx_ring = tmpptr;
	rrpriv->rx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
	rrpriv->evt_ring = tmpptr;
	rrpriv->evt_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Don't access any register before this point!
	 */
#ifdef __BIG_ENDIAN
	writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
	       &rrpriv->regs->HostCtrl);
#endif
	/*
	 * Need to add a case for little-endian 64-bit hosts here.
	 */

	rr_init(dev);

	ret = register_netdev(dev);
	if (ret)
		goto out;
	return 0;

 out:
	if (rrpriv->evt_ring)
		pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
				    rrpriv->evt_ring_dma);
	if (rrpriv->rx_ring)
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
				    rrpriv->rx_ring_dma);
	if (rrpriv->tx_ring)
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
				    rrpriv->tx_ring_dma);
	if (rrpriv->regs)
		pci_iounmap(pdev, rrpriv->regs);
	if (pdev)
		pci_release_regions(pdev);
 out2:
	free_netdev(dev);
 out3:
	return ret;
}

static void rr_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rr_private *rr = netdev_priv(dev);

	if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
		printk(KERN_ERR "%s: trying to unload running NIC\n",
		       dev->name);
		writel(HALT_NIC, &rr->regs->HostCtrl);
	}

	unregister_netdev(dev);
	pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
			    rr->evt_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
			    rr->rx_ring_dma);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
			    rr->tx_ring_dma);
	pci_iounmap(pdev, rr->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}


/*
 * Commands are considered to be slow, thus there is no reason to
 * inline this.
 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
	struct rr_regs __iomem *regs;
	u32 idx;

	regs = rrpriv->regs;
	/*
	 * This is temporary - it will go away in the final version.
	 * We probably also want to make this function inline.
	 */
	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32*)(cmd), &regs->CmdRing[idx]);
	wmb();
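
	/*
	 * The command ring producer index moves downwards: since idx is
	 * unsigned, the modulo below wraps the decrement from 0 back to
	 * CMD_RING_ENTRIES - 1 (this relies on CMD_RING_ENTRIES being a
	 * power of two).
	 */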
	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;
	wmb();

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
}


/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 */
static int rr_reset(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 start_pc;
	int i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rr_load_firmware(dev);

	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

#if 0
	/*
	 * Don't worry, this is just black magic.
	 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);
#endif

	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);
	/*
	 * Why 32? Is this not cache-line-size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);
	wmb();

	start_pc = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, rncd_info.FwStart));

#if (DEBUG > 1)
	printk("%s: Executing firmware at address 0x%06x\n",
	       dev->name, start_pc);
#endif

	writel(start_pc + 0x800, &regs->Pc);
	wmb();
	udelay(5);
	writel(start_pc, &regs->Pc);
	wmb();

	return 0;
}


/*
 * Read a string from the EEPROM.
 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, host, i;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);
	mb();
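
	/*
	 * Each EEPROM byte occupies an 8-byte stride in the window
	 * address space (hence the << 3), and the byte itself comes
	 * back in the top 8 bits of the 32-bit window data register.
	 */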
	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
		mb();
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();
	return i;
}


/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       size_t offset)
{
	__be32 word;

	if (rr_read_eeprom(rrpriv, offset,
			   (unsigned char *)&word, 4) == 4)
		return be32_to_cpu(word);
	return 0;
}
/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, data, i, j, ready, error = 0;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
	mb();

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		data = buf[i] << 24;
		/*
		 * Only try to write the data if it is not the same
		 * value already.
		 */
		if ((readl(&regs->WinData) & 0xff000000) != data){
			writel(data, &regs->WinData);
			ready = 0;
			j = 0;
			mb();

			while(!ready){
				udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
					ready = 1;
				mb();
				if (j++ > 5000){
					printk("data mismatch: %08x, "
					       "WinData %08x\n", data,
					       readl(&regs->WinData));
					ready = 1;
					error = 1;
				}
			}
		}
	}

	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();

	return error;
}


static int rr_init(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 sram_size, rev;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rev = readl(&regs->FwRev);
	rrpriv->fw_rev = rev;
	if (rev > 0x00020024)
		printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	else if (rev >= 0x00020000) {
		printk(" Firmware revision: %i.%i.%i (2.0.37 or "
		       "later is recommended)\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	}else{
		printk(" Firmware revision too old: %i.%i.%i, please "
		       "upgrade to 2.0.37 or later.\n",
		       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
	}

#if (DEBUG > 2)
	printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif

	/*
	 * Read the hardware address from the eeprom. The HW address
	 * is not really necessary for HIPPI but awfully convenient.
	 * The pointer arithmetic to put it in dev_addr is ugly, but
	 * Donald Becker does it this way for the GigE version of this
	 * card and it's shorter and more portable than any
	 * other method I've seen. -VAL
	 */

	*(__be16 *)(dev->dev_addr) =
	  htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
	*(__be32 *)(dev->dev_addr+2) =
	  htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));

	printk(" MAC: %pM\n", dev->dev_addr);

	sram_size = rr_read_eeprom_word(rrpriv, 8);
	printk(" SRAM size 0x%06x\n", sram_size);

	return 0;
}


static int rr_init1(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long myjif, flags;
	struct cmd cmd;
	u32 hostctrl;
	int ecode = 0;
	short i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	spin_lock_irqsave(&rrpriv->lock, flags);

	hostctrl = readl(&regs->HostCtrl);
	writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
	wmb();

	if (hostctrl & PARITY_ERR){
		printk("%s: Parity error halting NIC - this is serious!\n",
		       dev->name);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		ecode = -EFAULT;
		goto error;
	}

	set_rxaddr(regs, rrpriv->rx_ctrl_dma);
	set_infoaddr(regs, rrpriv->info_dma);

	rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
	rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
	rrpriv->info->evt_ctrl.mode = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);

	rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
	rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.mode = 0;
	rrpriv->info->cmd_ctrl.pi = 15;

	for (i = 0; i < CMD_RING_ENTRIES; i++) {
		writel(0, &regs->CmdRing[i]);
	}

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		rrpriv->tx_ring[i].size = 0;
		set_rraddr(&rrpriv->tx_ring[i].addr, 0);
		rrpriv->tx_skbuff[i] = NULL;
	}
	rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
	rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
	rrpriv->info->tx_ctrl.mode = 0;
	rrpriv->info->tx_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);

	/*
	 * Set dirty_tx before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	rrpriv->tx_full = 0;
	rrpriv->cur_rx = 0;
	rrpriv->dirty_rx = rrpriv->dirty_tx = 0;

	rr_reset(dev);

	/* Tuning values */
	writel(0x5000, &regs->ConRetry);
	writel(0x100, &regs->ConRetryTmr);
	writel(0x500000, &regs->ConTmout);
	writel(0x60, &regs->IntrTmr);
	writel(0x500000, &regs->TxDataMvTimeout);
	writel(0x200000, &regs->RxDataMvTimeout);
	writel(0x80, &regs->WriteDmaThresh);
	writel(0x80, &regs->ReadDmaThresh);

	rrpriv->fw_running = 0;
	wmb();

	hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
	writel(hostctrl, &regs->HostCtrl);
	wmb();
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb;
		dma_addr_t addr;

		rrpriv->rx_ring[i].mode = 0;
		skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: Unable to allocate memory "
			       "for receive ring - halting NIC\n", dev->name);
			ecode = -ENOMEM;
			goto error;
		}
		rrpriv->rx_skbuff[i] = skb;
		addr = pci_map_single(rrpriv->pci_dev, skb->data,
			dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
		/*
		 * Sanity test to see if we conflict with the DMA
		 * limitations of the RoadRunner: the receive buffer
		 * must fit within a single 64KB chunk.
		 */
		if ((((unsigned long)skb->data) ^
		     ((unsigned long)skb->data + dev->mtu + HIPPI_HLEN - 1)) &
		    ~0xffffUL)
			printk("skb alloc error\n");
		set_rraddr(&rrpriv->rx_ring[i].addr, addr);
		rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
	}

	rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
	rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
	rrpriv->rx_ctrl[4].mode = 8;
	rrpriv->rx_ctrl[4].pi = 0;
	wmb();
	set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);

	udelay(1000);
	/*
	 * Now start the firmware.
	 */
	cmd.code = C_START_FW;
	cmd.ring = 0;
	cmd.index = 0;

	rr_issue_cmd(rrpriv, &cmd);

	/*
	 * Give the firmware time to chew on the `get running' command.
	 */
	myjif = jiffies + 5 * HZ;
	while (time_before(jiffies, myjif) && !rrpriv->fw_running)
		cpu_relax();

	netif_start_queue(dev);

	return ecode;

 error:
	/*
	 * We might have gotten here because we are out of memory,
	 * make sure we release everything we allocated before failing
	 */
	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			pci_unmap_single(rrpriv->pci_dev,
					 rrpriv->rx_ring[i].addr.addrlo,
					 dev->mtu + HIPPI_HLEN,
					 PCI_DMA_FROMDEVICE);
			rrpriv->rx_ring[i].size = 0;
			set_rraddr(&rrpriv->rx_ring[i].addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
	return ecode;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 tmp;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	while (prodidx != eidx){
		switch (rrpriv->evt_ring[eidx].code){
		case E_NIC_UP:
			tmp = readl(&regs->FwRev);
			printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
			       "up and running\n", dev->name,
			       (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
			rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
			wmb();
			break;
		case E_LINK_ON:
			printk(KERN_INFO "%s: Optical link ON\n", dev->name);
			break;
		case E_LINK_OFF:
			printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
			break;
		case E_RX_IDLE:
			printk(KERN_WARNING "%s: RX data not moving\n",
			       dev->name);
			goto drop;
		case E_WATCHDOG:
			printk(KERN_INFO "%s: The watchdog is here to see "
			       "us\n", dev->name);
			break;
		case E_INTERN_ERR:
			printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_HOST_ERR:
			printk(KERN_ERR "%s: Host software error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * TX events.
		 */
		case E_CON_REJ:
			printk(KERN_WARNING "%s: Connection rejected\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_CON_TMOUT:
			printk(KERN_WARNING "%s: Connection timeout\n",
			       dev->name);
			break;
		case E_DISC_ERR:
			printk(KERN_WARNING "%s: HIPPI disconnect error\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_INT_PRTY:
			printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_IDLE:
			printk(KERN_WARNING "%s: Transmitter idle\n",
			       dev->name);
			break;
		case E_TX_LINK_DROP:
			printk(KERN_WARNING "%s: Link lost during transmit\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_RNG:
			printk(KERN_ERR "%s: Invalid send ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_BUF:
			printk(KERN_ERR "%s: Invalid send buffer address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_DSC:
			printk(KERN_ERR "%s: Invalid descriptor address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * RX events.
		 */
		case E_RX_RNG_OUT:
			printk(KERN_INFO "%s: Receive ring full\n", dev->name);
			break;
		case E_RX_PAR_ERR:
			printk(KERN_WARNING "%s: Receive parity error\n",
			       dev->name);
			goto drop;
		case E_RX_LLRC_ERR:
			printk(KERN_WARNING "%s: Receive LLRC error\n",
			       dev->name);
			goto drop;
		case E_PKT_LN_ERR:
			printk(KERN_WARNING "%s: Receive packet length "
			       "error\n", dev->name);
			goto drop;
		case E_DTA_CKSM_ERR:
			printk(KERN_WARNING "%s: Data checksum error\n",
			       dev->name);
			goto drop;
		case E_SHT_BST:
			printk(KERN_WARNING "%s: Unexpected short burst "
			       "error\n", dev->name);
			goto drop;
		case E_STATE_ERR:
			printk(KERN_WARNING "%s: Recv. state transition"
			       " error\n", dev->name);
			goto drop;
		case E_UNEXP_DATA:
			printk(KERN_WARNING "%s: Unexpected data error\n",
			       dev->name);
			goto drop;
		case E_LST_LNK_ERR:
			printk(KERN_WARNING "%s: Link lost error\n",
			       dev->name);
			goto drop;
		case E_FRM_ERR:
			printk(KERN_WARNING "%s: Framing error\n",
			       dev->name);
			goto drop;
		case E_FLG_SYN_ERR:
			printk(KERN_WARNING "%s: Flag sync. lost during "
			       "packet\n", dev->name);
			goto drop;
		case E_RX_INV_BUF:
			printk(KERN_ERR "%s: Invalid receive buffer "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RX_INV_DSC:
			printk(KERN_ERR "%s: Invalid receive descriptor "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RNG_BLK:
			printk(KERN_ERR "%s: Invalid ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		drop:
			/* Label packet to be dropped.
			 * Actual dropping occurs in rx
			 * handling.
			 *
			 * The index of packet we get to drop is
			 * the index of the packet following
			 * the bad packet. -kbf
			 */
			{
				u16 index = rrpriv->evt_ring[eidx].index;
				index = (index + (RX_RING_ENTRIES - 1)) %
					RX_RING_ENTRIES;
				rrpriv->rx_ring[index].mode |=
					(PACKET_BAD | PACKET_END);
			}
			break;
		default:
			printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
			       dev->name, rrpriv->evt_ring[eidx].code);
		}

		eidx = (eidx + 1) % EVT_RING_ENTRIES;
	}

	rrpriv->info->evt_ctrl.pi = eidx;
	wmb();
	return eidx;
}


static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;

	do {
		struct rx_desc *desc;
		u32 pkt_len;

		desc = &(rrpriv->rx_ring[index]);
		pkt_len = desc->size;
#if (DEBUG > 2)
		printk("index %i, rxlimit %i\n", index, rxlimit);
		printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
		if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
			dev->stats.rx_dropped++;
			goto defer;
		}

		if (pkt_len > 0){
			struct sk_buff *skb, *rx_skb;

			rx_skb = rrpriv->rx_skbuff[index];
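
			/*
			 * Copybreak strategy: packets below
			 * PKT_COPY_THRESHOLD are copied into a freshly
			 * allocated skb so the mapped receive buffer can
			 * be reused in place; larger packets are passed
			 * up whole and the ring slot is refilled with a
			 * newly allocated buffer.
			 */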
			if (pkt_len < PKT_COPY_THRESHOLD) {
				skb = alloc_skb(pkt_len, GFP_ATOMIC);
				if (skb == NULL){
					printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
					dev->stats.rx_dropped++;
					goto defer;
				} else {
					pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
								    desc->addr.addrlo,
								    pkt_len,
								    PCI_DMA_FROMDEVICE);

					memcpy(skb_put(skb, pkt_len),
					       rx_skb->data, pkt_len);

					pci_dma_sync_single_for_device(rrpriv->pci_dev,
								       desc->addr.addrlo,
								       pkt_len,
								       PCI_DMA_FROMDEVICE);
				}
			}else{
				struct sk_buff *newskb;

				newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
					GFP_ATOMIC);
				if (newskb){
					dma_addr_t addr;

					pci_unmap_single(rrpriv->pci_dev,
						desc->addr.addrlo, dev->mtu +
						HIPPI_HLEN, PCI_DMA_FROMDEVICE);
					skb = rx_skb;
					skb_put(skb, pkt_len);
					rrpriv->rx_skbuff[index] = newskb;
					addr = pci_map_single(rrpriv->pci_dev,
						newskb->data,
						dev->mtu + HIPPI_HLEN,
						PCI_DMA_FROMDEVICE);
					set_rraddr(&desc->addr, addr);
				} else {
					printk("%s: Out of memory, deferring "
					       "packet\n", dev->name);
					dev->stats.rx_dropped++;
					goto defer;
				}
			}
			skb->protocol = hippi_type_trans(skb, dev);

			netif_rx(skb);		/* send it up */

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	defer:
		desc->mode = 0;
		desc->size = dev->mtu + HIPPI_HLEN;

		if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);

		index = (index + 1) % RX_RING_ENTRIES;
	} while(index != rxlimit);

	rrpriv->cur_rx = index;
	wmb();
}


static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (!(readl(&regs->HostCtrl) & RR_INT))
		return IRQ_NONE;

	spin_lock(&rrpriv->lock);
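
	/*
	 * The EvtPrd register packs three 8-bit indices: the event
	 * producer in bits 0-7, the TX consumer in bits 8-15 and the
	 * RX producer limit in bits 16-23.
	 */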
	prodidx = readl(&regs->EvtPrd);
	txcsmr = (prodidx >> 8) & 0xff;
	rxlimit = (prodidx >> 16) & 0xff;
	prodidx &= 0xff;

#if (DEBUG > 2)
	printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
	       prodidx, rrpriv->info->evt_ctrl.pi);
#endif
	/*
	 * Order here is important. We must handle events
	 * before doing anything else in order to catch
	 * such things as LLRC errors, etc -kbf
	 */

	eidx = rrpriv->info->evt_ctrl.pi;
	if (prodidx != eidx)
		eidx = rr_handle_event(dev, prodidx, eidx);

	rxindex = rrpriv->cur_rx;
	if (rxindex != rxlimit)
		rx_int(dev, rxlimit, rxindex);

	txcon = rrpriv->dirty_tx;
	if (txcsmr != txcon) {
		do {
			/* Due to occasional firmware TX producer/consumer
			 * out-of-sync errors, we need to check the entry
			 * in the ring. -kbf
			 */
			if(rrpriv->tx_skbuff[txcon]){
				struct tx_desc *desc;
				struct sk_buff *skb;

				desc = &(rrpriv->tx_ring[txcon]);
				skb = rrpriv->tx_skbuff[txcon];

				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;

				pci_unmap_single(rrpriv->pci_dev,
						 desc->addr.addrlo, skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				rrpriv->tx_skbuff[txcon] = NULL;
				desc->size = 0;
				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
				desc->mode = 0;
			}
			txcon = (txcon + 1) % TX_RING_ENTRIES;
		} while (txcsmr != txcon);
		wmb();

		rrpriv->dirty_tx = txcon;
		if (rrpriv->tx_full && rr_if_busy(dev) &&
		    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
		     != rrpriv->dirty_tx)){
			rrpriv->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);
	wmb();

	spin_unlock(&rrpriv->lock);
	return IRQ_HANDLED;
}

static inline void rr_raz_tx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->tx_skbuff[i];

		if (skb) {
			struct tx_desc *desc = &(rrpriv->tx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
				skb->len, PCI_DMA_TODEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->tx_skbuff[i] = NULL;
		}
	}
}


static inline void rr_raz_rx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			struct rx_desc *desc = &(rrpriv->rx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
				dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
}

static void rr_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	unsigned long flags;

	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("%s: Restarting nic\n", dev->name);
		memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
		memset(rrpriv->info, 0, sizeof(struct rr_info));
		wmb();

		rr_raz_tx(rrpriv, dev);
		rr_raz_rx(rrpriv, dev);

		if (rr_init1(dev)) {
			spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			spin_unlock_irqrestore(&rrpriv->lock, flags);
		}
	}
	rrpriv->timer.expires = RUN_AT(5*HZ);
	add_timer(&rrpriv->timer);
}


static int rr_open(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct pci_dev *pdev = rrpriv->pci_dev;
	struct rr_regs __iomem *regs;
	int ecode = 0;
	unsigned long flags;
	dma_addr_t dma_addr;

	regs = rrpriv->regs;

	if (rrpriv->fw_rev < 0x00020000) {
		printk(KERN_WARNING "%s: trying to configure device with "
		       "obsolete firmware\n", dev->name);
		ecode = -EBUSY;
		goto error;
	}

	rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
					       256 * sizeof(struct ring_ctrl),
					       &dma_addr);
	if (!rrpriv->rx_ctrl) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->rx_ctrl_dma = dma_addr;
	memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));

	rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
					    &dma_addr);
	if (!rrpriv->info) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->info_dma = dma_addr;
	memset(rrpriv->info, 0, sizeof(struct rr_info));
	wmb();

	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	readl(&regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, pdev->irq);
		ecode = -EAGAIN;
		goto error;
	}

	if ((ecode = rr_init1(dev)))
		goto error;
	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&rrpriv->timer);
	rrpriv->timer.expires = RUN_AT(5*HZ);	/* 5 sec. watchdog */
	rrpriv->timer.data = (unsigned long)dev;
	rrpriv->timer.function = rr_timer;	/* timer handler */
	add_timer(&rrpriv->timer);

	netif_start_queue(dev);

	return ecode;

 error:
	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (rrpriv->info) {
		pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
				    rrpriv->info_dma);
		rrpriv->info = NULL;
	}
	if (rrpriv->rx_ctrl) {
		pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
				    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
		rrpriv->rx_ctrl = NULL;
	}
	netif_stop_queue(dev);

	return ecode;
}


static void rr_dump(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 index, cons;
	short i;
	int len;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	printk("%s: dumping NIC TX rings\n", dev->name);

	printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
	       readl(&regs->RxPrd), readl(&regs->TxPrd),
	       readl(&regs->EvtPrd), readl(&regs->TxPi),
	       rrpriv->info->tx_ctrl.pi);

	printk("Error code 0x%x\n", readl(&regs->Fail1));

	index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
	cons = rrpriv->dirty_tx;
	printk("TX ring index %i, TX consumer %i\n",
	       index, cons);

	if (rrpriv->tx_skbuff[index]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
		}
		printk("\n");
	}

	if (rrpriv->tx_skbuff[cons]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
		printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
		printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
		       rrpriv->tx_ring[cons].mode,
		       rrpriv->tx_ring[cons].size,
		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
		       (unsigned long)rrpriv->tx_skbuff[cons]->data,
		       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[cons]->data[i]);
		}
		printk("\n");
	}

	printk("dumping TX ring info:\n");
	for (i = 0; i < TX_RING_ENTRIES; i++)
		printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
		       rrpriv->tx_ring[i].mode,
		       rrpriv->tx_ring[i].size,
		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
}


static int rr_close(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct pci_dev *pdev = rrpriv->pci_dev;
	unsigned long flags;
	u32 tmp;
	short i;

	netif_stop_queue(dev);

	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	tmp = readl(&regs->HostCtrl);
	if (tmp & NIC_HALTED){
		printk("%s: NIC already halted\n", dev->name);
		rr_dump(dev);
	}else{
		tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
		readl(&regs->HostCtrl);
	}

	rrpriv->fw_running = 0;

	del_timer_sync(&rrpriv->timer);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);
	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	rrpriv->info->tx_ctrl.entries = 0;
	rrpriv->info->cmd_ctrl.pi = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	rrpriv->rx_ctrl[4].entries = 0;

	rr_raz_tx(rrpriv, dev);
	rr_raz_rx(rrpriv, dev);

	pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
	rrpriv->rx_ctrl = NULL;

	pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
			    rrpriv->info_dma);
	rrpriv->info = NULL;

	free_irq(pdev->irq, dev);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return 0;
}


static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
	struct ring_ctrl *txctrl;
	unsigned long flags;
	u32 index, len = skb->len;
	u32 *ifield;
	struct sk_buff *new_skb;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));

	/*
	 * We probably need to deal with tbusy here to prevent overruns.
	 */
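
	/*
	 * An 8-byte I-field block (two 32-bit words) is pushed in front
	 * of the payload below, so the skb needs 8 bytes of headroom;
	 * reallocate if the caller did not leave enough.
	 */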
	if (skb_headroom(skb) < 8){
		printk("incoming skb too small - reallocating\n");
		if (!(new_skb = dev_alloc_skb(len + 8))) {
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
		}
		skb_reserve(new_skb, 8);
		skb_put(new_skb, len);
		skb_copy_from_linear_data(skb, new_skb->data, len);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	ifield = (u32 *)skb_push(skb, 8);

	ifield[0] = 0;
	ifield[1] = hcb->ifield;

	/*
	 * We don't need the lock before we are actually going to start
	 * fiddling with the control blocks.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	txctrl = &rrpriv->info->tx_ctrl;

	index = txctrl->pi;

	rrpriv->tx_skbuff[index] = skb;
	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
	txctrl->pi = (index + 1) % TX_RING_ENTRIES;
	wmb();
	writel(txctrl->pi, &regs->TxPi);

	if (txctrl->pi == rrpriv->dirty_tx){
		rrpriv->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return NETDEV_TX_OK;
}
/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later).
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	size_t eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, 8);

	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
	mb();

	eptr = rr_read_eeprom_word(rrpriv,
		       offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
	eptr = ((eptr & 0x1fffff) >> 3);

	p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))){
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	revision = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, manf.HeaderFmt));

	if (revision != 1){
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, eptr);
	eptr += 4;
#if (DEBUG > 1)
	printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

	for (i = 0; i < nr_seg; i++){
		sptr = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		segptr = rr_read_eeprom_word(rrpriv, eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;
#if (DEBUG > 1)
		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);
#endif
		for (j = 0; j < len; j++){
			tmp = rr_read_eeprom_word(rrpriv, segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
			mb();
			segptr += 4;
			sptr += 4;
		}
	}

 out:
	writel(localctrl, &regs->LocalCtrl);
	mb();
	return 0;
}


static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rr_private *rrpriv;
	unsigned char *image, *oldimage;
	unsigned long flags;
	unsigned int i;
	int error = -EOPNOTSUPP;

	rrpriv = netdev_priv(dev);

	switch(cmd){
	case SIOCRRGFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image)
			return -ENOMEM;

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto gf_out;
		}

		spin_lock_irqsave(&rrpriv->lock, flags);
		i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		if (i != EEPROM_BYTES){
			printk(KERN_ERR "%s: Error reading EEPROM\n",
			       dev->name);
			error = -EFAULT;
			goto gf_out;
		}
		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
		if (error)
			error = -EFAULT;
	gf_out:
		kfree(image);
		return error;

	case SIOCRRPFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image || !oldimage) {
			error = -ENOMEM;
			goto wf_out;
		}

		error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
		if (error) {
			error = -EFAULT;
			goto wf_out;
		}

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto wf_out;
		}

		printk("%s: Updating EEPROM firmware\n", dev->name);

		spin_lock_irqsave(&rrpriv->lock, flags);
		error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		if (error)
			printk(KERN_ERR "%s: Error writing EEPROM\n",
			       dev->name);

		i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);

		if (i != EEPROM_BYTES)
			printk(KERN_ERR "%s: Error reading back EEPROM "
			       "image\n", dev->name);

		error = memcmp(image, oldimage, EEPROM_BYTES);
		if (error){
			printk(KERN_ERR "%s: Error verifying EEPROM image\n",
			       dev->name);
			error = -EFAULT;
		}
	wf_out:
		kfree(oldimage);
		kfree(image);
		return error;

	case SIOCRRID:
		return put_user(0x52523032, (int __user *)rq->ifr_data);
	default:
		return error;
	}
}

static const struct pci_device_id rr_pci_tbl[] = {
	{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);

static struct pci_driver rr_driver = {
	.name		= "rrunner",
	.id_table	= rr_pci_tbl,
	.probe		= rr_init_one,
	.remove		= rr_remove_one,
};

module_pci_driver(rr_driver);