/* sbni.c: Granch SBNI12 leased line adapters driver for linux
 *
 * Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
 *
 * Previous versions were written by Yaroslav Polyakov,
 * Alexey Zverev and Max Khon.
 *
 * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
 * double-channel, PCI and ISA modifications.
 * More info and useful utilities for working with SBNI12 cards can be
 * found at http://www.granch.com (English) or http://www.granch.ru (Russian)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License.
 *
 *
 * 5.0.1 Jun 22 2001
 * - Fixed a bug in probe
 * 5.0.0 Jun 06 2001
 * - Driver was completely redesigned by Denis I.Timofeev,
 * - now PCI/Dual, ISA/Dual (with single interrupt line) models are
 * - supported
 * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
 * - PCI cards support
 * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
 * - Completely rebuilt the packet storage system
 * - to work in an Ethernet-like style.
 * 3.1.1 just fixed some bugs (5 aug 1999)
 * 3.1.0 added balancing feature (26 apr 1999)
 * 3.0.1 just fixed some bugs (14 apr 1999).
 * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
 * - added pre-calculation for CRC, fixed bug with "len-2" frames,
 * - removed outbound fragmentation (MTU=1000), wrote the CRC calculation
 * - in asm, added work with hard_headers and now we have our own cache
 * - for them, optionally supported word-interchange on some chipsets,
 *
 * Known problem: this driver wasn't tested on a multiprocessor machine.
 */
  39. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40. #include <linux/module.h>
  41. #include <linux/kernel.h>
  42. #include <linux/ptrace.h>
  43. #include <linux/fcntl.h>
  44. #include <linux/ioport.h>
  45. #include <linux/interrupt.h>
  46. #include <linux/string.h>
  47. #include <linux/errno.h>
  48. #include <linux/netdevice.h>
  49. #include <linux/etherdevice.h>
  50. #include <linux/pci.h>
  51. #include <linux/skbuff.h>
  52. #include <linux/timer.h>
  53. #include <linux/init.h>
  54. #include <linux/delay.h>
  55. #include <net/net_namespace.h>
  56. #include <net/arp.h>
  57. #include <net/Space.h>
  58. #include <asm/io.h>
  59. #include <asm/types.h>
  60. #include <asm/byteorder.h>
  61. #include <asm/irq.h>
  62. #include <linux/uaccess.h>
  63. #include "sbni.h"
  64. /* device private data */
  65. struct net_local {
  66. struct timer_list watchdog;
  67. spinlock_t lock;
  68. struct sk_buff *rx_buf_p; /* receive buffer ptr */
  69. struct sk_buff *tx_buf_p; /* transmit buffer ptr */
  70. unsigned int framelen; /* current frame length */
  71. unsigned int maxframe; /* maximum valid frame length */
  72. unsigned int state;
  73. unsigned int inppos, outpos; /* positions in rx/tx buffers */
/* transmitting frame number - counts down from the number of frames to 1 */
  75. unsigned int tx_frameno;
  76. /* expected number of next receiving frame */
  77. unsigned int wait_frameno;
/* count of failed attempts to send a frame - 32 attempts are made
   before reporting an error, while the receiver on the opposite side
   of the wire tunes in */
  80. unsigned int trans_errors;
  81. /* idle time; send pong when limit exceeded */
  82. unsigned int timer_ticks;
  83. /* fields used for receive level autoselection */
  84. int delta_rxl;
  85. unsigned int cur_rxl_index, timeout_rxl;
  86. unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
  87. struct sbni_csr1 csr1; /* current value of CSR1 */
  88. struct sbni_in_stats in_stats; /* internal statistics */
  89. struct net_device *second; /* for ISA/dual cards */
  90. #ifdef CONFIG_SBNI_MULTILINE
  91. struct net_device *master;
  92. struct net_device *link;
  93. #endif
  94. };
  95. static int sbni_card_probe( unsigned long );
  96. static int sbni_pci_probe( struct net_device * );
  97. static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
  98. static int sbni_open( struct net_device * );
  99. static int sbni_close( struct net_device * );
  100. static netdev_tx_t sbni_start_xmit(struct sk_buff *,
  101. struct net_device * );
  102. static int sbni_ioctl( struct net_device *, struct ifreq *, int );
  103. static void set_multicast_list( struct net_device * );
  104. static irqreturn_t sbni_interrupt( int, void * );
  105. static void handle_channel( struct net_device * );
  106. static int recv_frame( struct net_device * );
  107. static void send_frame( struct net_device * );
  108. static int upload_data( struct net_device *,
  109. unsigned, unsigned, unsigned, u32 );
  110. static void download_data( struct net_device *, u32 * );
  111. static void sbni_watchdog( unsigned long );
  112. static void interpret_ack( struct net_device *, unsigned );
  113. static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
  114. static void indicate_pkt( struct net_device * );
  115. static void card_start( struct net_device * );
  116. static void prepare_to_send( struct sk_buff *, struct net_device * );
  117. static void drop_xmit_queue( struct net_device * );
  118. static void send_frame_header( struct net_device *, u32 * );
  119. static int skip_tail( unsigned int, unsigned int, u32 );
  120. static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
  121. static void change_level( struct net_device * );
  122. static void timeout_change_level( struct net_device * );
  123. static u32 calc_crc32( u32, u8 *, u32 );
  124. static struct sk_buff * get_rx_buf( struct net_device * );
  125. static int sbni_init( struct net_device * );
  126. #ifdef CONFIG_SBNI_MULTILINE
  127. static int enslave( struct net_device *, struct net_device * );
  128. static int emancipate( struct net_device * );
  129. #endif
  130. static const char version[] =
  131. "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
  132. static bool skip_pci_probe __initdata = false;
  133. static int scandone __initdata = 0;
  134. static int num __initdata = 0;
  135. static unsigned char rxl_tab[];
  136. static u32 crc32tab[];
  137. /* A list of all installed devices, for removing the driver module. */
  138. static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
  139. /* Lists of device's parameters */
  140. static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
  141. { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
  142. static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
  143. static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
  144. static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
  145. { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
  146. static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
  147. #ifndef MODULE
  148. typedef u32 iarr[];
  149. static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
  150. #endif
  151. /* A zero-terminated list of I/O addresses to be probed on ISA bus */
  152. static unsigned int netcard_portlist[ ] __initdata = {
  153. 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
  154. 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
  155. 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
  156. 0 };
  157. #define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
/*
 * Look for an SBNI card whose address is stored in dev->base_addr, if nonzero.
 * Otherwise, look through the PCI bus. If no PCI card was found, scan ISA.
 */
  162. static inline int __init
  163. sbni_isa_probe( struct net_device *dev )
  164. {
  165. if( dev->base_addr > 0x1ff &&
  166. request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
  167. sbni_probe1( dev, dev->base_addr, dev->irq ) )
  168. return 0;
  169. else {
  170. pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n",
  171. dev->base_addr);
  172. return -ENODEV;
  173. }
  174. }
  175. static const struct net_device_ops sbni_netdev_ops = {
  176. .ndo_open = sbni_open,
  177. .ndo_stop = sbni_close,
  178. .ndo_start_xmit = sbni_start_xmit,
  179. .ndo_set_rx_mode = set_multicast_list,
  180. .ndo_do_ioctl = sbni_ioctl,
  181. .ndo_set_mac_address = eth_mac_addr,
  182. .ndo_validate_addr = eth_validate_addr,
  183. };
  184. static void __init sbni_devsetup(struct net_device *dev)
  185. {
  186. ether_setup( dev );
  187. dev->netdev_ops = &sbni_netdev_ops;
  188. }
  189. int __init sbni_probe(int unit)
  190. {
  191. struct net_device *dev;
  192. int err;
  193. dev = alloc_netdev(sizeof(struct net_local), "sbni",
  194. NET_NAME_UNKNOWN, sbni_devsetup);
  195. if (!dev)
  196. return -ENOMEM;
  197. dev->netdev_ops = &sbni_netdev_ops;
  198. sprintf(dev->name, "sbni%d", unit);
  199. netdev_boot_setup_check(dev);
  200. err = sbni_init(dev);
  201. if (err) {
  202. free_netdev(dev);
  203. return err;
  204. }
  205. err = register_netdev(dev);
  206. if (err) {
  207. release_region( dev->base_addr, SBNI_IO_EXTENT );
  208. free_netdev(dev);
  209. return err;
  210. }
  211. pr_info_once("%s", version);
  212. return 0;
  213. }
  214. static int __init sbni_init(struct net_device *dev)
  215. {
  216. int i;
  217. if( dev->base_addr )
  218. return sbni_isa_probe( dev );
/* otherwise we have to search for our adapter */
  220. if( io[ num ] != -1 )
  221. dev->base_addr = io[ num ],
  222. dev->irq = irq[ num ];
  223. else if( scandone || io[ 0 ] != -1 )
  224. return -ENODEV;
/* if io[ num ] contains a non-zero address, then it is on the ISA bus */
  226. if( dev->base_addr )
  227. return sbni_isa_probe( dev );
  228. /* ...otherwise - scan PCI first */
  229. if( !skip_pci_probe && !sbni_pci_probe( dev ) )
  230. return 0;
  231. if( io[ num ] == -1 ) {
/* Auto-scan will be stopped when the first ISA card is found */
  233. scandone = 1;
  234. if( num > 0 )
  235. return -ENODEV;
  236. }
  237. for( i = 0; netcard_portlist[ i ]; ++i ) {
  238. int ioaddr = netcard_portlist[ i ];
  239. if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
  240. sbni_probe1( dev, ioaddr, 0 ))
  241. return 0;
  242. }
  243. return -ENODEV;
  244. }
  245. static int __init
  246. sbni_pci_probe( struct net_device *dev )
  247. {
  248. struct pci_dev *pdev = NULL;
  249. while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
  250. != NULL ) {
  251. int pci_irq_line;
  252. unsigned long pci_ioaddr;
  253. if( pdev->vendor != SBNI_PCI_VENDOR &&
  254. pdev->device != SBNI_PCI_DEVICE )
  255. continue;
  256. pci_ioaddr = pci_resource_start( pdev, 0 );
  257. pci_irq_line = pdev->irq;
/* Skip cards already found in previous calls */
  259. if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
  260. if (pdev->subsystem_device != 2)
  261. continue;
  262. /* Dual adapter is present */
  263. if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
  264. dev->name ) )
  265. continue;
  266. }
  267. if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
  268. pr_warn(
"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!\n"
  270. "You should use the PCI BIOS setup to assign a valid IRQ line.\n",
  271. pci_irq_line );
/* avoid re-enabling dual adapters */
  273. if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
  274. release_region( pci_ioaddr, SBNI_IO_EXTENT );
  275. pci_dev_put( pdev );
  276. return -EIO;
  277. }
  278. if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
  279. SET_NETDEV_DEV(dev, &pdev->dev);
  280. /* not the best thing to do, but this is all messed up
  281. for hotplug systems anyway... */
  282. pci_dev_put( pdev );
  283. return 0;
  284. }
  285. }
  286. return -ENODEV;
  287. }
  288. static struct net_device * __init
  289. sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
  290. {
  291. struct net_local *nl;
  292. if( sbni_card_probe( ioaddr ) ) {
  293. release_region( ioaddr, SBNI_IO_EXTENT );
  294. return NULL;
  295. }
  296. outb( 0, ioaddr + CSR0 );
  297. if( irq < 2 ) {
  298. unsigned long irq_mask;
  299. irq_mask = probe_irq_on();
  300. outb( EN_INT | TR_REQ, ioaddr + CSR0 );
  301. outb( PR_RES, ioaddr + CSR1 );
  302. mdelay(50);
  303. irq = probe_irq_off(irq_mask);
  304. outb( 0, ioaddr + CSR0 );
  305. if( !irq ) {
  306. pr_err("%s: can't detect device irq!\n", dev->name);
  307. release_region( ioaddr, SBNI_IO_EXTENT );
  308. return NULL;
  309. }
  310. } else if( irq == 2 )
  311. irq = 9;
  312. dev->irq = irq;
  313. dev->base_addr = ioaddr;
  314. /* Fill in sbni-specific dev fields. */
  315. nl = netdev_priv(dev);
  316. if( !nl ) {
  317. pr_err("%s: unable to get memory!\n", dev->name);
  318. release_region( ioaddr, SBNI_IO_EXTENT );
  319. return NULL;
  320. }
  321. memset( nl, 0, sizeof(struct net_local) );
  322. spin_lock_init( &nl->lock );
  323. /* store MAC address (generate if that isn't known) */
  324. *(__be16 *)dev->dev_addr = htons( 0x00ff );
  325. *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
  326. ((mac[num] ?
  327. mac[num] :
  328. (u32)((long)netdev_priv(dev))) & 0x00ffffff));
  329. /* store link settings (speed, receive level ) */
  330. nl->maxframe = DEFAULT_FRAME_LEN;
  331. nl->csr1.rate = baud[ num ];
  332. if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
  333. /* autotune rxl */
  334. nl->cur_rxl_index = DEF_RXL,
  335. nl->delta_rxl = DEF_RXL_DELTA;
  336. else
  337. nl->delta_rxl = 0;
  338. nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
  339. if( inb( ioaddr + CSR0 ) & 0x01 )
  340. nl->state |= FL_SLOW_MODE;
  341. pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
  342. dev->name, dev->base_addr, dev->irq,
  343. ((u8 *)dev->dev_addr)[3],
  344. ((u8 *)dev->dev_addr)[4],
  345. ((u8 *)dev->dev_addr)[5]);
  346. pr_notice("%s: speed %d",
  347. dev->name,
  348. ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
  349. / (1 << nl->csr1.rate));
  350. if( nl->delta_rxl == 0 )
  351. pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
  352. else
  353. pr_cont(", receive level (auto)\n");
  354. #ifdef CONFIG_SBNI_MULTILINE
  355. nl->master = dev;
  356. nl->link = NULL;
  357. #endif
  358. sbni_cards[ num++ ] = dev;
  359. return dev;
  360. }
  361. /* -------------------------------------------------------------------------- */
  362. #ifdef CONFIG_SBNI_MULTILINE
  363. static netdev_tx_t
  364. sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
  365. {
  366. struct net_device *p;
  367. netif_stop_queue( dev );
  368. /* Looking for idle device in the list */
  369. for( p = dev; p; ) {
  370. struct net_local *nl = netdev_priv(p);
  371. spin_lock( &nl->lock );
  372. if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
  373. p = nl->link;
  374. spin_unlock( &nl->lock );
  375. } else {
  376. /* Idle dev is found */
  377. prepare_to_send( skb, p );
  378. spin_unlock( &nl->lock );
  379. netif_start_queue( dev );
  380. return NETDEV_TX_OK;
  381. }
  382. }
  383. return NETDEV_TX_BUSY;
  384. }
  385. #else /* CONFIG_SBNI_MULTILINE */
  386. static netdev_tx_t
  387. sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
  388. {
  389. struct net_local *nl = netdev_priv(dev);
  390. netif_stop_queue( dev );
  391. spin_lock( &nl->lock );
  392. prepare_to_send( skb, dev );
  393. spin_unlock( &nl->lock );
  394. return NETDEV_TX_OK;
  395. }
  396. #endif /* CONFIG_SBNI_MULTILINE */
  397. /* -------------------------------------------------------------------------- */
  398. /* interrupt handler */
/*
 * SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be treated
 * as two independent single-channel devices. Each channel looks like an
 * Ethernet interface, but the interrupt handler must be shared. In fact,
 * only the first ("master") channel's driver registers the handler; its
 * struct net_local holds a pointer to the "slave" channel's struct net_local,
 * and it handles that channel's interrupts too.
 * The dev of every successfully attached ISA SBNI board is linked into a
 * list. When the next board's driver is initialized, it scans this list;
 * if it finds a dev with the same irq and an ioaddr differing by 4, it
 * assumes that board to be the "master".
 */
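/*
 * A minimal sketch of the resulting layout (illustrative only: the device
 * names, I/O addresses and IRQ below are made-up examples, while the
 * ->second field and the FL_SECONDARY flag are the real ones used in
 * sbni_open()):
 *
 *   "master" dev sbni0: base_addr 0x210, irq 5, registers the handler;
 *       netdev_priv(sbni0)->second ---> sbni1
 *   "slave"  dev sbni1: base_addr 0x214, irq 5, FL_SECONDARY set,
 *       never calls request_irq()
 *
 * sbni_interrupt() below then keeps calling handle_channel() for whichever
 * of the two base addresses reports RC_RDY or TR_RDY in CSR0.
 */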
  411. static irqreturn_t
  412. sbni_interrupt( int irq, void *dev_id )
  413. {
  414. struct net_device *dev = dev_id;
  415. struct net_local *nl = netdev_priv(dev);
  416. int repeat;
  417. spin_lock( &nl->lock );
  418. if( nl->second )
  419. spin_lock(&NET_LOCAL_LOCK(nl->second));
  420. do {
  421. repeat = 0;
  422. if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
  423. handle_channel( dev ),
  424. repeat = 1;
  425. if( nl->second && /* second channel present */
  426. (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
  427. handle_channel( nl->second ),
  428. repeat = 1;
  429. } while( repeat );
  430. if( nl->second )
  431. spin_unlock(&NET_LOCAL_LOCK(nl->second));
  432. spin_unlock( &nl->lock );
  433. return IRQ_HANDLED;
  434. }
  435. static void
  436. handle_channel( struct net_device *dev )
  437. {
  438. struct net_local *nl = netdev_priv(dev);
  439. unsigned long ioaddr = dev->base_addr;
  440. int req_ans;
  441. unsigned char csr0;
  442. #ifdef CONFIG_SBNI_MULTILINE
/* Lock the master device because we are going to change its local data */
  444. if( nl->state & FL_SLAVE )
  445. spin_lock(&NET_LOCAL_LOCK(nl->master));
  446. #endif
  447. outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
  448. nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
  449. for(;;) {
  450. csr0 = inb( ioaddr + CSR0 );
  451. if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
  452. break;
  453. req_ans = !(nl->state & FL_PREV_OK);
  454. if( csr0 & RC_RDY )
  455. req_ans = recv_frame( dev );
/*
 * TR_RDY always equals 1 here because we own the marker,
 * and we set TR_REQ when we disabled interrupts
 */
  460. csr0 = inb( ioaddr + CSR0 );
  461. if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
  462. netdev_err(dev, "internal error!\n");
  463. /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
  464. if( req_ans || nl->tx_frameno != 0 )
  465. send_frame( dev );
  466. else
  467. /* send marker without any data */
  468. outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
  469. }
  470. outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
  471. #ifdef CONFIG_SBNI_MULTILINE
  472. if( nl->state & FL_SLAVE )
  473. spin_unlock(&NET_LOCAL_LOCK(nl->master));
  474. #endif
  475. }
/*
 * The routine returns 1 if the received frame needs to be acknowledged.
 * An empty frame received without errors won't be acknowledged.
 */
  480. static int
  481. recv_frame( struct net_device *dev )
  482. {
  483. struct net_local *nl = netdev_priv(dev);
  484. unsigned long ioaddr = dev->base_addr;
  485. u32 crc = CRC32_INITIAL;
  486. unsigned framelen = 0, frameno, ack;
  487. unsigned is_first, frame_ok = 0;
  488. if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
  489. frame_ok = framelen > 4
  490. ? upload_data( dev, framelen, frameno, is_first, crc )
  491. : skip_tail( ioaddr, framelen, crc );
  492. if( frame_ok )
  493. interpret_ack( dev, ack );
  494. }
  495. outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
  496. if( frame_ok ) {
  497. nl->state |= FL_PREV_OK;
  498. if( framelen > 4 )
  499. nl->in_stats.all_rx_number++;
  500. } else
  501. nl->state &= ~FL_PREV_OK,
  502. change_level( dev ),
  503. nl->in_stats.all_rx_number++,
  504. nl->in_stats.bad_rx_number++;
  505. return !frame_ok || framelen > 4;
  506. }
  507. static void
  508. send_frame( struct net_device *dev )
  509. {
  510. struct net_local *nl = netdev_priv(dev);
  511. u32 crc = CRC32_INITIAL;
  512. if( nl->state & FL_NEED_RESEND ) {
/* if the frame was sent but not ACK'ed - resend it */
  514. if( nl->trans_errors ) {
  515. --nl->trans_errors;
  516. if( nl->framelen != 0 )
  517. nl->in_stats.resend_tx_number++;
  518. } else {
/* could not xmit even after many attempts */
  520. #ifdef CONFIG_SBNI_MULTILINE
  521. if( (nl->state & FL_SLAVE) || nl->link )
  522. #endif
  523. nl->state |= FL_LINE_DOWN;
  524. drop_xmit_queue( dev );
  525. goto do_send;
  526. }
  527. } else
  528. nl->trans_errors = TR_ERROR_COUNT;
  529. send_frame_header( dev, &crc );
  530. nl->state |= FL_NEED_RESEND;
/*
 * FL_NEED_RESEND will be cleared after ACK, but if an empty
 * frame was sent it is cleared in prepare_to_send for the next frame
 */
  535. if( nl->framelen ) {
  536. download_data( dev, &crc );
  537. nl->in_stats.all_tx_number++;
  538. nl->state |= FL_WAIT_ACK;
  539. }
  540. outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
  541. do_send:
  542. outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
  543. if( nl->tx_frameno )
  544. /* next frame exists - we request card to send it */
  545. outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
  546. dev->base_addr + CSR0 );
  547. }
  548. /*
  549. * Write the frame data into adapter's buffer memory, and calculate CRC.
  550. * Do padding if necessary.
  551. */
  552. static void
  553. download_data( struct net_device *dev, u32 *crc_p )
  554. {
  555. struct net_local *nl = netdev_priv(dev);
  556. struct sk_buff *skb = nl->tx_buf_p;
  557. unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
  558. outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
  559. *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if the packet is too short we should write some more bytes to pad it */
  561. for( len = nl->framelen - len; len--; )
  562. outb( 0, dev->base_addr + DAT ),
  563. *crc_p = CRC32( 0, *crc_p );
  564. }
  565. static int
  566. upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
  567. unsigned is_first, u32 crc )
  568. {
  569. struct net_local *nl = netdev_priv(dev);
  570. int frame_ok;
  571. if( is_first )
  572. nl->wait_frameno = frameno,
  573. nl->inppos = 0;
  574. if( nl->wait_frameno == frameno ) {
  575. if( nl->inppos + framelen <= ETHER_MAX_LEN )
  576. frame_ok = append_frame_to_pkt( dev, framelen, crc );
/*
 * if the CRC is right but framelen is incorrect then a transmitter
 * error occurred... drop the entire packet
 */
  581. else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
  582. != 0 )
  583. nl->wait_frameno = 0,
  584. nl->inppos = 0,
  585. #ifdef CONFIG_SBNI_MULTILINE
  586. nl->master->stats.rx_errors++,
  587. nl->master->stats.rx_missed_errors++;
  588. #else
  589. dev->stats.rx_errors++,
  590. dev->stats.rx_missed_errors++;
  591. #endif
  592. /* now skip all frames until is_first != 0 */
  593. } else
  594. frame_ok = skip_tail( dev->base_addr, framelen, crc );
  595. if( is_first && !frame_ok )
/*
 * The frame was broken, but we have already stored
 * is_first... Drop the entire packet.
 */
  600. nl->wait_frameno = 0,
  601. #ifdef CONFIG_SBNI_MULTILINE
  602. nl->master->stats.rx_errors++,
  603. nl->master->stats.rx_crc_errors++;
  604. #else
  605. dev->stats.rx_errors++,
  606. dev->stats.rx_crc_errors++;
  607. #endif
  608. return frame_ok;
  609. }
  610. static inline void
  611. send_complete( struct net_device *dev )
  612. {
  613. struct net_local *nl = netdev_priv(dev);
  614. #ifdef CONFIG_SBNI_MULTILINE
  615. nl->master->stats.tx_packets++;
  616. nl->master->stats.tx_bytes += nl->tx_buf_p->len;
  617. #else
  618. dev->stats.tx_packets++;
  619. dev->stats.tx_bytes += nl->tx_buf_p->len;
  620. #endif
  621. dev_kfree_skb_irq( nl->tx_buf_p );
  622. nl->tx_buf_p = NULL;
  623. nl->outpos = 0;
  624. nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
  625. nl->framelen = 0;
  626. }
  627. static void
  628. interpret_ack( struct net_device *dev, unsigned ack )
  629. {
  630. struct net_local *nl = netdev_priv(dev);
  631. if( ack == FRAME_SENT_OK ) {
  632. nl->state &= ~FL_NEED_RESEND;
  633. if( nl->state & FL_WAIT_ACK ) {
  634. nl->outpos += nl->framelen;
  635. if( --nl->tx_frameno )
  636. nl->framelen = min_t(unsigned int,
  637. nl->maxframe,
  638. nl->tx_buf_p->len - nl->outpos);
  639. else
  640. send_complete( dev ),
  641. #ifdef CONFIG_SBNI_MULTILINE
  642. netif_wake_queue( nl->master );
  643. #else
  644. netif_wake_queue( dev );
  645. #endif
  646. }
  647. }
  648. nl->state &= ~FL_WAIT_ACK;
  649. }
/*
 * Glue the received frame to previous fragments of the packet.
 * Indicate the packet once the last frame has been accepted.
 */
  654. static int
  655. append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
  656. {
  657. struct net_local *nl = netdev_priv(dev);
  658. u8 *p;
  659. if( nl->inppos + framelen > ETHER_MAX_LEN )
  660. return 0;
  661. if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
  662. return 0;
  663. p = nl->rx_buf_p->data + nl->inppos;
  664. insb( dev->base_addr + DAT, p, framelen );
  665. if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
  666. return 0;
  667. nl->inppos += framelen - 4;
  668. if( --nl->wait_frameno == 0 ) /* last frame received */
  669. indicate_pkt( dev );
  670. return 1;
  671. }
/*
 * Prepare to start output on the adapter.
 * The transmitter will actually be activated when the marker is accepted.
 */
  676. static void
  677. prepare_to_send( struct sk_buff *skb, struct net_device *dev )
  678. {
  679. struct net_local *nl = netdev_priv(dev);
  680. unsigned int len;
  681. /* nl->tx_buf_p == NULL here! */
  682. if( nl->tx_buf_p )
  683. netdev_err(dev, "memory leak!\n");
  684. nl->outpos = 0;
  685. nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
  686. len = skb->len;
  687. if( len < SBNI_MIN_LEN )
  688. len = SBNI_MIN_LEN;
  689. nl->tx_buf_p = skb;
  690. nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
  691. nl->framelen = len < nl->maxframe ? len : nl->maxframe;
  692. outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
  693. #ifdef CONFIG_SBNI_MULTILINE
  694. netif_trans_update(nl->master);
  695. #else
  696. netif_trans_update(dev);
  697. #endif
  698. }
  699. static void
  700. drop_xmit_queue( struct net_device *dev )
  701. {
  702. struct net_local *nl = netdev_priv(dev);
  703. if( nl->tx_buf_p )
  704. dev_kfree_skb_any( nl->tx_buf_p ),
  705. nl->tx_buf_p = NULL,
  706. #ifdef CONFIG_SBNI_MULTILINE
  707. nl->master->stats.tx_errors++,
  708. nl->master->stats.tx_carrier_errors++;
  709. #else
  710. dev->stats.tx_errors++,
  711. dev->stats.tx_carrier_errors++;
  712. #endif
  713. nl->tx_frameno = 0;
  714. nl->framelen = 0;
  715. nl->outpos = 0;
  716. nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
  717. #ifdef CONFIG_SBNI_MULTILINE
  718. netif_start_queue( nl->master );
  719. netif_trans_update(nl->master);
  720. #else
  721. netif_start_queue( dev );
  722. netif_trans_update(dev);
  723. #endif
  724. }
  725. static void
  726. send_frame_header( struct net_device *dev, u32 *crc_p )
  727. {
  728. struct net_local *nl = netdev_priv(dev);
  729. u32 crc = *crc_p;
  730. u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
  731. u8 value;
  732. if( nl->state & FL_NEED_RESEND )
  733. len_field |= FRAME_RETRY; /* non-first attempt... */
  734. if( nl->outpos == 0 )
  735. len_field |= FRAME_FIRST;
  736. len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
  737. outb( SBNI_SIG, dev->base_addr + DAT );
  738. value = (u8) len_field;
  739. outb( value, dev->base_addr + DAT );
  740. crc = CRC32( value, crc );
  741. value = (u8) (len_field >> 8);
  742. outb( value, dev->base_addr + DAT );
  743. crc = CRC32( value, crc );
  744. outb( nl->tx_frameno, dev->base_addr + DAT );
  745. crc = CRC32( nl->tx_frameno, crc );
  746. outb( 0, dev->base_addr + DAT );
  747. crc = CRC32( 0, crc );
  748. *crc_p = crc;
  749. }
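/*
 * The on-wire frame layout implied by send_frame_header() above together
 * with download_data() and check_fhdr() (a descriptive sketch only; the
 * exact bit positions of the flags are defined in sbni.h):
 *
 *   +----------+------------------+---------+----------+---------+-----+
 *   | SBNI_SIG | len_field (2 b.) | frameno | reserved | payload | CRC |
 *   +----------+------------------+---------+----------+---------+-----+
 *
 * len_field = framelen + 6 (the payload length plus frameno, reserved and
 * the 4-byte CRC), stored in its FRAME_LEN_MASK bits, with FRAME_FIRST,
 * FRAME_RETRY and the FRAME_SENT_OK/FRAME_SENT_BAD acknowledge bits OR'ed
 * on top. The CRC covers everything after SBNI_SIG, including the zero
 * padding added by download_data().
 */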
/*
 * if the frame tail is not needed (incorrect number or received twice),
 * it is not stored, but the CRC is still calculated
 */
  754. static int
  755. skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
  756. {
  757. while( tail_len-- )
  758. crc = CRC32( inb( ioaddr + DAT ), crc );
  759. return crc == CRC32_REMAINDER;
  760. }
/*
 * Preliminary check that the frame header is correct; calculates its CRC
 * and splits it into simple fields
 */
  765. static int
  766. check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
  767. u32 *is_first, u32 *crc_p )
  768. {
  769. u32 crc = *crc_p;
  770. u8 value;
  771. if( inb( ioaddr + DAT ) != SBNI_SIG )
  772. return 0;
  773. value = inb( ioaddr + DAT );
  774. *framelen = (u32)value;
  775. crc = CRC32( value, crc );
  776. value = inb( ioaddr + DAT );
  777. *framelen |= ((u32)value) << 8;
  778. crc = CRC32( value, crc );
  779. *ack = *framelen & FRAME_ACK_MASK;
  780. *is_first = (*framelen & FRAME_FIRST) != 0;
  781. if( (*framelen &= FRAME_LEN_MASK) < 6 ||
  782. *framelen > SBNI_MAX_FRAME - 3 )
  783. return 0;
  784. value = inb( ioaddr + DAT );
  785. *frameno = (u32)value;
  786. crc = CRC32( value, crc );
  787. crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
  788. *framelen -= 2;
  789. *crc_p = crc;
  790. return 1;
  791. }
  792. static struct sk_buff *
  793. get_rx_buf( struct net_device *dev )
  794. {
  795. /* +2 is to compensate for the alignment fixup below */
  796. struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
  797. if( !skb )
  798. return NULL;
  799. skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
  800. return skb;
  801. }
  802. static void
  803. indicate_pkt( struct net_device *dev )
  804. {
  805. struct net_local *nl = netdev_priv(dev);
  806. struct sk_buff *skb = nl->rx_buf_p;
  807. skb_put( skb, nl->inppos );
  808. #ifdef CONFIG_SBNI_MULTILINE
  809. skb->protocol = eth_type_trans( skb, nl->master );
  810. netif_rx( skb );
  811. ++nl->master->stats.rx_packets;
  812. nl->master->stats.rx_bytes += nl->inppos;
  813. #else
  814. skb->protocol = eth_type_trans( skb, dev );
  815. netif_rx( skb );
  816. ++dev->stats.rx_packets;
  817. dev->stats.rx_bytes += nl->inppos;
  818. #endif
  819. nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
  820. }
  821. /* -------------------------------------------------------------------------- */
/*
 * The routine periodically checks wire activity and regenerates the marker
 * if the connection has been inactive for a long time.
 */
  826. static void
  827. sbni_watchdog( unsigned long arg )
  828. {
  829. struct net_device *dev = (struct net_device *) arg;
  830. struct net_local *nl = netdev_priv(dev);
  831. struct timer_list *w = &nl->watchdog;
  832. unsigned long flags;
  833. unsigned char csr0;
  834. spin_lock_irqsave( &nl->lock, flags );
  835. csr0 = inb( dev->base_addr + CSR0 );
  836. if( csr0 & RC_CHK ) {
  837. if( nl->timer_ticks ) {
  838. if( csr0 & (RC_RDY | BU_EMP) )
  839. /* receiving not active */
  840. nl->timer_ticks--;
  841. } else {
  842. nl->in_stats.timeout_number++;
  843. if( nl->delta_rxl )
  844. timeout_change_level( dev );
  845. outb( *(u_char *)&nl->csr1 | PR_RES,
  846. dev->base_addr + CSR1 );
  847. csr0 = inb( dev->base_addr + CSR0 );
  848. }
  849. } else
  850. nl->state &= ~FL_LINE_DOWN;
  851. outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
  852. init_timer( w );
  853. w->expires = jiffies + SBNI_TIMEOUT;
  854. w->data = arg;
  855. w->function = sbni_watchdog;
  856. add_timer( w );
  857. spin_unlock_irqrestore( &nl->lock, flags );
  858. }
  859. static unsigned char rxl_tab[] = {
  860. 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
  861. 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
  862. };
  863. #define SIZE_OF_TIMEOUT_RXL_TAB 4
  864. static unsigned char timeout_rxl_tab[] = {
  865. 0x03, 0x05, 0x08, 0x0b
  866. };
  867. /* -------------------------------------------------------------------------- */
  868. static void
  869. card_start( struct net_device *dev )
  870. {
  871. struct net_local *nl = netdev_priv(dev);
  872. nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
  873. nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
  874. nl->state |= FL_PREV_OK;
  875. nl->inppos = nl->outpos = 0;
  876. nl->wait_frameno = 0;
  877. nl->tx_frameno = 0;
  878. nl->framelen = 0;
  879. outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
  880. outb( EN_INT, dev->base_addr + CSR0 );
  881. }
  882. /* -------------------------------------------------------------------------- */
  883. /* Receive level auto-selection */
  884. static void
  885. change_level( struct net_device *dev )
  886. {
  887. struct net_local *nl = netdev_priv(dev);
  888. if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
  889. return;
  890. if( nl->cur_rxl_index == 0 )
  891. nl->delta_rxl = 1;
  892. else if( nl->cur_rxl_index == 15 )
  893. nl->delta_rxl = -1;
  894. else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
  895. nl->delta_rxl = -nl->delta_rxl;
  896. nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
inb( dev->base_addr + CSR0 ); /* needed for PCI cards */
  898. outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
  899. nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
  900. nl->cur_rxl_rcvd = 0;
  901. }
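/*
 * A note on the auto-tuning above (a reading of change_level(), not vendor
 * documentation): every badly received frame ends up here, and the receive
 * level index is nudged by delta_rxl in the current direction. The
 * direction is reversed whenever cur_rxl_rcvd has fallen below
 * prev_rxl_rcvd (i.e. the last step made reception worse), and is forced
 * upwards at index 0 and downwards at index 15 - a simple hill climb over
 * rxl_tab[]. For example (hypothetical numbers), with delta_rxl = 1,
 * prev_rxl_rcvd = 10 and cur_rxl_rcvd = 7, the sign flips and the next
 * step lowers the level index.
 */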
  902. static void
  903. timeout_change_level( struct net_device *dev )
  904. {
  905. struct net_local *nl = netdev_priv(dev);
  906. nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
  907. if( ++nl->timeout_rxl >= 4 )
  908. nl->timeout_rxl = 0;
  909. nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
  910. inb( dev->base_addr + CSR0 );
  911. outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
  912. nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
  913. nl->cur_rxl_rcvd = 0;
  914. }
  915. /* -------------------------------------------------------------------------- */
  916. /*
  917. * Open/initialize the board.
  918. */
  919. static int
  920. sbni_open( struct net_device *dev )
  921. {
  922. struct net_local *nl = netdev_priv(dev);
  923. struct timer_list *w = &nl->watchdog;
/*
 * For dual ISA adapters in "common irq" mode, we have to determine
 * whether the primary or the secondary channel is being initialized,
 * and set the irq handler only in the first case.
 */
  929. if( dev->base_addr < 0x400 ) { /* ISA only */
  930. struct net_device **p = sbni_cards;
  931. for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
  932. if( (*p)->irq == dev->irq &&
  933. ((*p)->base_addr == dev->base_addr + 4 ||
  934. (*p)->base_addr == dev->base_addr - 4) &&
  935. (*p)->flags & IFF_UP ) {
  936. ((struct net_local *) (netdev_priv(*p)))
  937. ->second = dev;
  938. netdev_notice(dev, "using shared irq with %s\n",
  939. (*p)->name);
  940. nl->state |= FL_SECONDARY;
  941. goto handler_attached;
  942. }
  943. }
  944. if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
  945. netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
  946. return -EAGAIN;
  947. }
  948. handler_attached:
  949. spin_lock( &nl->lock );
  950. memset( &dev->stats, 0, sizeof(struct net_device_stats) );
  951. memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
  952. card_start( dev );
  953. netif_start_queue( dev );
  954. /* set timer watchdog */
  955. init_timer( w );
  956. w->expires = jiffies + SBNI_TIMEOUT;
  957. w->data = (unsigned long) dev;
  958. w->function = sbni_watchdog;
  959. add_timer( w );
  960. spin_unlock( &nl->lock );
  961. return 0;
  962. }
  963. static int
  964. sbni_close( struct net_device *dev )
  965. {
  966. struct net_local *nl = netdev_priv(dev);
  967. if( nl->second && nl->second->flags & IFF_UP ) {
  968. netdev_notice(dev, "Secondary channel (%s) is active!\n",
  969. nl->second->name);
  970. return -EBUSY;
  971. }
  972. #ifdef CONFIG_SBNI_MULTILINE
  973. if( nl->state & FL_SLAVE )
  974. emancipate( dev );
  975. else
  976. while( nl->link ) /* it's master device! */
  977. emancipate( nl->link );
  978. #endif
  979. spin_lock( &nl->lock );
  980. nl->second = NULL;
  981. drop_xmit_queue( dev );
  982. netif_stop_queue( dev );
  983. del_timer( &nl->watchdog );
  984. outb( 0, dev->base_addr + CSR0 );
  985. if( !(nl->state & FL_SECONDARY) )
  986. free_irq( dev->irq, dev );
  987. nl->state &= FL_SECONDARY;
  988. spin_unlock( &nl->lock );
  989. return 0;
  990. }
/*
 * Valid combinations in CSR0 (for probing):
 *
 * VALID_DECODER   0000,0011,1011,1010
 *
 *                                ; 0  ; -
 * TR_REQ                         ; 1  ; +
 * TR_RDY                         ; 2  ; -
 * TR_RDY TR_REQ                  ; 3  ; +
 * BU_EMP                         ; 4  ; +
 * BU_EMP TR_REQ                  ; 5  ; +
 * BU_EMP TR_RDY                  ; 6  ; -
 * BU_EMP TR_RDY TR_REQ           ; 7  ; +
 * RC_RDY                         ; 8  ; +
 * RC_RDY TR_REQ                  ; 9  ; +
 * RC_RDY TR_RDY                  ; 10 ; -
 * RC_RDY TR_RDY TR_REQ           ; 11 ; -
 * RC_RDY BU_EMP                  ; 12 ; -
 * RC_RDY BU_EMP TR_REQ           ; 13 ; -
 * RC_RDY BU_EMP TR_RDY           ; 14 ; -
 * RC_RDY BU_EMP TR_RDY TR_REQ    ; 15 ; -
 */
  1011. #define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
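/*
 * Cross-check of the constant above, derived purely from the table: the
 * "+" rows are combinations 1, 3, 4, 5, 7, 8 and 9, so the mask is
 *
 *	(1<<1) + (1<<3) + (1<<4) + (1<<5) + (1<<7) + (1<<8) + (1<<9)
 *	    = 2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200 = 0x3ba,
 *
 * i.e. binary 0000 0011 1011 1010 as quoted in the comment.
 * sbni_card_probe() below simply tests bit number (csr0 >> 4) of this mask.
 */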
  1012. static int
  1013. sbni_card_probe( unsigned long ioaddr )
  1014. {
  1015. unsigned char csr0;
  1016. csr0 = inb( ioaddr + CSR0 );
  1017. if( csr0 != 0xff && csr0 != 0x00 ) {
  1018. csr0 &= ~EN_INT;
  1019. if( csr0 & BU_EMP )
  1020. csr0 |= EN_INT;
  1021. if( VALID_DECODER & (1 << (csr0 >> 4)) )
  1022. return 0;
  1023. }
  1024. return -ENODEV;
  1025. }
  1026. /* -------------------------------------------------------------------------- */
  1027. static int
  1028. sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
  1029. {
  1030. struct net_local *nl = netdev_priv(dev);
  1031. struct sbni_flags flags;
  1032. int error = 0;
  1033. #ifdef CONFIG_SBNI_MULTILINE
  1034. struct net_device *slave_dev;
  1035. char slave_name[ 8 ];
  1036. #endif
  1037. switch( cmd ) {
  1038. case SIOCDEVGETINSTATS :
  1039. if (copy_to_user( ifr->ifr_data, &nl->in_stats,
  1040. sizeof(struct sbni_in_stats) ))
  1041. error = -EFAULT;
  1042. break;
  1043. case SIOCDEVRESINSTATS :
  1044. if (!capable(CAP_NET_ADMIN))
  1045. return -EPERM;
  1046. memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
  1047. break;
  1048. case SIOCDEVGHWSTATE :
  1049. flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
  1050. flags.rate = nl->csr1.rate;
  1051. flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
  1052. flags.rxl = nl->cur_rxl_index;
  1053. flags.fixed_rxl = nl->delta_rxl == 0;
  1054. if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
  1055. error = -EFAULT;
  1056. break;
  1057. case SIOCDEVSHWSTATE :
  1058. if (!capable(CAP_NET_ADMIN))
  1059. return -EPERM;
  1060. spin_lock( &nl->lock );
  1061. flags = *(struct sbni_flags*) &ifr->ifr_ifru;
  1062. if( flags.fixed_rxl )
  1063. nl->delta_rxl = 0,
  1064. nl->cur_rxl_index = flags.rxl;
  1065. else
  1066. nl->delta_rxl = DEF_RXL_DELTA,
  1067. nl->cur_rxl_index = DEF_RXL;
  1068. nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
  1069. nl->csr1.rate = flags.rate;
  1070. outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
  1071. spin_unlock( &nl->lock );
  1072. break;
  1073. #ifdef CONFIG_SBNI_MULTILINE
  1074. case SIOCDEVENSLAVE :
  1075. if (!capable(CAP_NET_ADMIN))
  1076. return -EPERM;
  1077. if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
  1078. return -EFAULT;
  1079. slave_dev = dev_get_by_name(&init_net, slave_name );
  1080. if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
  1081. netdev_err(dev, "trying to enslave non-active device %s\n",
  1082. slave_name);
  1083. if (slave_dev)
  1084. dev_put(slave_dev);
  1085. return -EPERM;
  1086. }
  1087. return enslave( dev, slave_dev );
  1088. case SIOCDEVEMANSIPATE :
  1089. if (!capable(CAP_NET_ADMIN))
  1090. return -EPERM;
  1091. return emancipate( dev );
  1092. #endif /* CONFIG_SBNI_MULTILINE */
  1093. default :
  1094. return -EOPNOTSUPP;
  1095. }
  1096. return error;
  1097. }
  1098. #ifdef CONFIG_SBNI_MULTILINE
  1099. static int
  1100. enslave( struct net_device *dev, struct net_device *slave_dev )
  1101. {
  1102. struct net_local *nl = netdev_priv(dev);
  1103. struct net_local *snl = netdev_priv(slave_dev);
  1104. if( nl->state & FL_SLAVE ) /* This isn't master or free device */
  1105. return -EBUSY;
  1106. if( snl->state & FL_SLAVE ) /* That was already enslaved */
  1107. return -EBUSY;
  1108. spin_lock( &nl->lock );
  1109. spin_lock( &snl->lock );
  1110. /* append to list */
  1111. snl->link = nl->link;
  1112. nl->link = slave_dev;
  1113. snl->master = dev;
  1114. snl->state |= FL_SLAVE;
  1115. /* Summary statistics of MultiLine operation will be stored
  1116. in master's counters */
  1117. memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
  1118. netif_stop_queue( slave_dev );
  1119. netif_wake_queue( dev ); /* Now we are able to transmit */
  1120. spin_unlock( &snl->lock );
  1121. spin_unlock( &nl->lock );
  1122. netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
  1123. return 0;
  1124. }
  1125. static int
  1126. emancipate( struct net_device *dev )
  1127. {
  1128. struct net_local *snl = netdev_priv(dev);
  1129. struct net_device *p = snl->master;
  1130. struct net_local *nl = netdev_priv(p);
  1131. if( !(snl->state & FL_SLAVE) )
  1132. return -EINVAL;
  1133. spin_lock( &nl->lock );
  1134. spin_lock( &snl->lock );
  1135. drop_xmit_queue( dev );
  1136. /* exclude from list */
  1137. for(;;) { /* must be in list */
  1138. struct net_local *t = netdev_priv(p);
  1139. if( t->link == dev ) {
  1140. t->link = snl->link;
  1141. break;
  1142. }
  1143. p = t->link;
  1144. }
  1145. snl->link = NULL;
  1146. snl->master = dev;
  1147. snl->state &= ~FL_SLAVE;
  1148. netif_start_queue( dev );
  1149. spin_unlock( &snl->lock );
  1150. spin_unlock( &nl->lock );
  1151. dev_put( dev );
  1152. return 0;
  1153. }
  1154. #endif
  1155. static void
  1156. set_multicast_list( struct net_device *dev )
  1157. {
return; /* sbni always operates in promiscuous mode */
  1159. }
  1160. #ifdef MODULE
  1161. module_param_hw_array(io, int, ioport, NULL, 0);
  1162. module_param_hw_array(irq, int, irq, NULL, 0);
  1163. module_param_array(baud, int, NULL, 0);
  1164. module_param_array(rxl, int, NULL, 0);
  1165. module_param_array(mac, int, NULL, 0);
  1166. module_param(skip_pci_probe, bool, 0);
  1167. MODULE_LICENSE("GPL");
  1168. int __init init_module( void )
  1169. {
  1170. struct net_device *dev;
  1171. int err;
  1172. while( num < SBNI_MAX_NUM_CARDS ) {
  1173. dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
  1174. NET_NAME_UNKNOWN, sbni_devsetup);
  1175. if( !dev)
  1176. break;
  1177. sprintf( dev->name, "sbni%d", num );
  1178. err = sbni_init(dev);
  1179. if (err) {
  1180. free_netdev(dev);
  1181. break;
  1182. }
  1183. if( register_netdev( dev ) ) {
  1184. release_region( dev->base_addr, SBNI_IO_EXTENT );
  1185. free_netdev( dev );
  1186. break;
  1187. }
  1188. }
  1189. return *sbni_cards ? 0 : -ENODEV;
  1190. }
  1191. void
  1192. cleanup_module(void)
  1193. {
  1194. int i;
  1195. for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) {
  1196. struct net_device *dev = sbni_cards[i];
  1197. if (dev != NULL) {
  1198. unregister_netdev(dev);
  1199. release_region(dev->base_addr, SBNI_IO_EXTENT);
  1200. free_netdev(dev);
  1201. }
  1202. }
  1203. }
  1204. #else /* MODULE */
  1205. static int __init
  1206. sbni_setup( char *p )
  1207. {
  1208. int n, parm;
  1209. if( *p++ != '(' )
  1210. goto bad_param;
  1211. for( n = 0, parm = 0; *p && n < 8; ) {
  1212. (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
  1213. if( !*p || *p == ')' )
  1214. return 1;
  1215. if( *p == ';' )
  1216. ++p, ++n, parm = 0;
  1217. else if( *p++ != ',' )
  1218. break;
  1219. else
  1220. if( ++parm >= 5 )
  1221. break;
  1222. }
  1223. bad_param:
  1224. pr_err("Error in sbni kernel parameter!\n");
  1225. return 0;
  1226. }
  1227. __setup( "sbni=", sbni_setup );
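/*
 * An illustrative "sbni=" boot parameter, inferred from the parser above
 * (the numbers are placeholders, not recommended settings): values are
 * taken in the order io, irq, baud, rxl, mac; ';' starts the next card
 * and ')' ends the list, e.g.
 *
 *	sbni=(0x210,5,0,4;0x214,5)
 *
 * would describe two cards, 0x210/irq 5 with baud index 0 and a fixed
 * receive level of 4, and 0x214/irq 5 with everything else left at the
 * defaults.
 */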
  1228. #endif /* MODULE */
  1229. /* -------------------------------------------------------------------------- */
  1230. static u32
  1231. calc_crc32( u32 crc, u8 *p, u32 len )
  1232. {
  1233. while( len-- )
  1234. crc = CRC32( *p++, crc );
  1235. return crc;
  1236. }
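/*
 * A note on the CRC32() helper used above and throughout the driver: its
 * definition lives in sbni.h, so the exact form here is an assumption, but
 * calc_crc32() treats it as the usual table-driven, byte-at-a-time CRC-32
 * update, something like
 *
 *	#define CRC32(val, crc)  (crc32tab[((crc) ^ (val)) & 0xff] ^ ((crc) >> 8))
 *
 * with CRC32_INITIAL as the seed and CRC32_REMAINDER as the residue
 * expected once a frame's trailing 4-byte CRC has been fed through the
 * calculation (see skip_tail() and append_frame_to_pkt()).
 */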
  1237. static u32 crc32tab[] __attribute__ ((aligned(8))) = {
  1238. 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
  1239. 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
  1240. 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
  1241. 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
  1242. 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
  1243. 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
  1244. 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
  1245. 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
  1246. 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
  1247. 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
  1248. 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
  1249. 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
  1250. 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
  1251. 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
  1252. 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
  1253. 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
  1254. 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
  1255. 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
  1256. 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
  1257. 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
  1258. 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
  1259. 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
  1260. 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
  1261. 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
  1262. 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
  1263. 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
  1264. 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
  1265. 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
  1266. 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
  1267. 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
  1268. 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
  1269. 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
  1270. 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
  1271. 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
  1272. 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
  1273. 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
  1274. 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
  1275. 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
  1276. 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
  1277. 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
  1278. 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
  1279. 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
  1280. 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
  1281. 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
  1282. 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
  1283. 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
  1284. 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
  1285. 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
  1286. 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
  1287. 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
  1288. 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
  1289. 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
  1290. 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
  1291. 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
  1292. 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
  1293. 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
  1294. 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
  1295. 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
  1296. 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
  1297. 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
  1298. 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
  1299. 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
  1300. 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
  1301. 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
  1302. };