/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port	0x00		0x10		0x20
 * NPE		0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */
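
/*
 * Illustrative sketch only (not part of the driver): decoding a queue
 * entry word according to the bit layout above.  The driver performs
 * the same masking inline, see queue_get_desc() and eth_txdone_irq().
 *
 *	u32 entry  = qmgr_get_entry(queue);
 *	u32 npe_id = entry & 3;		(bits 0 -> 1, RX and TX-done)
 *	u32 port   = (entry >> 3) & 3;	(bits 3 -> 4)
 *	u32 phys   = entry & ~0x1F;	(bits 5 -> 31, descriptor address)
 */
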
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
#define DEBUG_PKT_BYTES 0
#define DEBUG_MDIO 0
#define DEBUG_CLOSE 0

#define DRV_NAME "ixp4xx_eth"

#define MAX_NPES 3

#define RX_DESCS 64 /* also length of all RX queues */
#define TX_DESCS 16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN 64 /* dwords */

#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE 0x1000
#define MAX_MRU 1536 /* 0x600 */
#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id) ((port_id) >> 4)
#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
#define TXDONE_QUEUE 31

#define PTP_SLAVE_MODE 1
#define PTP_MASTER_MODE 2
#define PORT2CHANNEL(p) NPE_ID(p->id)
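
/*
 * Worked example (illustrative only): for logical port 0x10 (NPE-B in
 * the table above), NPE_ID(0x10) = 1, PHYSICAL_ID(0x10) = 0,
 * TX_QUEUE(0x10) = 24 and RXFREE_QUEUE(0x10) = 27.
 */
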
/* TX Control Registers */
#define TX_CNTRL0_TX_EN 0x01
#define TX_CNTRL0_HALFDUPLEX 0x02
#define TX_CNTRL0_RETRY 0x04
#define TX_CNTRL0_PAD_EN 0x08
#define TX_CNTRL0_APPEND_FCS 0x10
#define TX_CNTRL0_2DEFER 0x20
#define TX_CNTRL0_RMII 0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN 0x01
#define RX_CNTRL0_PADSTRIP_EN 0x02
#define RX_CNTRL0_SEND_FCS 0x04
#define RX_CNTRL0_PAUSE_EN 0x08
#define RX_CNTRL0_LOOP_EN 0x10
#define RX_CNTRL0_ADDR_FLTR_EN 0x20
#define RX_CNTRL0_RX_RUNT_EN 0x40
#define RX_CNTRL0_BCAST_DIS 0x80
#define RX_CNTRL1_DEFER_EN 0x01

/* Core Control Register */
#define CORE_RESET 0x01
#define CORE_RX_FIFO_FLUSH 0x02
#define CORE_TX_FIFO_FLUSH 0x04
#define CORE_SEND_JAM 0x08
#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
			   TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
			   TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL CORE_MDC_EN

/* NPE message codes */
#define NPE_GETSTATUS 0x00
#define NPE_EDB_SETPORTADDRESS 0x01
#define NPE_EDB_GETMACADDRESSDATABASE 0x02
#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
#define NPE_GETSTATS 0x04
#define NPE_RESETSTATS 0x05
#define NPE_SETMAXFRAMELENGTHS 0x06
#define NPE_VLAN_SETRXTAGMODE 0x07
#define NPE_VLAN_SETDEFAULTRXVID 0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
#define NPE_VLAN_SETRXQOSENTRY 0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE 0x0D
#define NPE_FW_SETFIREWALLMODE 0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE 0x11
#define NPE_SETLOOPBACK_MODE 0x12
#define NPE_PC_SETBSSIDTABLE 0x13
#define NPE_ADDRESS_FILTER_CONFIG 0x14
#define NPE_APPENDFCSCONFIG 0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
#define NPE_MAC_RECOVERY_START 0x17

#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};

#define rx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	if (cpu_is_ixp43x()) {
		/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
			return -ENODEV;
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
	} else {
		/* All MII PHY accesses use NPE-B Ethernet registers */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
			return -ENODEV;
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	}

	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	spin_lock_init(&mdio_lock);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}

static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}

static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}

static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}

static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}

static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}

static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}

static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_nway_reset(struct net_device *dev)
{
	return phy_start_aneg(dev->phydev);
}

int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	if (!cpu_is_ixp46x()) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ixp46x_phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = ixp4xx_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0; /* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
	struct phy_device *phydev = NULL;
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENODEV;
		goto err_free;
	}

	dev->netdev_ops = &ixp4xx_netdev_ops;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 mdio_bus->id, plat->phy);
	phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		err = PTR_ERR(phydev);
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev)))
		goto err_phy_dis;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}

static int eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct phy_device *phydev = dev->phydev;
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}

static struct platform_driver ixp4xx_eth_driver = {
	.driver.name = DRV_NAME,
	.probe = eth_init_one,
	.remove = eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;

	if ((err = ixp4xx_mdio_register()))
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);