/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */

#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define MAC_ADDR_LOW		0x0430
#define MAC_ADDR_HIGH		0x0438
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 1 = SMI transaction in progress */
#define SMI_R_VALID		(1 << 27)	/* 1 = SMI read data valid */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0

/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg are the same
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
		  ICR_TXERR_H | ICR_TXERR_L |\
		  ICR_TXEND_H | ICR_TXEND_L |\
		  ICR_RXBUF | ICR_RXERR | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12	/* max slots probed per hash lookup */

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_DISABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
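/*
 * Extra headroom to reserve so that skb->data, which netdev_alloc_skb()
 * places NET_SKB_PAD bytes into the buffer, ends up cache-line aligned;
 * this evaluates to 0 when NET_SKB_PAD is already a whole number of
 * cache lines (see the comment in pxa168_eth_recalc_skb_size() below).
 */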
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};

struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */
	int phy_addr;

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static int ethernet_phy_setup(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}

static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}

static int ethernet_phy_get(struct pxa168_eth_private *pep)
{
	unsigned int reg_data;

	reg_data = rdl(pep, PHY_ADDRESS);

	return (reg_data >> (5 * pep->port_num)) & 0x1f;
}

static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * pep->port_num;

	reg_data = rdl(pep, PHY_ADDRESS);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(pep, PHY_ADDRESS, reg_data);
}

static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = netdev_alloc_skb(dev, pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb_end_pointer(skb) - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If the RX ring is empty of SKBs, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}

static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct pxa168_eth_private *pep = (void *)data;
	napi_schedule(&pep->napi);
}
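/*
 * Reverse the bit order within each nibble of x: bit 0 <-> bit 3,
 * bit 1 <-> bit 2, and likewise in the high nibble.  Applied after
 * nibble_swap_every_byte() below, the combined effect is to reverse
 * the bit order of every MAC address byte for the hash function.
 */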
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}

/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}
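/*
 * The masked hash result is an 11-bit index, i.e. 2048 possible slots;
 * at 8 bytes per struct addr_table_entry this matches the 16 kB
 * HASH_ADDR_TABLE_SIZE allocated in init_hash_table().
 */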
/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - Ethernet port private structure.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used in case of deleting an entry which is a
 *	  part of a chain in the hash table. We can't just delete the entry
 *	  since that will break the chain. We need to defragment the tables
 *	  from time to time.
 * rd - 0 Discard packet upon match.
 *    - 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
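/*
 * The entry is located by open addressing: starting at the slot returned
 * by hash_function(), up to HOP_NUMBER consecutive slots are probed,
 * wrapping from the last entry (0x7ff) back to the start of the table.
 */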
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = pep->htpr;
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			netdev_info(pep->dev,
				    "%s: table section is full, need to "
				    "move to 16kB implementation?\n",
				    __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}
/*
 * ----------------------------------------------------------------------------
 * Create an address table entry from the MAC address info
 * found in the specified net_device struct
 *
 * Input : pointer to ethernet interface network device structure
 * Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);

	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by 32-bit pointer stored
	 * in HTPR internal register. Two possible sizes exist for the hash
	 * table: 8 kB (256 kB of DRAM required (4 x 64 kB banks)) and 1/2 kB
	 * (16 kB of DRAM required (4 x 4 kB banks)). We currently only
	 * support the 1/2 kB table.
	 */
	/* TODO: Add support for the 8 kB hash table and an alternative hash
	 * function. The driver can dynamically switch to them if the 1/2 kB
	 * hash table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
						HASH_ADDR_TABLE_SIZE,
						&pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	} else {
		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	}
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and add dev->dev_addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

static void pxa168_eth_get_mac_address(struct net_device *dev,
				       unsigned char *addr)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
	unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];
	u32 mac_h, mac_l;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	mac_h = dev->dev_addr[0] << 24;
	mac_h |= dev->dev_addr[1] << 16;
	mac_h |= dev->dev_addr[2] << 8;
	mac_h |= dev->dev_addr[3];
	mac_l = dev->dev_addr[4] << 8;
	mac_l |= dev->dev_addr[5];
	wrl(pep, MAC_ADDR_HIGH, mac_h);
	wrl(pep, MAC_ADDR_LOW, mac_l);

	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}

static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		pxa168_get_settings(pep->dev, &cmd);
		phy_init_hw(pep->phy);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}

/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				netdev_err(dev, "Error in TX\n");
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}
static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						      struct pxa168_eth_private,
						      tx_timeout_task);
	struct net_device *dev = pep->dev;

	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}

static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * If we received a packet without the first / last bits set,
		 * or with the error summary bit set, the packet needs to be
		 * dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					netdev_err(dev,
						   "Rx pkt on multiple desc\n");
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	if (icr & ICR_MII_CH) {
		pep->work_todo |= WORK_LINK;
		ret = 1;
	}
	return ret;
}

static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			netdev_info(dev, "link down\n");
			netif_carrier_off(dev);
			txq_reclaim(dev, 1);
		}
		return;
	}
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1;
	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}
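/*
 * Worked example: with the default 1500 byte MTU this yields
 * 1500 + 36 = 1536 bytes, already a multiple of 8, so (when
 * SKB_DMA_REALIGN is 0) set_port_config_ext() below selects the
 * PCXR_MFL_1536 maximum frame length.
 */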
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep, PORT_CONFIG_EXT,
	    PCXR_2BSM |		/* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |	/* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	/* do not force link pass */
	    PCXR_TX_HIGH_PRI);	/* Transmit - high priority queue */

	return 0;
}

static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG,
	    SDCR_BSZ8 |		/* Burst size = 32 bytes */
	    SDCR_RIFB |		/* Rx interrupt on frame */
	    SDCR_BLMT |		/* Little endian transmit */
	    SDCR_BLMR |		/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);	/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);	/* Hash size is 1/2 kB */
	set_port_config_ext(pep);

	return err;
}

static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb)
		return -ENOMEM;

	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
						  &pep->rx_desc_dma,
						  GFP_KERNEL);
	if (!pep->p_rx_desc_area)
		goto out;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}

	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}

static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
			   pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}

static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb)
		return -ENOMEM;

	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
						  &pep->tx_desc_dma,
						  GFP_KERNEL);
	if (!pep->p_tx_desc_area)
		goto out;
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	kfree(pep->tx_skb);
	return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}

static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
	if (err) {
		dev_err(&dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	eth_port_start(dev);
	napi_enable(&pep->napi);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}

static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if ((mtu > 9500) || (mtu < 68))
		return -EINVAL;

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_err(&dev->dev,
			"fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
	pep->tx_desc_count++;

	return tx_desc_curr;
}

static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim on every poll because, with interrupts
	 * masked while we are in NAPI context, we could otherwise miss
	 * a TX_DONE event that is not reflected in the interrupt status
	 * register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}

static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space. */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}

static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}
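/*
 * SMI register layout, as assembled below: PHY register number in
 * bits 25:21, PHY address in bits 20:16, the read/write opcode in
 * bit 26, and, for writes, the 16-bit data in bits 15:0.
 */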
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			netdev_warn(pep->dev,
				    "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->phy != NULL)
		return phy_mii_ioctl(pep->phy, ifr, cmd);

	return -EOPNOTSUPP;
}

static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
{
	struct mii_bus *bus = pep->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
		/* Scan entire range */
		start = ethernet_phy_get(pep);
		num = 32;
	} else {
		/* Use phy addr specific to platform */
		start = phy_addr & 0x1f;
		num = 1;
	}
	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				ethernet_phy_set_addr(pep, addr);
		}
	}

	return phydev;
}

static void phy_init(struct pxa168_eth_private *pep)
{
	struct phy_device *phy = pep->phy;

	phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);

	if (pep->pd && pep->pd->speed != 0) {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = pep->pd->speed;
		phy->duplex = pep->pd->duplex;
	} else {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->supported &= PHY_BASIC_FEATURES;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	}

	phy_start_aneg(phy);
}

static int ethernet_phy_setup(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->pd && pep->pd->init)
		pep->pd->init();

	pep->phy = phy_scan(pep, pep->phy_addr & 0x1f);
	if (pep->phy != NULL)
		phy_init(pep);

	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	return 0;
}

static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = phy_read_status(pep->phy);
	if (err == 0)
		err = phy_ethtool_gset(pep->phy, cmd);

	return err;
}

static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	return phy_ethtool_sset(pep->phy, cmd);
}

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings	= pxa168_get_settings,
	.set_settings	= pxa168_set_settings,
	.get_drvinfo	= pxa168_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
};
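/*
 * A minimal sketch of board-file platform data for this driver, using
 * only the pxa168_eth_platform_data fields consumed above and in
 * pxa168_eth_probe(); the values are illustrative, not taken from any
 * real board:
 *
 *	static struct pxa168_eth_platform_data board_eth_data = {
 *		.port_number = 0,
 *		.phy_addr    = PXA168_ETH_PHY_ADDR_DEFAULT,  // scan the bus
 *		.speed       = 0,                            // 0 = autoneg
 *	};
 */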
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	struct device_node *np;
	const unsigned char *mac_addr = NULL;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
		return -ENODEV;
	}
	clk_prepare_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pep->base)) {
		err = PTR_ERR(pep->base);
		goto err_netdev;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->ethtool_ops = &pxa168_ethtool_ops;

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	if (pdev->dev.of_node)
		mac_addr = of_get_mac_address(pdev->dev.of_node);

	if (mac_addr && is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
	} else {
		/* try reading the mac address, if set by the bootloader */
		pxa168_eth_get_mac_address(dev, dev->dev_addr);
		if (!is_valid_ether_addr(dev->dev_addr)) {
			dev_info(&pdev->dev, "Using random mac address\n");
			eth_hw_addr_random(dev);
		}
	}

	pep->rx_ring_size = NUM_RX_DESCS;
	pep->tx_ring_size = NUM_TX_DESCS;

	pep->pd = dev_get_platdata(&pdev->dev);
	if (pep->pd) {
		if (pep->pd->rx_queue_size)
			pep->rx_ring_size = pep->pd->rx_queue_size;

		if (pep->pd->tx_queue_size)
			pep->tx_ring_size = pep->pd->tx_queue_size;

		pep->port_num = pep->pd->port_number;
		pep->phy_addr = pep->pd->phy_addr;
	} else if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "port-id",
				     &pep->port_num);

		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		if (np)
			of_property_read_u32(np, "reg", &pep->phy_addr);
	}

	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	err = pxa168_init_hw(pep);
	if (err)
		goto err_mdiobus;
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable_unprepare(clk);
	return err;
}

static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (pep->clk) {
		clk_disable_unprepare(pep->clk);
		pep->clk = NULL;
	}
	if (pep->phy != NULL)
		phy_detach(pep->phy);

	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	cancel_work_sync(&pep->tx_timeout_task);
	free_netdev(dev);
	return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static const struct of_device_id pxa168_eth_of_match[] = {
	{ .compatible = "marvell,pxa168-eth" },
	{ },
};
MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
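/*
 * A minimal sketch of a matching device-tree node, based only on the
 * compatible string and the properties this driver parses ("port-id"
 * and "phy-handle"); the unit address, reg window, and interrupt
 * specifier are placeholders, not real values:
 *
 *	ethernet@f7b90000 {
 *		compatible = "marvell,pxa168-eth";
 *		reg = <0xf7b90000 0x10000>;
 *		interrupts = <...>;
 *		port-id = <0>;
 *		phy-handle = <&ethphy0>;
 *	};
 */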
static struct platform_driver pxa168_eth_driver = {
	.probe		= pxa168_eth_probe,
	.remove		= pxa168_eth_remove,
	.shutdown	= pxa168_eth_shutdown,
	.resume		= pxa168_eth_resume,
	.suspend	= pxa168_eth_suspend,
	.driver = {
		.name		= DRIVER_NAME,
		.of_match_table	= of_match_ptr(pxa168_eth_of_match),
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");