bgmac.c
/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any one of a few
	 * values, so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
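	/* Worked example (values are illustrative only): with unsigned 32-bit
	 * counters, ring->start == 0xfffffffe and a wrapped ring->end == 2
	 * still give ring->end - ring->start == 4 used slots, so the check
	 * below remains correct across the overflow.
	 */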
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points to the first empty slot. We tell hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
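	/* The status register reports the current descriptor as a byte offset
	 * relative to the (possibly non-zero) index_base; masking with
	 * STATDPTR and dividing by the descriptor size turns it back into a
	 * slot index.
	 */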
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc skb */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
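/* A best-effort probe: write a test value with non-zero low bits to the ring
 * base register and read it back. If the bits stick, the DMA engine accepts
 * an unaligned ring base; otherwise index_base has to compensate for the
 * offset (see bgmac_dma_alloc()).
 */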
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = le32_to_cpu(dma_desc[i].ctl1) & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;
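
	/* Observation about the ordering below (not from the specs): aligned
	 * rings are enabled before the base address is programmed, while
	 * unaligned rings are enabled only afterwards, presumably so the
	 * index_base-relative index writes are interpreted consistently.
	 */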
	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}
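
/* Example of the register layout used below (the address is illustrative
 * only): for 00:11:22:33:44:55, BGMAC_MACADDR_HIGH gets 0x00112233 and
 * BGMAC_MACADDR_LOW gets 0x00004455.
 */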
static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 iost;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept set until taking the MAC out of reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
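		/* Illustrative arithmetic (the bus clock value is just an
		 * example): a 250 MHz backplane gives bp_clk = 250 and
		 * mdp = (250 * 128 / 1000) - 3 = 29.
		 */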
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	ether_addr_copy(net_dev->dev_addr, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;
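
	/* 64-bit counters are exposed as two consecutive 32-bit registers:
	 * the loop below reads the high word from offset + 4 and ORs in the
	 * low word, so table entries with size == 8 come out as one u64.
	 */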
	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;

		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);

int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);

struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);

int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);

	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in the specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling the
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to fix the IRQ by assigning OOB #6 to the core */
	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);

int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");