bgmac.c
/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/
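
/* Note on the ring model used below: each DMA ring is an array of hardware
 * descriptors (ring->cpu_base) with a parallel array of software state
 * (ring->slots). The hardware consumes descriptors up to (but excluding)
 * the index written to the ring's INDEX register. TX uses free-running
 * start/end counters that are reduced modulo BGMAC_TX_RING_SLOTS on use;
 * RX keeps its cursors within [0, BGMAC_RX_RING_SLOTS).
 */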

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
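
/* Note: the xmit path below consumes one descriptor for the skb head plus
 * one per page fragment. SOF/EOF mark the frame boundaries and IOC
 * (interrupt on completion) is requested on the frame's last descriptor.
 */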

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points to the first empty slot, i.e. we tell the
	 * hardware the first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
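/* Note: the STATDPTR field of BGMAC_DMA_TX_STATUS holds the byte offset of
 * the first descriptor the hardware has not consumed yet; subtracting
 * ring->index_base and dividing by the descriptor size turns it into the
 * slot index that bounds the cleanup loop below.
 */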
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware hasn't consumed yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc buffer (an skb is built around it later in the rx path) */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}
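
/* Note: the receive path below swaps a fresh buffer into the slot before
 * passing the old one up the stack. The hardware writes a bgmac_rx_header
 * (length + flags) at the head of each buffer, so the 0xdead/0xbeef poison
 * written at allocation time exposes descriptors that were reported as
 * complete without the frame actually having been DMA-ed in.
 */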
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}
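
/* Note on the probe above: writing 0xff0 to the ring-base low register and
 * reading it back appears to test whether the core latches the low address
 * bits; cores requiring an aligned ring base read back 0. For unaligned
 * rings the INDEX registers are presumably relative to the ring base, hence
 * the lower_32_bits(ring->dma_base) stored in ring->index_base below.
 */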
static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = le32_to_cpu(dma_desc[i].ctl1) & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}
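
/* Note: the helper above applies CMDCFG changes with the MAC held in
 * software reset: it sets the revision-dependent SR bit, waits 2us, writes
 * the new value while still in reset, then clears SR. @force makes the
 * write happen even when the register value would not change.
 */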

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
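
/* Note: bgmac_chip_reset() below runs both at probe time and on every
 * open/stop. With the core clock already enabled it first quiesces the DMA
 * rings, then (unless BGMAC_FEAT_NO_RESET) re-runs the core reset/enable
 * sequence, programs the switch/interface type from chip-control registers
 * and the "et_swtype" NVRAM variable, and finally reinitializes CMDCFG.
 */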
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 iost;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset, so the
	 * bit has to be kept until taking the MAC out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
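
/* Note: the NAPI poll below acks all interrupt causes up front, reclaims
 * completed TX descriptors, then handles up to @weight RX frames. If new
 * TX/RX events arrived in the meantime it returns @weight to stay
 * scheduled; otherwise it completes NAPI and re-enables the interrupts.
 */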
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete(napi);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* The specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	/* ndo_set_mac_address passes a struct sockaddr; the MAC bytes live
	 * in sa_data, not at the start of the structure.
	 */
	bgmac_write_mac_address(bgmac, (u8 *)sa->sa_data);
	eth_commit_mac_addr_change(net_dev, addr);

	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};
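
/* Counters with size 8 are 64-bit MIB registers read as two 32-bit halves,
 * with the high word at offset + 4; see bgmac_get_ethtool_stats() below.
 */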
static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
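
/* Note: with no MDIO bus registered (bgmac->mii_bus == NULL) the driver
 * falls back to a fixed PHY that always reports 1Gb/s full duplex,
 * presumably for setups where the MAC is wired directly to a switch.
 */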
static int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}

static int bgmac_phy_connect(struct bgmac *bgmac)
{
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "PHY connection failed\n");
		return PTR_ERR(phy_dev);
	}

	return 0;
}
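
/* Note: bgmac_enet_probe() takes a pre-filled struct bgmac from the bus
 * glue (dev, dma_dev, irq, feature_flags, mac_addr, ...) and memcpy()s it
 * into the private area of the freshly allocated netdev; the caller's copy
 * is not referenced afterwards.
 */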
int bgmac_enet_probe(struct bgmac *info)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	int err;

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	memcpy(bgmac, info, sizeof(*bgmac));
	bgmac->net_dev = net_dev;
	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);

	if (!is_valid_ether_addr(bgmac->mac_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			bgmac->mac_addr);
		eth_random_addr(bgmac->mac_addr);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 bgmac->mac_addr);
	}
	ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);

	/* This (reset &) enable is not present in the specs or the reference
	 * driver, but Broadcom does it in its arch PCI code when enabling
	 * the fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to fix the IRQ by assigning OOB #6 to the core */
	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	if (!bgmac->mii_bus)
		err = bgmac_phy_connect_direct(bgmac);
	else
		err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_netdev_free:
	free_netdev(net_dev);

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");