bgmac.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742
  1. /*
  2. * Driver for (BCM4706)? GBit MAC core on BCMA bus.
  3. *
  4. * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
  5. *
  6. * Licensed under the GNU/GPL. See COPYING for details.
  7. */
  8. #include "bgmac.h"
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/delay.h>
  12. #include <linux/etherdevice.h>
  13. #include <linux/mii.h>
  14. #include <linux/phy.h>
  15. #include <linux/phy_fixed.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/bcm47xx_nvram.h>
/* BCMA cores driven by this module: the BCM4706-specific GBit MAC and the
 * generic GBit MAC core, any revision and class.
 */
static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
  25. static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
  26. {
  27. switch (bgmac->core->bus->chipinfo.id) {
  28. case BCMA_CHIP_ID_BCM4707:
  29. case BCMA_CHIP_ID_BCM47094:
  30. case BCMA_CHIP_ID_BCM53018:
  31. return true;
  32. default:
  33. return false;
  34. }
  35. }
  36. static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
  37. u32 value, int timeout)
  38. {
  39. u32 val;
  40. int i;
  41. for (i = 0; i < timeout / 10; i++) {
  42. val = bcma_read32(core, reg);
  43. if ((val & mask) == value)
  44. return true;
  45. udelay(10);
  46. }
  47. pr_err("Timeout waiting for reg 0x%X\n", reg);
  48. return false;
  49. }
  50. /**************************************************
  51. * DMA
  52. **************************************************/
/* Disable the TX DMA engine of @ring: suspend it, wait for a stopped/idle
 * state, then clear the control register and wait for "disabled". Timeouts
 * are logged but not propagated.
 */
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of few values, so
	 * implement whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;	/* i == 0 signals success to the check below */
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		/* One last check before declaring the reset failed */
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}
  94. static void bgmac_dma_tx_enable(struct bgmac *bgmac,
  95. struct bgmac_dma_ring *ring)
  96. {
  97. u32 ctl;
  98. ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
  99. if (bgmac->core->id.rev >= 4) {
  100. ctl &= ~BGMAC_DMA_TX_BL_MASK;
  101. ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
  102. ctl &= ~BGMAC_DMA_TX_MR_MASK;
  103. ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
  104. ctl &= ~BGMAC_DMA_TX_PC_MASK;
  105. ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
  106. ctl &= ~BGMAC_DMA_TX_PT_MASK;
  107. ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
  108. }
  109. ctl |= BGMAC_DMA_TX_ENABLE;
  110. ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
  111. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
  112. }
  113. static void
  114. bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
  115. int i, int len, u32 ctl0)
  116. {
  117. struct bgmac_slot_info *slot;
  118. struct bgmac_dma_desc *dma_desc;
  119. u32 ctl1;
  120. if (i == BGMAC_TX_RING_SLOTS - 1)
  121. ctl0 |= BGMAC_DESC_CTL0_EOT;
  122. ctl1 = len & BGMAC_DESC_CTL1_LEN;
  123. slot = &ring->slots[i];
  124. dma_desc = &ring->cpu_base[i];
  125. dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
  126. dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
  127. dma_desc->ctl0 = cpu_to_le32(ctl0);
  128. dma_desc->ctl1 = cpu_to_le32(ctl1);
  129. }
  130. static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
  131. struct bgmac_dma_ring *ring,
  132. struct sk_buff *skb)
  133. {
  134. struct device *dma_dev = bgmac->core->dma_dev;
  135. struct net_device *net_dev = bgmac->net_dev;
  136. int index = ring->end % BGMAC_TX_RING_SLOTS;
  137. struct bgmac_slot_info *slot = &ring->slots[index];
  138. int nr_frags;
  139. u32 flags;
  140. int i;
  141. if (skb->len > BGMAC_DESC_CTL1_LEN) {
  142. bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
  143. goto err_drop;
  144. }
  145. if (skb->ip_summed == CHECKSUM_PARTIAL)
  146. skb_checksum_help(skb);
  147. nr_frags = skb_shinfo(skb)->nr_frags;
  148. /* ring->end - ring->start will return the number of valid slots,
  149. * even when ring->end overflows
  150. */
  151. if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
  152. bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
  153. netif_stop_queue(net_dev);
  154. return NETDEV_TX_BUSY;
  155. }
  156. slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
  157. DMA_TO_DEVICE);
  158. if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
  159. goto err_dma_head;
  160. flags = BGMAC_DESC_CTL0_SOF;
  161. if (!nr_frags)
  162. flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
  163. bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
  164. flags = 0;
  165. for (i = 0; i < nr_frags; i++) {
  166. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  167. int len = skb_frag_size(frag);
  168. index = (index + 1) % BGMAC_TX_RING_SLOTS;
  169. slot = &ring->slots[index];
  170. slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
  171. len, DMA_TO_DEVICE);
  172. if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
  173. goto err_dma;
  174. if (i == nr_frags - 1)
  175. flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
  176. bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
  177. }
  178. slot->skb = skb;
  179. ring->end += nr_frags + 1;
  180. netdev_sent_queue(net_dev, skb->len);
  181. wmb();
  182. /* Increase ring->end to point empty slot. We tell hardware the first
  183. * slot it should *not* read.
  184. */
  185. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
  186. ring->index_base +
  187. (ring->end % BGMAC_TX_RING_SLOTS) *
  188. sizeof(struct bgmac_dma_desc));
  189. if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
  190. netif_stop_queue(net_dev);
  191. return NETDEV_TX_OK;
  192. err_dma:
  193. dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
  194. DMA_TO_DEVICE);
  195. while (i > 0) {
  196. int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
  197. struct bgmac_slot_info *slot = &ring->slots[index];
  198. u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
  199. int len = ctl1 & BGMAC_DESC_CTL1_LEN;
  200. dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
  201. }
  202. err_dma_head:
  203. bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
  204. ring->mmio_base);
  205. err_drop:
  206. dev_kfree_skb(skb);
  207. return NETDEV_TX_OK;
  208. }
  209. /* Free transmitted packets */
  210. static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
  211. {
  212. struct device *dma_dev = bgmac->core->dma_dev;
  213. int empty_slot;
  214. bool freed = false;
  215. unsigned bytes_compl = 0, pkts_compl = 0;
  216. /* The last slot that hardware didn't consume yet */
  217. empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
  218. empty_slot &= BGMAC_DMA_TX_STATDPTR;
  219. empty_slot -= ring->index_base;
  220. empty_slot &= BGMAC_DMA_TX_STATDPTR;
  221. empty_slot /= sizeof(struct bgmac_dma_desc);
  222. while (ring->start != ring->end) {
  223. int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
  224. struct bgmac_slot_info *slot = &ring->slots[slot_idx];
  225. u32 ctl1;
  226. int len;
  227. if (slot_idx == empty_slot)
  228. break;
  229. ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
  230. len = ctl1 & BGMAC_DESC_CTL1_LEN;
  231. if (ctl1 & BGMAC_DESC_CTL0_SOF)
  232. /* Unmap no longer used buffer */
  233. dma_unmap_single(dma_dev, slot->dma_addr, len,
  234. DMA_TO_DEVICE);
  235. else
  236. dma_unmap_page(dma_dev, slot->dma_addr, len,
  237. DMA_TO_DEVICE);
  238. if (slot->skb) {
  239. bytes_compl += slot->skb->len;
  240. pkts_compl++;
  241. /* Free memory! :) */
  242. dev_kfree_skb(slot->skb);
  243. slot->skb = NULL;
  244. }
  245. slot->dma_addr = 0;
  246. ring->start++;
  247. freed = true;
  248. }
  249. if (!pkts_compl)
  250. return;
  251. netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
  252. if (netif_queue_stopped(bgmac->net_dev))
  253. netif_wake_queue(bgmac->net_dev);
  254. }
  255. static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
  256. {
  257. if (!ring->mmio_base)
  258. return;
  259. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
  260. if (!bgmac_wait_value(bgmac->core,
  261. ring->mmio_base + BGMAC_DMA_RX_STATUS,
  262. BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
  263. 10000))
  264. bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
  265. ring->mmio_base);
  266. }
  267. static void bgmac_dma_rx_enable(struct bgmac *bgmac,
  268. struct bgmac_dma_ring *ring)
  269. {
  270. u32 ctl;
  271. ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
  272. if (bgmac->core->id.rev >= 4) {
  273. ctl &= ~BGMAC_DMA_RX_BL_MASK;
  274. ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
  275. ctl &= ~BGMAC_DMA_RX_PC_MASK;
  276. ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
  277. ctl &= ~BGMAC_DMA_RX_PT_MASK;
  278. ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
  279. }
  280. ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
  281. ctl |= BGMAC_DMA_RX_ENABLE;
  282. ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
  283. ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
  284. ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
  285. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
  286. }
  287. static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
  288. struct bgmac_slot_info *slot)
  289. {
  290. struct device *dma_dev = bgmac->core->dma_dev;
  291. dma_addr_t dma_addr;
  292. struct bgmac_rx_header *rx;
  293. void *buf;
  294. /* Alloc skb */
  295. buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
  296. if (!buf)
  297. return -ENOMEM;
  298. /* Poison - if everything goes fine, hardware will overwrite it */
  299. rx = buf + BGMAC_RX_BUF_OFFSET;
  300. rx->len = cpu_to_le16(0xdead);
  301. rx->flags = cpu_to_le16(0xbeef);
  302. /* Map skb for the DMA */
  303. dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
  304. BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
  305. if (dma_mapping_error(dma_dev, dma_addr)) {
  306. bgmac_err(bgmac, "DMA mapping error\n");
  307. put_page(virt_to_head_page(buf));
  308. return -ENOMEM;
  309. }
  310. /* Update the slot */
  311. slot->buf = buf;
  312. slot->dma_addr = dma_addr;
  313. return 0;
  314. }
  315. static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
  316. struct bgmac_dma_ring *ring)
  317. {
  318. dma_wmb();
  319. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
  320. ring->index_base +
  321. ring->end * sizeof(struct bgmac_dma_desc));
  322. }
  323. static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
  324. struct bgmac_dma_ring *ring, int desc_idx)
  325. {
  326. struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
  327. u32 ctl0 = 0, ctl1 = 0;
  328. if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
  329. ctl0 |= BGMAC_DESC_CTL0_EOT;
  330. ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
  331. /* Is there any BGMAC device that requires extension? */
  332. /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
  333. * B43_DMA64_DCTL1_ADDREXT_MASK;
  334. */
  335. dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
  336. dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
  337. dma_desc->ctl0 = cpu_to_le32(ctl0);
  338. dma_desc->ctl1 = cpu_to_le32(ctl1);
  339. ring->end = desc_idx;
  340. }
  341. static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
  342. struct bgmac_slot_info *slot)
  343. {
  344. struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
  345. dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
  346. DMA_FROM_DEVICE);
  347. rx->len = cpu_to_le16(0xdead);
  348. rx->flags = cpu_to_le16(0xbeef);
  349. dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
  350. DMA_FROM_DEVICE);
  351. }
/* RX poll: process up to @weight received frames from @ring. For each filled
 * slot, swap in a replacement buffer, validate the hardware-written header,
 * wrap the old buffer in an skb and hand it to GRO. The descriptor is
 * re-armed and ring->start advanced regardless of per-frame outcome.
 * Returns the number of frames passed up the stack.
 */
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	/* Convert the hardware's descriptor pointer into a slot index */
	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		/* do { } while (0) so error paths can "break" straight to the
		 * descriptor re-arm below.
		 */
		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				/* Allocation failed: recycle the old buffer */
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				bgmac_err(bgmac, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				break;
			}
			/* Expose only the payload behind the hw RX header */
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		/* Give the descriptor (with its new buffer) back to the hw */
		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
  421. /* Does ring support unaligned addressing? */
  422. static bool bgmac_dma_unaligned(struct bgmac *bgmac,
  423. struct bgmac_dma_ring *ring,
  424. enum bgmac_dma_ring_type ring_type)
  425. {
  426. switch (ring_type) {
  427. case BGMAC_DMA_RING_TX:
  428. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
  429. 0xff0);
  430. if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
  431. return true;
  432. break;
  433. case BGMAC_DMA_RING_RX:
  434. bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
  435. 0xff0);
  436. if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
  437. return true;
  438. break;
  439. }
  440. return false;
  441. }
  442. static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
  443. struct bgmac_dma_ring *ring)
  444. {
  445. struct device *dma_dev = bgmac->core->dma_dev;
  446. struct bgmac_dma_desc *dma_desc = ring->cpu_base;
  447. struct bgmac_slot_info *slot;
  448. int i;
  449. for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
  450. int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
  451. slot = &ring->slots[i];
  452. dev_kfree_skb(slot->skb);
  453. if (!slot->dma_addr)
  454. continue;
  455. if (slot->skb)
  456. dma_unmap_single(dma_dev, slot->dma_addr,
  457. len, DMA_TO_DEVICE);
  458. else
  459. dma_unmap_page(dma_dev, slot->dma_addr,
  460. len, DMA_TO_DEVICE);
  461. }
  462. }
  463. static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
  464. struct bgmac_dma_ring *ring)
  465. {
  466. struct device *dma_dev = bgmac->core->dma_dev;
  467. struct bgmac_slot_info *slot;
  468. int i;
  469. for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
  470. slot = &ring->slots[i];
  471. if (!slot->dma_addr)
  472. continue;
  473. dma_unmap_single(dma_dev, slot->dma_addr,
  474. BGMAC_RX_BUF_SIZE,
  475. DMA_FROM_DEVICE);
  476. put_page(virt_to_head_page(slot->buf));
  477. slot->dma_addr = 0;
  478. }
  479. }
  480. static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
  481. struct bgmac_dma_ring *ring,
  482. int num_slots)
  483. {
  484. struct device *dma_dev = bgmac->core->dma_dev;
  485. int size;
  486. if (!ring->cpu_base)
  487. return;
  488. /* Free ring of descriptors */
  489. size = num_slots * sizeof(struct bgmac_dma_desc);
  490. dma_free_coherent(dma_dev, size, ring->cpu_base,
  491. ring->dma_base);
  492. }
  493. static void bgmac_dma_cleanup(struct bgmac *bgmac)
  494. {
  495. int i;
  496. for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
  497. bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
  498. for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
  499. bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
  500. }
  501. static void bgmac_dma_free(struct bgmac *bgmac)
  502. {
  503. int i;
  504. for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
  505. bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
  506. BGMAC_TX_RING_SLOTS);
  507. for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
  508. bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
  509. BGMAC_RX_RING_SLOTS);
  510. }
  511. static int bgmac_dma_alloc(struct bgmac *bgmac)
  512. {
  513. struct device *dma_dev = bgmac->core->dma_dev;
  514. struct bgmac_dma_ring *ring;
  515. static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
  516. BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
  517. int size; /* ring size: different for Tx and Rx */
  518. int err;
  519. int i;
  520. BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
  521. BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
  522. if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
  523. bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
  524. return -ENOTSUPP;
  525. }
  526. for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
  527. ring = &bgmac->tx_ring[i];
  528. ring->mmio_base = ring_base[i];
  529. /* Alloc ring of descriptors */
  530. size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
  531. ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
  532. &ring->dma_base,
  533. GFP_KERNEL);
  534. if (!ring->cpu_base) {
  535. bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
  536. ring->mmio_base);
  537. goto err_dma_free;
  538. }
  539. ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
  540. BGMAC_DMA_RING_TX);
  541. if (ring->unaligned)
  542. ring->index_base = lower_32_bits(ring->dma_base);
  543. else
  544. ring->index_base = 0;
  545. /* No need to alloc TX slots yet */
  546. }
  547. for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
  548. ring = &bgmac->rx_ring[i];
  549. ring->mmio_base = ring_base[i];
  550. /* Alloc ring of descriptors */
  551. size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
  552. ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
  553. &ring->dma_base,
  554. GFP_KERNEL);
  555. if (!ring->cpu_base) {
  556. bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
  557. ring->mmio_base);
  558. err = -ENOMEM;
  559. goto err_dma_free;
  560. }
  561. ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
  562. BGMAC_DMA_RING_RX);
  563. if (ring->unaligned)
  564. ring->index_base = lower_32_bits(ring->dma_base);
  565. else
  566. ring->index_base = 0;
  567. }
  568. return 0;
  569. err_dma_free:
  570. bgmac_dma_free(bgmac);
  571. return -ENOMEM;
  572. }
/* Program ring base addresses into the hardware, enable the DMA engines and
 * pre-fill every RX slot with a mapped buffer. Rings probed as "unaligned"
 * are enabled only after their base address is written; aligned rings
 * before. Returns 0 or a negative errno; on failure all ring buffers are
 * cleaned up via bgmac_dma_cleanup().
 */
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;

		/* Allocate and publish a buffer for every RX descriptor */
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}
/**************************************************
 * PHY ops
 **************************************************/

/* Read @reg of the PHY at @phyaddr over MDIO. On BCM4706 the PHY access
 * registers live in the shared GMAC common core; on other chips they are
 * local to the MAC core. Returns the 16-bit register value, or 0xffff when
 * the access does not complete within 1ms.
 */
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	/* The BGMAC_* and BCMA_GMAC_CMN_* register layouts must agree, since
	 * the code below is used with either register file.
	 */
	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	/* Select the external PHY address */
	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	/* Start the read; START self-clears when the access completes */
	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
/* Write @value to @reg of the PHY at @phyaddr over MDIO. Register file
 * selection mirrors bgmac_phy_read(). Returns 0 on success or -ETIMEDOUT
 * when the access does not complete within 1ms.
 */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	/* Select the external PHY address */
	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	/* Ack any pending MDIO interrupt before starting the access */
	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	/* Start the write; START self-clears when the access completes */
	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
/* Chip-specific PHY workarounds: magic register sequences (taken from
 * Broadcom reference code, see URL above) applied to the first five PHY
 * addresses on BCM5356 and on certain packages of BCM5357/BCM4749/BCM53572.
 */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		/* Clear two chipcommon chip-control bits first */
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
/* Soft-reset the attached PHY via BMCR and re-run the init workarounds. */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	/* Nothing to reset when there is no addressable PHY */
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
	udelay(100);
	/* BMCR_RESET self-clears when the PHY has finished resetting */
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}
  742. /**************************************************
  743. * Chip ops
  744. **************************************************/
/* TODO: can we just drop @force? Can we avoid resetting MAC at all if there
 * is nothing to change? Try it after stabilizing the driver.
 */
/* Read-modify-write BGMAC_CMDCFG: new value is (cmdcfg & @mask) | @set.
 * The MAC is held in soft reset (BGMAC_CMDCFG_SR) around the update;
 * with @force the register is written even if the value is unchanged.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	/* Put MAC into soft reset before touching the config */
	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	/* Take MAC out of soft reset again */
	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
	udelay(2);
}
  760. static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
  761. {
  762. u32 tmp;
  763. tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
  764. bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
  765. tmp = (addr[4] << 8) | addr[5];
  766. bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
  767. }
  768. static void bgmac_set_rx_mode(struct net_device *net_dev)
  769. {
  770. struct bgmac *bgmac = netdev_priv(net_dev);
  771. if (net_dev->flags & IFF_PROMISC)
  772. bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
  773. else
  774. bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
  775. }
#if 0 /* We don't use these regs yet */
/* Snapshot the hardware MIB counters into bgmac->mib_{tx,rx}_regs.
 * Disabled: nothing reads the snapshots yet, and the BCM4706 layout
 * is unknown (see TODO below).
 */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif
  793. static void bgmac_clear_mib(struct bgmac *bgmac)
  794. {
  795. int i;
  796. if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
  797. return;
  798. bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
  799. for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
  800. bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
  801. for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
  802. bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
  803. }
  804. /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
  805. static void bgmac_mac_speed(struct bgmac *bgmac)
  806. {
  807. u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
  808. u32 set = 0;
  809. switch (bgmac->mac_speed) {
  810. case SPEED_10:
  811. set |= BGMAC_CMDCFG_ES_10;
  812. break;
  813. case SPEED_100:
  814. set |= BGMAC_CMDCFG_ES_100;
  815. break;
  816. case SPEED_1000:
  817. set |= BGMAC_CMDCFG_ES_1000;
  818. break;
  819. case SPEED_2500:
  820. set |= BGMAC_CMDCFG_ES_2500;
  821. break;
  822. default:
  823. bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
  824. }
  825. if (bgmac->mac_duplex == DUPLEX_HALF)
  826. set |= BGMAC_CMDCFG_HD;
  827. bgmac_cmdcfg_maskset(bgmac, mask, set, true);
  828. }
/* Configure the MII/MAC interface mode and fix up the initial link speed. */
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	u8 imode;

	if (bgmac_is_bcm4707_family(bgmac)) {
		/* BCM4707: enable clocks via IOCTL and force 2.5G full duplex */
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
			      BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		/* Read the interface mode from the device status register */
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			/* Modes 0/1 start at 100 Mbit full duplex */
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
/* Full chip reset: quiesce DMA, (re)enable the core, set up switch/PHY
 * interface type for the chips that need it, and bring CMDCFG to a known
 * state. Called from probe, open and stop.
 */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		/* Stop all DMA rings before resetting the MAC */
		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	/* These packages misreport the "attached" IOST flag - ignore it */
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
	    ci->id != BCMA_CHIP_ID_BCM47094) {
		flags = 0;
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bcma_core_enable(core, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		/* NVRAM may override the switch/interface type */
		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	/* Release the (non-robo) switch from reset if it was held there */
	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
	 * be kept until taking MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR(core->id.rev),
			     false);
	/* Speed/duplex must be renegotiated after the reset */
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);
}
/* Unmask the interrupts selected in bgmac->int_mask. */
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}
/* Mask all device interrupts. */
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	/* Read back so the write is not left posted on the bus */
	bgmac_read(bgmac, BGMAC_INT_MASK);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
/* Enable TX/RX in the MAC and apply chip-specific flow-control and
 * RX-queue tuning.
 */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	/* Set TE/RE while the MAC goes through a soft-reset cycle */
	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	/* Chip-specific flow control thresholds and pause quanta */
	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	if (!bgmac_is_bcm4707_family(bgmac)) {
		/* Derive the memory-data-parallelism value from the
		 * backplane clock (in MHz)
		 */
		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
				1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
/* Program the runtime MAC configuration and enable the device. */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	/* MAC loopback is a debug option kept in bgmac->loopback */
	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}
  1039. static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
  1040. {
  1041. struct bgmac *bgmac = netdev_priv(dev_id);
  1042. u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
  1043. int_status &= bgmac->int_mask;
  1044. if (!int_status)
  1045. return IRQ_NONE;
  1046. int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
  1047. if (int_status)
  1048. bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
  1049. /* Disable new interrupts until handling existing ones */
  1050. bgmac_chip_intrs_off(bgmac);
  1051. napi_schedule(&bgmac->napi);
  1052. return IRQ_HANDLED;
  1053. }
/* NAPI poll: reap completed TX descriptors, receive up to @weight frames,
 * and re-enable interrupts once the rings are drained.
 */
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete(napi);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}
  1071. /**************************************************
  1072. * net_device_ops
  1073. **************************************************/
/* ndo_open: reset the chip, set up DMA, request the IRQ and start the PHY. */
static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs say about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);
	return 0;
}
/* ndo_stop: reverse of bgmac_open() - stop PHY/NAPI, mask and free the IRQ,
 * then reset the chip and tear down DMA.
 */
static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(bgmac->phy_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
  1108. static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
  1109. struct net_device *net_dev)
  1110. {
  1111. struct bgmac *bgmac = netdev_priv(net_dev);
  1112. struct bgmac_dma_ring *ring;
  1113. /* No QOS support yet */
  1114. ring = &bgmac->tx_ring[0];
  1115. return bgmac_dma_tx_add(bgmac, ring, skb);
  1116. }
  1117. static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
  1118. {
  1119. struct bgmac *bgmac = netdev_priv(net_dev);
  1120. int ret;
  1121. ret = eth_prepare_mac_addr_change(net_dev, addr);
  1122. if (ret < 0)
  1123. return ret;
  1124. bgmac_write_mac_address(bgmac, (u8 *)addr);
  1125. eth_commit_mac_addr_change(net_dev, addr);
  1126. return 0;
  1127. }
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while running. */
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
/* Netdev callbacks implemented by this driver. */
static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};
  1144. /**************************************************
  1145. * ethtool_ops
  1146. **************************************************/
/* ethtool get_settings: delegate to the PHY layer. */
static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_gset(bgmac->phy_dev, cmd);
}
/* ethtool set_settings: delegate to the PHY layer. */
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return phy_ethtool_sset(bgmac->phy_dev, cmd);
}
  1159. static void bgmac_get_drvinfo(struct net_device *net_dev,
  1160. struct ethtool_drvinfo *info)
  1161. {
  1162. strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  1163. strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
  1164. }
/* ethtool callbacks implemented by this driver. */
static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.set_settings		= bgmac_set_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};
  1170. /**************************************************
  1171. * MII
  1172. **************************************************/
/* mii_bus read callback; bus->priv holds the struct bgmac pointer. */
static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}
/* mii_bus write callback; bus->priv holds the struct bgmac pointer. */
static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
  1182. static void bgmac_adjust_link(struct net_device *net_dev)
  1183. {
  1184. struct bgmac *bgmac = netdev_priv(net_dev);
  1185. struct phy_device *phy_dev = bgmac->phy_dev;
  1186. bool update = false;
  1187. if (phy_dev->link) {
  1188. if (phy_dev->speed != bgmac->mac_speed) {
  1189. bgmac->mac_speed = phy_dev->speed;
  1190. update = true;
  1191. }
  1192. if (phy_dev->duplex != bgmac->mac_duplex) {
  1193. bgmac->mac_duplex = phy_dev->duplex;
  1194. update = true;
  1195. }
  1196. }
  1197. if (update) {
  1198. bgmac_mac_speed(bgmac);
  1199. phy_print_status(phy_dev);
  1200. }
  1201. }
  1202. static int bgmac_fixed_phy_register(struct bgmac *bgmac)
  1203. {
  1204. struct fixed_phy_status fphy_status = {
  1205. .link = 1,
  1206. .speed = SPEED_1000,
  1207. .duplex = DUPLEX_FULL,
  1208. };
  1209. struct phy_device *phy_dev;
  1210. int err;
  1211. phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
  1212. if (!phy_dev || IS_ERR(phy_dev)) {
  1213. bgmac_err(bgmac, "Failed to register fixed PHY device\n");
  1214. return -ENODEV;
  1215. }
  1216. err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
  1217. PHY_INTERFACE_MODE_MII);
  1218. if (err) {
  1219. bgmac_err(bgmac, "Connecting PHY failed\n");
  1220. return err;
  1221. }
  1222. bgmac->phy_dev = phy_dev;
  1223. return err;
  1224. }
  1225. static int bgmac_mii_register(struct bgmac *bgmac)
  1226. {
  1227. struct mii_bus *mii_bus;
  1228. struct phy_device *phy_dev;
  1229. char bus_id[MII_BUS_ID_SIZE + 3];
  1230. int err = 0;
  1231. if (bgmac_is_bcm4707_family(bgmac))
  1232. return bgmac_fixed_phy_register(bgmac);
  1233. mii_bus = mdiobus_alloc();
  1234. if (!mii_bus)
  1235. return -ENOMEM;
  1236. mii_bus->name = "bgmac mii bus";
  1237. sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
  1238. bgmac->core->core_unit);
  1239. mii_bus->priv = bgmac;
  1240. mii_bus->read = bgmac_mii_read;
  1241. mii_bus->write = bgmac_mii_write;
  1242. mii_bus->parent = &bgmac->core->dev;
  1243. mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
  1244. err = mdiobus_register(mii_bus);
  1245. if (err) {
  1246. bgmac_err(bgmac, "Registration of mii bus failed\n");
  1247. goto err_free_bus;
  1248. }
  1249. bgmac->mii_bus = mii_bus;
  1250. /* Connect to the PHY */
  1251. snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
  1252. bgmac->phyaddr);
  1253. phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
  1254. PHY_INTERFACE_MODE_MII);
  1255. if (IS_ERR(phy_dev)) {
  1256. bgmac_err(bgmac, "PHY connecton failed\n");
  1257. err = PTR_ERR(phy_dev);
  1258. goto err_unregister_bus;
  1259. }
  1260. bgmac->phy_dev = phy_dev;
  1261. return err;
  1262. err_unregister_bus:
  1263. mdiobus_unregister(mii_bus);
  1264. err_free_bus:
  1265. mdiobus_free(mii_bus);
  1266. return err;
  1267. }
  1268. static void bgmac_mii_unregister(struct bgmac *bgmac)
  1269. {
  1270. struct mii_bus *mii_bus = bgmac->mii_bus;
  1271. mdiobus_unregister(mii_bus);
  1272. mdiobus_free(mii_bus);
  1273. }
  1274. /**************************************************
  1275. * BCMA bus ops
  1276. **************************************************/
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
/* BCMA probe: pick up MAC/PHY settings from SPROM, allocate the netdev,
 * reset the chip, set up DMA and MDIO, and register the net device.
 */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac;
	int err;

	/* Each core unit has its own MAC address slot in SPROM */
	switch (core->core_unit) {
	case 0:
		mac = sprom->et0mac;
		break;
	case 1:
		mac = sprom->et1mac;
		break;
	case 2:
		mac = sprom->et2mac;
		break;
	default:
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	/* PHY address also comes per core unit from SPROM */
	switch (core->core_unit) {
	case 0:
		bgmac->phyaddr = sprom->et0phyaddr;
		break;
	case 1:
		bgmac->phyaddr = sprom->et1phyaddr;
		break;
	case 2:
		bgmac->phyaddr = sprom->et2phyaddr;
		break;
	}
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	/* For Northstar, we have to take all GMAC core out of reset */
	if (bgmac_is_bcm4707_family(bgmac)) {
		struct bcma_device *ns_core;
		int ns_gmac;

		/* Northstar has 4 GMAC cores */
		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
			/* As Northstar requirement, we have to reset all GMACs
			 * before accessing one. bgmac_chip_reset() call
			 * bcma_core_enable() for this core. Then the other
			 * three GMACs didn't reset. We do it here.
			 */
			ns_core = bcma_find_core_unit(core->bus,
						      BCMA_CORE_MAC_GBIT,
						      ns_gmac);
			if (ns_core && !bcma_core_is_enabled(ns_core))
				bcma_core_enable(ns_core, 0);
		}
	}

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	/* NVRAM knob to disable TX-complete interrupts */
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}
/* BCMA remove: unwind bgmac_probe() in reverse order. */
static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}
/* BCMA bus driver glue. */
static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};
  1425. static int __init bgmac_init(void)
  1426. {
  1427. int err;
  1428. err = bcma_driver_register(&bgmac_bcma_driver);
  1429. if (err)
  1430. return err;
  1431. pr_info("Broadcom 47xx GBit MAC driver loaded\n");
  1432. return 0;
  1433. }
/* Module exit: unregister the BCMA driver. */
static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}
  1438. module_init(bgmac_init)
  1439. module_exit(bgmac_exit)
  1440. MODULE_AUTHOR("Rafał Miłecki");
  1441. MODULE_LICENSE("GPL");