hns_enet.c

/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
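/* SKB_TMP_LEN(skb) is the total header length (L2 + L3 + L4) of a TSO
 * frame: the distance from the MAC header to the transport header plus
 * the TCP header itself.  fill_v2_desc() below uses it to derive the TSO
 * payload length as skb->len - SKB_TMP_LEN(skb).
 */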
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 checksum, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
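/* TSO variant of the "can we send this skb" check: every buffer (the
 * linear head and each fragment) may itself need to be split into
 * BD_MAX_SEND_SIZE (8191 byte) chunks, so the BD count is summed per
 * buffer with a round-up division before being compared to ring space.
 */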
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
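/* fill_tso_desc() writes one or more v2 descriptors for a single DMA
 * mapping: a buffer larger than BD_MAX_SEND_SIZE is cut into
 * BD_MAX_SEND_SIZE slices, only the last slice carries the frag_end
 * flag, and only the first slice of an skb head keeps the DESC_TYPE_SKB
 * type so completion handling counts the skb only once.
 */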
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flag field carrying the VLAN and L3/L4 protocol info
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}
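/* RX page reuse: when the page is smaller than 8K and the ring uses 2K
 * buffers ("twobufs" mode) each page is split into two halves and
 * page_offset is flipped between them whenever the driver is the only
 * page owner; with larger pages the offset is advanced by the aligned
 * buffer size until the page is exhausted.  Pages belonging to a remote
 * NUMA node are never reused.
 */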
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP (over IPv4 or IPv6),
	 * 3) UDP (over IPv4 or IPv6),
	 * 4) SCTP (over IPv4 or IPv6)
	 * but we support many L3 (IPv4, IPv6, MPLS, PPPoE etc) and L4 (TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status.  These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to
	 * bad checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
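/* hns_nic_poll_rx_skb() assembles one received packet from one or more
 * buffer descriptors: short frames (<= HNS_RX_HEAD_SIZE) are copied
 * straight into the skb head, longer frames get only their headers pulled
 * in and the remaining buffers attached as page fragments.  bnum reports
 * how many BDs were consumed, even when the packet is dropped for a
 * descriptor error.
 */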
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
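/* NAPI RX poll: consume up to "budget" packets or as many BDs as the
 * hardware reports via RCB_REG_FBDNUM, handing each good skb to the
 * callback passed in as v (cast to the hns_nic_rx_up_pro signature above).
 * Consumed buffers are given back to the ring in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE, with a final refill before returning.
 */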
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the erroneous packet */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* for hardware bug fixed */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num == 0)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
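/* A TX "head" reported by hardware is only valid when it lies in the
 * half-open interval (next_to_clean, next_to_use], taking ring wrap-around
 * into account; anything else indicates a corrupted or stale hardware
 * pointer and is treated as an I/O error by the caller.
 */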
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}
/**
 * hns_nic_adjust_link - adjust network mode by the phy status or new param
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);
	napi_disable(&priv->ring_data[idx].napi);
}
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/* different irq balance for 16 core and 32 core */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    mask);
			}
		}
		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    mask);
			}
		}
	}

	free_cpumask_var(mask);
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}
static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}

/**
 * hns_set_multicast_list - set the multicast mac address list
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}
  1317. void hns_nic_set_rx_mode(struct net_device *ndev)
  1318. {
  1319. struct hns_nic_priv *priv = netdev_priv(ndev);
  1320. struct hnae_handle *h = priv->ae_handle;
  1321. if (h->dev->ops->set_promisc_mode) {
  1322. if (ndev->flags & IFF_PROMISC)
  1323. h->dev->ops->set_promisc_mode(h, 1);
  1324. else
  1325. h->dev->ops->set_promisc_mode(h, 0);
  1326. }
  1327. hns_set_multicast_list(ndev);
  1328. if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
  1329. netdev_err(ndev, "sync uc address fail\n");
  1330. }
static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}

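/* ndo_select_queue: on v2 hardware, multicast/broadcast frames are pinned to
 * queue 0 to work around the hardware loopback issue noted below; unicast
 * traffic uses the core's fallback queue selection.
 */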
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};

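/* Called from the periodic service task: for ports with an attached PHY,
 * only XGMII interfaces refresh the PHY status here; the resulting link
 * state is then pushed to the MAC via hns_nic_adjust_link().
 */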
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);
	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

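/* Queue the service work unless the interface is down, being removed, or a
 * run is already scheduled.
 */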
static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

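/* Allocate the per-queue ring_data array: entries [0, q_num) describe the
 * tx rings, entries [q_num, 2 * q_num) the rx rings, each with its own NAPI
 * context and a version-specific fini handler.
 */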
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

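/* Tear down what hns_nic_init_ring_data() set up: delete the NAPI contexts,
 * release any requested ring interrupts and free the ring_data array.
 */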
static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

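/* Pick the descriptor fill, rx buffer-number and tx-stop callbacks that
 * match the hardware version and the currently enabled TSO features.
 */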
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso when init
		 * control tso on/off through TSE bit in bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

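/* Try to bind this netdev to its acceleration engine (AE) handle, set up the
 * PHY, rings and private ops, then register the netdev. Returns -ENODEV when
 * the AE driver has not probed yet, so the caller can fall back to the hnae
 * notifier and retry later.
 */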
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

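/* Platform probe: read the DT/ACPI configuration (hardware version, ae-handle
 * reference, port index), set up the netdev features, MTU range and the
 * service timer/work, then try to attach to the AE handle; if the AE is not
 * ready yet, register a notifier and complete the attach when it appears.
 */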
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for compatibility with old code */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}

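/* Platform remove: unwind probe in reverse order - unregister the netdev,
 * free the ring data, disconnect the PHY, release the AE handle and the
 * notifier, then cancel the service work and free the netdev.
 */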
static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");