// SPDX-License-Identifier: GPL-2.0+
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <net/tcp.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST 0x104
#define NETSEC_REG_COM_INIT 0x120

#define NETSEC_REG_TOP_STATUS 0x200
#define NETSEC_IRQ_RX BIT(1)
#define NETSEC_IRQ_TX BIT(0)

#define NETSEC_REG_TOP_INTEN 0x204
#define NETSEC_REG_INTEN_SET 0x234
#define NETSEC_REG_INTEN_CLR 0x238

#define NETSEC_REG_NRM_TX_STATUS 0x400
#define NETSEC_REG_NRM_TX_INTEN 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
#define NRM_TX_ST_NTOWNR BIT(17)
#define NRM_TX_ST_TR_ERR BIT(16)
#define NRM_TX_ST_TXDONE BIT(15)
#define NRM_TX_ST_TMREXP BIT(14)

#define NETSEC_REG_NRM_RX_STATUS 0x440
#define NETSEC_REG_NRM_RX_INTEN 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
#define NRM_RX_ST_RC_ERR BIT(16)
#define NRM_RX_ST_PKTCNT BIT(15)
#define NRM_RX_ST_TMREXP BIT(14)

#define NETSEC_REG_PKT_CMD_BUF 0xd0

#define NETSEC_REG_CLK_EN 0x100

#define NETSEC_REG_PKT_CTRL 0x140

#define NETSEC_REG_DMA_TMR_CTRL 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
#define NETSEC_REG_F_TAIKI_VER 0x230
#define NETSEC_REG_DMA_HM_CTRL 0x214
#define NETSEC_REG_DMA_MH_CTRL 0x220
#define NETSEC_REG_ADDR_DIS_CORE 0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c

#define NETSEC_REG_NRM_TX_PKTCNT 0x410
#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
#define NETSEC_REG_NRM_TX_TMR 0x41c
#define NETSEC_REG_NRM_RX_PKTCNT 0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
#define NETSEC_REG_NRM_RX_TMR 0x45c
#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
#define NETSEC_REG_NRM_TX_CONFIG 0x430
#define NETSEC_REG_NRM_RX_CONFIG 0x470

#define MAC_REG_STATUS 0x1024
#define MAC_REG_DATA 0x11c0
#define MAC_REG_CMD 0x11c4
#define MAC_REG_FLOW_TH 0x11cc
#define MAC_REG_INTF_SEL 0x11d4
#define MAC_REG_DESC_INIT 0x11fc
#define MAC_REG_DESC_SOFT_RST 0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500

#define GMAC_REG_MCR 0x0000
#define GMAC_REG_MFFR 0x0004
#define GMAC_REG_GAR 0x0010
#define GMAC_REG_GDR 0x0014
#define GMAC_REG_FCR 0x0018
#define GMAC_REG_BMR 0x1000
#define GMAC_REG_RDLAR 0x100c
#define GMAC_REG_TDLAR 0x1010
#define GMAC_REG_OMR 0x1018

#define MHZ(n) ((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD 31
#define NETSEC_TX_SHIFT_LD_FIELD 30
#define NETSEC_TX_SHIFT_DRID_FIELD 24
#define NETSEC_TX_SHIFT_PT_FIELD 21
#define NETSEC_TX_SHIFT_TDRID_FIELD 16
#define NETSEC_TX_SHIFT_CC_FIELD 15
#define NETSEC_TX_SHIFT_FS_FIELD 9
#define NETSEC_TX_LAST 8
#define NETSEC_TX_SHIFT_CO 7
#define NETSEC_TX_SHIFT_SO 6
#define NETSEC_TX_SHIFT_TRS_FIELD 4

#define NETSEC_RX_PKT_OWN_FIELD 31
#define NETSEC_RX_PKT_LD_FIELD 30
#define NETSEC_RX_PKT_SDRID_FIELD 24
#define NETSEC_RX_PKT_FR_FIELD 23
#define NETSEC_RX_PKT_ER_FIELD 21
#define NETSEC_RX_PKT_ERR_FIELD 16
#define NETSEC_RX_PKT_TDRID_FIELD 12
#define NETSEC_RX_PKT_FS_FIELD 9
#define NETSEC_RX_PKT_LS_FIELD 8
#define NETSEC_RX_PKT_CO_FIELD 6
#define NETSEC_RX_PKT_ERR_MASK 3

#define NETSEC_MAX_TX_PKT_LEN 1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018

#define NETSEC_RING_GMAC 15
#define NETSEC_RING_MAX 2

#define NETSEC_TCP_SEG_LEN_MAX 1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960

#define NETSEC_RX_CKSUM_NOTAVAIL 0
#define NETSEC_RX_CKSUM_OK 1
#define NETSEC_RX_CKSUM_NG 2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)

#define NETSEC_INT_PKTCNT_MAX 2047

#define NETSEC_FLOW_START_TH_MAX 95
#define NETSEC_FLOW_STOP_TH_MAX 95
#define NETSEC_FLOW_PAUSE_TIME_MIN 5

#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D BIT(0)

#define NETSEC_COM_INIT_REG_DB BIT(2)
#define NETSEC_COM_INIT_REG_CLS BIT(1)
#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
				 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET 0
#define NETSEC_SOFT_RST_REG_RUN BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP 1
#define MH_CTRL__MODE_TRANS BIT(20)

#define NETSEC_GMAC_CMD_ST_READ 0
#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
#define NETSEC_GMAC_BMR_REG_SWR 0x00000001

#define NETSEC_GMAC_OMR_REG_ST BIT(13)
#define NETSEC_GMAC_OMR_REG_SR BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
#define NETSEC_GMAC_MCR_REG_CST BIT(25)
#define NETSEC_GMAC_MCR_REG_JE BIT(20)
#define NETSEC_MCR_PS BIT(15)
#define NETSEC_GMAC_MCR_REG_FES BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c

#define NETSEC_FCR_RFE BIT(2)
#define NETSEC_FCR_TFE BIT(1)

#define NETSEC_GMAC_GAR_REG_GW BIT(1)
#define NETSEC_GMAC_GAR_REG_GB BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
#define GMAC_REG_SHIFT_CR_GAR 2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5

#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
#define NETSEC_REG_DESC_TMR_MODE 4
#define NETSEC_REG_DESC_ENDIAN 0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
#define NETSEC_MAC_DESC_INIT_REG_INIT 1

#define NETSEC_EEPROM_MAC_ADDRESS 0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
#define NETSEC_EEPROM_HM_ME_SIZE 0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24

#define DESC_NUM 128
#define NAPI_BUDGET (DESC_NUM / 2)

#define DESC_SZ sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

struct netsec_desc {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 pkt_cnt;
	u16 head, tail;
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC 1000
#define TIMEOUT_SECONDARY_MS_MAC 100

static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}
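
/* Register polling in this driver is two-phase: a short busy-wait loop
 * (cpu_relax) covers the common fast case, then a sleeping loop
 * (usleep_range) takes over so a stuck device cannot monopolise a CPU.
 */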
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}
static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	u32 data; /* u32, not int: netsec_mac_read() takes a u32 * */
	int ret;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}
static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}
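
/* MDIO accesses go through the GMAC's GAR/GDR register pair: the PHY
 * address, register number, MDC clock-range divider and the GB (busy)
 * bit are packed into GAR, data moves through GDR, and completion is
 * signalled by the hardware clearing GB. GAR/GDR themselves sit behind
 * the indirect MAC_REG_CMD/MAC_REG_DATA window, hence the
 * netsec_mac_read()/netsec_mac_write() wrappers.
 */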
static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					  NETSEC_GMAC_GAR_REG_GB);
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/
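
/* RX buffer allocation depends on DMA coherence: on coherent platforms
 * the usual IP-aligned skb is used, while on non-coherent ones the
 * buffer length is rounded up to a whole L1 cache line (and NET_IP_ALIGN
 * is skipped), presumably so cache maintenance on the streaming mapping
 * never touches a line shared with unrelated data.
 */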
static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
					struct netsec_desc *desc)
{
	struct sk_buff *skb;

	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
	} else {
		desc->len = L1_CACHE_ALIGN(desc->len);
		skb = netdev_alloc_skb(priv->ndev, desc->len);
	}
	if (!skb)
		return NULL;

	desc->addr = skb->data;
	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}
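
/* Descriptor publication order: the buffer address and length words are
 * written before the attribute word that carries the OWN bit, and the
 * dma_wmb() keeps those stores ordered ahead of the MMIO write that
 * later kicks the engine.
 */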
static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc,
			     struct sk_buff *skb)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
	dring->desc[idx].skb = skb;
}

static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
					struct netsec_desc_ring *dring,
					u16 idx,
					struct netsec_rx_pkt_info *rxpi,
					struct netsec_desc *desc, u16 *len)
{
	struct netsec_de de = {};

	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);

	*len = de.buf_len_info >> 16;

	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
			 NETSEC_RX_PKT_ERR_MASK;
	*desc = dring->desc[idx];
	return desc->skb;
}

static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
					      struct netsec_rx_pkt_info *rxpi,
					      struct netsec_desc *desc,
					      u16 *len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct sk_buff *tmp_skb, *skb = NULL;
	struct netsec_desc td;
	int tail;

	*rxpi = (struct netsec_rx_pkt_info){};

	td.len = priv->ndev->mtu + 22;

	tmp_skb = netsec_alloc_skb(priv, &td);

	dma_rmb();

	tail = dring->tail;

	if (!tmp_skb) {
		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
				 dring->desc[tail].skb);
	} else {
		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
	}

	/* move tail ahead */
	dring->tail = (dring->tail + 1) % DESC_NUM;

	dring->pkt_cnt--;

	return skb;
}
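
/* TX completion never inspects OWN bits: NETSEC_REG_NRM_TX_DONE_PKTCNT
 * reports how many packets the engine has finished since the last read
 * (the counter appears to be clear-on-read, since its value is simply
 * accumulated into dring->pkt_cnt), and that many entries are reclaimed
 * from the tail.
 */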
static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	unsigned int pkts, bytes;

	dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	if (dring->pkt_cnt < budget)
		budget = dring->pkt_cnt;

	pkts = 0;
	bytes = 0;

	while (pkts < budget) {
		struct netsec_desc *desc;
		struct netsec_de *entry;
		int tail, eop;

		tail = dring->tail;

		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		desc = &dring->desc[tail];
		entry = dring->vaddr + DESC_SZ * tail;

		eop = (entry->attr >> NETSEC_TX_LAST) & 1;

		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
				 DMA_TO_DEVICE);
		if (eop) {
			pkts++;
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		}
		*desc = (struct netsec_desc){};
	}
	dring->pkt_cnt -= budget;

	priv->ndev->stats.tx_packets += budget;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, budget, bytes);

	return budget;
}

static int netsec_process_tx(struct netsec_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	int new, done = 0;

	do {
		new = netsec_clean_tx_dring(priv, budget);
		done += new;
		budget -= new;
	} while (new);

	if (done && netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	return done;
}

static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	int done = 0, rx_num = 0;
	struct netsec_desc desc;
	struct sk_buff *skb;
	u16 len;

	while (done < budget) {
		if (!rx_num) {
			rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			dring->pkt_cnt += rx_num;

			/* move head 'rx_num' */
			dring->head = (dring->head + rx_num) % DESC_NUM;

			rx_num = dring->pkt_cnt;
			if (!rx_num)
				break;
		}
		done++;
		rx_num--;
		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
		if (unlikely(!skb) || rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n",
				  __func__, rx_info.err_code);
			ndev->stats.rx_dropped++;
			continue;
		}

		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
				 DMA_FROM_DEVICE);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += len;
		}
	}

	return done;
}
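
/* The NAPI handler alternates TX completion and RX processing, letting
 * each consume from the remaining budget, until both report no work or
 * the budget runs out. Top-level TX/RX interrupts stay masked (see
 * netsec_irq_handler) and are only re-enabled once napi_complete_done()
 * succeeds.
 */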
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	struct net_device *ndev;
	int tx, rx, done, todo;

	priv = container_of(napi, struct netsec_priv, napi);
	ndev = priv->ndev;

	todo = budget;
	do {
		if (!todo)
			break;

		tx = netsec_process_tx(priv, todo);
		todo -= tx;

		if (!todo)
			break;

		rx = netsec_process_rx(priv, todo);
		todo -= rx;
	} while (rx || tx);

	done = budget - todo;

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}

static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc,
			     struct sk_buff *skb)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx] = *desc;
	dring->desc[idx].skb = skb;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}
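
/* For TSO the usual pseudo-header preparation is done before handing the
 * frame over: tot_len/payload_len are zeroed and the TCP checksum field
 * is seeded with the pseudo-header sum, so the engine can fill in the
 * per-segment lengths and checksums itself. The MSS travels to the
 * hardware in the upper half of buf_len_info (see netsec_set_tx_de).
 */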
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	/* differentiate between full/empty ring */
	if (dring->head >= dring->tail)
		filled = dring->head - dring->tail;
	else
		filled = dring->head + DESC_NUM - dring->tail;

	if (DESC_NUM - filled < 2) { /* if less than 2 available */
		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
		netif_stop_queue(priv->ndev);
		dma_wmb();
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}
static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE);
		dev_kfree_skb(desc->skb);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;
	dring->pkt_cnt = 0;
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	int ret = 0;

	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					   &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr) {
		ret = -ENOMEM;
		goto err;
	}

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	netsec_free_dring(priv, id);

	return ret;
}

static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct netsec_desc desc;
	struct sk_buff *skb;
	int n;

	desc.len = priv->ndev->mtu + 22;

	for (n = 0; n < DESC_NUM; n++) {
		skb = netsec_alloc_skb(priv, &desc);
		if (!skb) {
			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
			return -ENOMEM;
		}
		netsec_set_rx_de(priv, dring, n, &desc, skb);
	}

	return 0;
}

static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);

	return 0;
}

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}
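
/* Full (re)initialisation sequence: quiesce both DMA engines, soft-reset
 * the block, program the descriptor ring base addresses, optionally
 * reload the three microcode regions located via the attached EEPROM,
 * then switch from boot ("taiki", going by the T2N bit name) to normal
 * mode. CODE_LOAD_END in TOP_STATUS confirms the microengines started.
 */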
static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)
{
	u32 value;
	int err;

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();
	}

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
		cpu_relax();

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	if (load_ucode) {
		err = netsec_netdev_load_microcode(priv);
		if (err) {
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",
				  __func__, err);
			return err;
		}
	}

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
		return -ENXIO;
	}
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
		cpu_relax();

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

	return 0;
}
static int netsec_start_gmac(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;
	int ret;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))
		return -ETIMEDOUT;

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (ret)
		return ret;
	if (value & NETSEC_GMAC_BMR_REG_SWR)
		return -EAGAIN;

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
		return -ETIMEDOUT;

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
		return -ETIMEDOUT;

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
		return -ETIMEDOUT;

	ret = netsec_mac_update_to_phy_state(priv);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_stop_gmac(struct netsec_priv *priv)
{
	u32 value;
	int ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
}

static void netsec_phy_adjust_link(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
	else
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
}
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Ack the per-queue status bits by writing back what was read
	 * (presumably write-one-to-clear)
	 */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	/* Disable interrupts until the NAPI poll re-enables them */
	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);
	return ret;
}
static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	ret = netsec_reset_hardware(priv, false);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	pm_runtime_put_sync(priv->dev);

	return ret;
}

static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}

static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
			       int cmd)
{
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netsec_netdev_ioctl,
};
static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv)
{
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
		return -EINVAL;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);
	}
	priv->freq = clk_get_rate(priv->clk);

	return 0;
}

static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret) {
		dev_err(&pdev->dev,
			"missing required property 'phy-channel'\n");
		return ret;
	}

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		dev_err(&pdev->dev,
			"missing required property 'socionext,phy-clock-frequency'\n");
	return ret;
}

static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}
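
/* MDIO registration differs by firmware type: with a device tree the bus
 * is registered through of_mdiobus_register() (preferring an 'mdio'
 * subnode when the firmware provides one), while on ACPI systems
 * auto-probing is masked off and the single PHY named by 'phy-channel'
 * is instantiated by hand.
 */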
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}
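
/* Probe ordering note: the version register can only be read once
 * runtime PM has powered the block and ungated its clocks (see
 * netsec_runtime_resume), hence the pm_runtime_get_sync() before the
 * NETSEC_REG_F_TAIKI_VER check.
 */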
static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		dev_err(&pdev->dev, "No IRQ resource found.\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	/* device_get_phy_mode() returns a negative errno on failure; check
	 * it before storing into the (possibly unsigned) phy_interface_t.
	 */
	ret = device_get_phy_mode(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		ret = -ENODEV;
		goto free_ndev;
	}
	priv->phy_interface = ret;

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);
	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");

	return ret;
}
static int netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	netsec_unregister_mdio(priv);
	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

	return 0;
}
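
/* Runtime PM only gates clocks: the external PHY reference clock via the
 * clk framework and the internal clock domains (DOM_D/C/G) via
 * NETSEC_REG_CLK_EN. No register state is saved or restored, so probe
 * and open/close each hold their own pm_runtime reference while they
 * touch the hardware (see the comment in netsec_probe()).
 */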
#ifdef CONFIG_PM
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
#endif

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");