lan743x_main.c 80 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949
  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. /* Copyright (C) 2018 Microchip Technology Inc. */
  3. #include <linux/module.h>
  4. #include <linux/pci.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/crc32.h>
  8. #include <linux/microchipphy.h>
  9. #include <linux/net_tstamp.h>
  10. #include <linux/phy.h>
  11. #include <linux/rtnetlink.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/crc16.h>
  14. #include "lan743x_main.h"
  15. #include "lan743x_ethtool.h"
  16. static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  17. {
  18. pci_release_selected_regions(adapter->pdev,
  19. pci_select_bars(adapter->pdev,
  20. IORESOURCE_MEM));
  21. pci_disable_device(adapter->pdev);
  22. }
  23. static int lan743x_pci_init(struct lan743x_adapter *adapter,
  24. struct pci_dev *pdev)
  25. {
  26. unsigned long bars = 0;
  27. int ret;
  28. adapter->pdev = pdev;
  29. ret = pci_enable_device_mem(pdev);
  30. if (ret)
  31. goto return_error;
  32. netif_info(adapter, probe, adapter->netdev,
  33. "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
  34. pdev->vendor, pdev->device);
  35. bars = pci_select_bars(pdev, IORESOURCE_MEM);
  36. if (!test_bit(0, &bars))
  37. goto disable_device;
  38. ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
  39. if (ret)
  40. goto disable_device;
  41. pci_set_master(pdev);
  42. return 0;
  43. disable_device:
  44. pci_disable_device(adapter->pdev);
  45. return_error:
  46. return ret;
  47. }
/* lan743x_csr_read - read a 32 bit CSR register
 * @adapter: adapter private data
 * @offset: register offset used directly as an index into csr_address
 *          (byte vs word semantics depend on csr_address's declared
 *          type in lan743x_main.h — confirm there)
 */
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
	return ioread32(&adapter->csr.csr_address[offset]);
}
/* lan743x_csr_write - write a 32 bit CSR register
 * @adapter: adapter private data
 * @offset: register offset, indexed the same way as lan743x_csr_read()
 * @data: value to write
 */
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
		       u32 data)
{
	iowrite32(data, &adapter->csr.csr_address[offset]);
}
/* Adapter for readx_poll_timeout(), which wants a single-argument read
 * op; relies on a local variable named "adapter" in the calling scope.
 */
#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)

/* lan743x_csr_light_reset - issue a "lite" reset and wait for completion
 * @adapter: adapter private data
 *
 * Sets HW_CFG_LRST_ in HW_CFG, then polls HW_CFG every 100 ms (100000 us)
 * for up to 10 s (10000000 us) waiting for the hardware to clear it.
 *
 * Returns 0 on success or -ETIMEDOUT if the bit never clears.
 */
static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	data = lan743x_csr_read(adapter, HW_CFG);
	data |= HW_CFG_LRST_;
	lan743x_csr_write(adapter, HW_CFG, data);
	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
				  !(data & HW_CFG_LRST_), 100000, 10000000);
}
/* lan743x_csr_wait_for_bit - poll a register until masked bits hit a value
 * @adapter: adapter private data
 * @offset: register offset to poll
 * @bit_mask: bit(s) to test
 * @target_value: 1 waits for any masked bit set, 0 waits for all clear
 * @usleep_min: see note below
 * @usleep_max: per-iteration sleep in microseconds
 * @count: multiplier used to compute the total timeout
 *
 * Returns 0 when the condition is met, -ETIMEDOUT otherwise.
 *
 * NOTE(review): readx_poll_timeout() takes (sleep_us, timeout_us).  This
 * call passes usleep_max as the per-iteration sleep and usleep_min * count
 * as the total timeout, so the parameters are NOT a sleep range plus retry
 * count as their names suggest — confirm this mapping is intended.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == ((data & bit_mask) ? 1 : 0),
				  usleep_max, usleep_min * count);
}
/* lan743x_csr_init - map the CSR BAR and identify/reset the chip
 * @adapter: adapter private data
 *
 * Maps BAR 0 via devm_ioremap() (released automatically on driver
 * detach), reads ID_REV/FPGA_REV, validates the chip id, derives
 * per-silicon-revision capability flags, then performs a light reset.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;
	int result;

	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address) {
		result = -ENOMEM;
		goto clean_up;
	}
	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
		result = -ENODEV;
		goto clean_up;
	}
	/* Assume auto set/clear interrupt support by default, then revoke
	 * it for A0 silicon in the switch below.
	 */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}
	result = lan743x_csr_light_reset(adapter);
	if (result)
		goto clean_up;
	return 0;

clean_up:
	return result;
}
  117. static void lan743x_intr_software_isr(void *context)
  118. {
  119. struct lan743x_adapter *adapter = context;
  120. struct lan743x_intr *intr = &adapter->intr;
  121. u32 int_sts;
  122. int_sts = lan743x_csr_read(adapter, INT_STS);
  123. if (int_sts & INT_BIT_SW_GP_) {
  124. lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
  125. intr->software_isr_flag = 1;
  126. }
  127. }
  128. static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
  129. {
  130. struct lan743x_tx *tx = context;
  131. struct lan743x_adapter *adapter = tx->adapter;
  132. bool enable_flag = true;
  133. u32 int_en = 0;
  134. int_en = lan743x_csr_read(adapter, INT_EN_SET);
  135. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
  136. lan743x_csr_write(adapter, INT_EN_CLR,
  137. INT_BIT_DMA_TX_(tx->channel_number));
  138. }
  139. if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
  140. u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
  141. u32 dmac_int_sts;
  142. u32 dmac_int_en;
  143. if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
  144. dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  145. else
  146. dmac_int_sts = ioc_bit;
  147. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
  148. dmac_int_en = lan743x_csr_read(adapter,
  149. DMAC_INT_EN_SET);
  150. else
  151. dmac_int_en = ioc_bit;
  152. dmac_int_en &= ioc_bit;
  153. dmac_int_sts &= dmac_int_en;
  154. if (dmac_int_sts & ioc_bit) {
  155. napi_schedule(&tx->napi);
  156. enable_flag = false;/* poll func will enable later */
  157. }
  158. }
  159. if (enable_flag)
  160. /* enable isr */
  161. lan743x_csr_write(adapter, INT_EN_SET,
  162. INT_BIT_DMA_TX_(tx->channel_number));
  163. }
  164. static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
  165. {
  166. struct lan743x_rx *rx = context;
  167. struct lan743x_adapter *adapter = rx->adapter;
  168. bool enable_flag = true;
  169. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
  170. lan743x_csr_write(adapter, INT_EN_CLR,
  171. INT_BIT_DMA_RX_(rx->channel_number));
  172. }
  173. if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
  174. u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
  175. u32 dmac_int_sts;
  176. u32 dmac_int_en;
  177. if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
  178. dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  179. else
  180. dmac_int_sts = rx_frame_bit;
  181. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
  182. dmac_int_en = lan743x_csr_read(adapter,
  183. DMAC_INT_EN_SET);
  184. else
  185. dmac_int_en = rx_frame_bit;
  186. dmac_int_en &= rx_frame_bit;
  187. dmac_int_sts &= dmac_int_en;
  188. if (dmac_int_sts & rx_frame_bit) {
  189. napi_schedule(&rx->napi);
  190. enable_flag = false;/* poll funct will enable later */
  191. }
  192. }
  193. if (enable_flag) {
  194. /* enable isr */
  195. lan743x_csr_write(adapter, INT_EN_SET,
  196. INT_BIT_DMA_RX_(rx->channel_number));
  197. }
  198. }
/* lan743x_intr_shared_isr - dispatch a combined interrupt status word
 * @context: adapter private data
 * @int_sts: pending interrupt bits owned by vector 0
 * @flags: vector flags, forwarded to the per-channel handlers
 *
 * Demultiplexes RX-channel, TX-channel and "other" (software GP) sources
 * to their handlers, clearing each handled bit from the local copy of
 * @int_sts.  Any bits left over have no handler and are masked off via
 * INT_EN_CLR so they cannot keep firing.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
	}
	/* anything still pending has no handler; disable it */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}
/* lan743x_intr_entry_isr - top-level interrupt handler for one vector
 * @irq: irq number (unused)
 * @ptr: the lan743x_vector this irq was registered with
 *
 * Obtains the interrupt status (read, read-to-clear, or synthesized
 * from the vector mask, depending on vector flags), optionally disables
 * the vector/master enables around dispatch, masks the status down to
 * sources that are both enabled and owned by this vector, and hands the
 * result to the vector handler.
 *
 * Returns IRQ_HANDLED if an owned source was pending, IRQ_NONE
 * otherwise (required for correct shared-irq accounting).
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}
	/* master bit not set: this interrupt is not ours */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;
	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));
	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}
	/* only dispatch sources that are enabled AND owned by this vector */
	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}
	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));

irq_done:
	return result;
}
/* lan743x_intr_test_isr - verify interrupt delivery end to end
 * @adapter: adapter private data
 *
 * Enables and fires the software general-purpose interrupt, then polls
 * up to 10 times (1-20 ms sleep each) for the ISR path to set
 * intr->software_isr_flag.
 *
 * NOTE(review): software_isr_flag is written in interrupt context and
 * read here without READ_ONCE()/barriers; the usleep_range() call in
 * the loop likely prevents the compiler from caching the read, but an
 * explicit annotation would be safer — confirm.
 *
 * Returns 0 if the interrupt was observed, -ENODEV otherwise.
 */
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int result = -ENODEV;
	int timeout = 10;

	intr->software_isr_flag = 0;
	/* enable interrupt */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
	/* activate interrupt here */
	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
	while ((timeout > 0) && (!(intr->software_isr_flag))) {
		usleep_range(1000, 20000);
		timeout--;
	}
	if (intr->software_isr_flag)
		result = 0;
	/* disable interrupts */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
	return result;
}
  311. static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
  312. int vector_index, u32 flags,
  313. u32 int_mask,
  314. lan743x_vector_handler handler,
  315. void *context)
  316. {
  317. struct lan743x_vector *vector = &adapter->intr.vector_list
  318. [vector_index];
  319. int ret;
  320. vector->adapter = adapter;
  321. vector->flags = flags;
  322. vector->vector_index = vector_index;
  323. vector->int_mask = int_mask;
  324. vector->handler = handler;
  325. vector->context = context;
  326. ret = request_irq(vector->irq,
  327. lan743x_intr_entry_isr,
  328. (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
  329. IRQF_SHARED : 0, DRIVER_NAME, vector);
  330. if (ret) {
  331. vector->handler = NULL;
  332. vector->context = NULL;
  333. vector->int_mask = 0;
  334. vector->flags = 0;
  335. }
  336. return ret;
  337. }
  338. static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
  339. int vector_index)
  340. {
  341. struct lan743x_vector *vector = &adapter->intr.vector_list
  342. [vector_index];
  343. free_irq(vector->irq, vector);
  344. vector->handler = NULL;
  345. vector->context = NULL;
  346. vector->int_mask = 0;
  347. vector->flags = 0;
  348. }
  349. static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
  350. u32 int_mask)
  351. {
  352. int index;
  353. for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
  354. if (adapter->intr.vector_list[index].int_mask & int_mask)
  355. return adapter->intr.vector_list[index].flags;
  356. }
  357. return 0;
  358. }
  359. static void lan743x_intr_close(struct lan743x_adapter *adapter)
  360. {
  361. struct lan743x_intr *intr = &adapter->intr;
  362. int index = 0;
  363. lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
  364. lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
  365. for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
  366. if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
  367. lan743x_intr_unregister_isr(adapter, index);
  368. intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
  369. }
  370. }
  371. if (intr->flags & INTR_FLAG_MSI_ENABLED) {
  372. pci_disable_msi(adapter->pdev);
  373. intr->flags &= ~INTR_FLAG_MSI_ENABLED;
  374. }
  375. if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
  376. pci_disable_msix(adapter->pdev);
  377. intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
  378. }
  379. }
/* lan743x_intr_open - allocate irq vectors and install all handlers
 * @adapter: adapter private data
 *
 * Bring-up sequence:
 *   1. try MSI-X (1 .. 1 + TX + RX channel vectors), else MSI (skipped
 *      on A0 silicon), else a legacy (possibly shared) pin interrupt;
 *   2. map every source to vector 0 and register the shared handler;
 *   3. self-test delivery with the software GP interrupt;
 *   4. if extra vectors are available, move each TX channel, then each
 *      RX channel, onto its own vector with a dedicated handler,
 *      removing the source from vector 0's mask as it migrates.
 *
 * On any failure everything is unwound via lan743x_intr_close().
 * Returns 0 on success or a negative error code.
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	u32 int_vec_en_auto_clr = 0;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;
	/* Try to set up MSIX interrupts */
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
		msix_entries[index].entry = index;
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + LAN743X_USED_TX_CHANNELS +
				    LAN743X_USED_RX_CHANNELS);
	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}
	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}
	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}
	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
	/* baseline flags for vector 0: manual status read/ack and
	 * manual source enable check/clear
	 */
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		/* legacy pin may be shared with other devices */
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}
	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		/* chip supports auto set/clr: switch to the read-to-clear
		 * source handling variants instead of manual read/write
		 */
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}
	/* vector 0 initially owns every source */
	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		/* interrupt moderation configuration (skipped on A0) */
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}
	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;
	/* move TX channels onto dedicated vectors (vectors 1..N) */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			/* fully automatic enable/clear handling */
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}
			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			      LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}
	/* move RX channels onto dedicated vectors (after the TX vectors) */
	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   LAN743X_USED_TX_CHANNELS - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			/* fully automatic enable/clear handling */
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}
			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}
/* lan743x_dp_write - copy a buffer into one of the chip's internal
 * data-port RAMs (e.g. the RFE hash table) one 32-bit word at a time
 * @adapter: the adapter to write to
 * @select:  data-port RAM select value (DP_SEL_* field)
 * @addr:    word address within the selected RAM
 * @length:  number of u32 words to write from @buf
 * @buf:     source buffer of @length words
 *
 * Returns 0 on success, -EIO if the data port never reports ready.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	int ret = -EIO;
	u32 dp_sel;
	int i;

	/* dp_lock serializes all data-port transactions */
	mutex_lock(&adapter->dp_lock);
	/* wait for DPRDY, i.e. the port is idle, before touching it */
	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		goto unlock;
	/* select the target RAM, preserving the other DP_SEL bits */
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);
	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		/* each word write must complete before the next is issued */
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			goto unlock;
	}
	ret = 0;

unlock:
	mutex_unlock(&adapter->dp_lock);
	return ret;
}
  607. static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
  608. {
  609. u32 ret;
  610. ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
  611. MAC_MII_ACC_PHY_ADDR_MASK_;
  612. ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
  613. MAC_MII_ACC_MIIRINDA_MASK_;
  614. if (read)
  615. ret |= MAC_MII_ACC_MII_READ_;
  616. else
  617. ret |= MAC_MII_ACC_MII_WRITE_;
  618. ret |= MAC_MII_ACC_MII_BUSY_;
  619. return ret;
  620. }
/* Poll MAC_MII_ACC until the MII BUSY bit clears (transaction done).
 * Busy-polls with no sleep interval, 1 second timeout; returns 0 on
 * success or the readx_poll_timeout error (-ETIMEDOUT) on timeout.
 */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
/* lan743x_mdiobus_read - mii_bus .read callback
 * Reads PHY register @index of PHY @phy_id through the MAC's MII
 * interface. Returns the 16-bit register value on success or a
 * negative errno if the MII interface stays busy.
 */
static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	/* wait for the read transaction to complete */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}
/* lan743x_mdiobus_write - mii_bus .write callback
 * Writes @regval to PHY register @index of PHY @phy_id through the
 * MAC's MII interface. Returns 0 on success or a negative errno if the
 * MII interface stays busy.
 */
static int lan743x_mdiobus_write(struct mii_bus *bus,
				 int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	/* data must be staged in MAC_MII_DATA before the command */
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}
/* Program @addr as the MAC's own receive address and cache it in
 * adapter->mac_address. MAC_RX_ADDRL holds bytes 0-3, MAC_RX_ADDRH
 * bytes 4-5. Also logs the new address.
 */
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
				    u8 *addr)
{
	u32 addr_lo, addr_hi;

	addr_lo = addr[0] |
		addr[1] << 8 |
		addr[2] << 16 |
		addr[3] << 24;
	addr_hi = addr[4] |
		addr[5] << 8;
	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);

	/* keep the software copy in sync with the hardware registers */
	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev,
		   "MAC address set to %pM\n", addr);
}
/* Reset the MAC, enable automatic duplex/speed detection, and settle
 * the device MAC address.
 * The address is read back from MAC_RX_ADDRL/H (presumably preloaded
 * from EEPROM/OTP and retained across the MAC reset — TODO confirm
 * against the datasheet); if it reads all-ones or is otherwise invalid
 * a random address is generated. The chosen address is programmed back
 * into the filter registers and copied to the net_device.
 * Returns 0 on success or a negative errno if the MAC reset times out.
 */
static int lan743x_mac_init(struct lan743x_adapter *adapter)
{
	bool mac_address_valid = true;
	struct net_device *netdev;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;
	u32 data;
	int ret;

	netdev = adapter->netdev;
	/* software reset the MAC and wait for the bit to self-clear */
	lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
	ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
				       0, 1000, 20000, 100);
	if (ret)
		return ret;

	/* setup auto duplex, and speed detection */
	data = lan743x_csr_read(adapter, MAC_CR);
	data |= MAC_CR_ADD_ | MAC_CR_ASD_;
	data |= MAC_CR_CNTR_RST_;
	lan743x_csr_write(adapter, MAC_CR, data);

	/* read back whatever address the hardware currently holds */
	mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
	mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
	adapter->mac_address[0] = mac_addr_lo & 0xFF;
	adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
	adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
	adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
	adapter->mac_address[4] = mac_addr_hi & 0xFF;
	adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;

	/* all-ones means no address was ever programmed */
	if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
	    mac_addr_lo == 0xFFFFFFFF) {
		mac_address_valid = false;
	} else if (!is_valid_ether_addr(adapter->mac_address)) {
		mac_address_valid = false;
	}
	if (!mac_address_valid)
		eth_random_addr(adapter->mac_address);
	lan743x_mac_set_address(adapter, adapter->mac_address);
	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
	return 0;
}
  718. static int lan743x_mac_open(struct lan743x_adapter *adapter)
  719. {
  720. int ret = 0;
  721. u32 temp;
  722. temp = lan743x_csr_read(adapter, MAC_RX);
  723. lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
  724. temp = lan743x_csr_read(adapter, MAC_TX);
  725. lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
  726. return ret;
  727. }
/* Disable the MAC transmitter then receiver, waiting (best effort,
 * result ignored) for each to report disabled via its *_D status bit.
 */
static void lan743x_mac_close(struct lan743x_adapter *adapter)
{
	u32 temp;

	/* stop the transmitter first so no new frames enter the wire */
	temp = lan743x_csr_read(adapter, MAC_TX);
	temp &= ~MAC_TX_TXEN_;
	lan743x_csr_write(adapter, MAC_TX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
				 1, 1000, 20000, 100);

	/* then the receiver, the same way */
	temp = lan743x_csr_read(adapter, MAC_RX);
	temp &= ~MAC_RX_RXEN_;
	lan743x_csr_write(adapter, MAC_RX, temp);
	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
				 1, 1000, 20000, 100);
}
  742. static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
  743. bool tx_enable, bool rx_enable)
  744. {
  745. u32 flow_setting = 0;
  746. /* set maximum pause time because when fifo space frees
  747. * up a zero value pause frame will be sent to release the pause
  748. */
  749. flow_setting = MAC_FLOW_CR_FCPT_MASK_;
  750. if (tx_enable)
  751. flow_setting |= MAC_FLOW_CR_TX_FCEN_;
  752. if (rx_enable)
  753. flow_setting |= MAC_FLOW_CR_RX_FCEN_;
  754. lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
  755. }
/* Update the MAC's maximum RX frame size for @new_mtu.
 * If the receiver is running it is temporarily disabled while the size
 * field changes, then re-enabled. The programmed maximum is
 * new_mtu + ETH_HLEN + 4 (payload + Ethernet header + FCS).
 * Always returns 0.
 */
static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
{
	int enabled = 0;
	u32 mac_rx = 0;

	mac_rx = lan743x_csr_read(adapter, MAC_RX);
	if (mac_rx & MAC_RX_RXEN_) {
		enabled = 1;
		if (mac_rx & MAC_RX_RXD_) {
			/* NOTE(review): writing MAC_RX with RXD set looks
			 * like a write-one-to-clear of a stale disabled
			 * status — confirm against the datasheet
			 */
			lan743x_csr_write(adapter, MAC_RX, mac_rx);
			mac_rx &= ~MAC_RX_RXD_;
		}
		/* disable the receiver and wait for it to report so */
		mac_rx &= ~MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
					 1, 1000, 20000, 100);
		/* clear the disabled status before reconfiguring */
		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
	}

	/* program the new maximum frame size */
	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
		  MAC_RX_MAX_SIZE_MASK_);
	lan743x_csr_write(adapter, MAC_RX, mac_rx);

	if (enabled) {
		mac_rx |= MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
	}
	return 0;
}
  783. /* PHY */
/* Assert the internal PHY reset via PMT_CTL, then poll until the reset
 * bit self-clears and PMT_CTL_READY_ is set (50 ms poll interval, 1 s
 * timeout). Returns 0 on success or the readx_poll_timeout error.
 */
static int lan743x_phy_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	/* Only called within probe, and before mdiobus_register */
	data = lan743x_csr_read(adapter, PMT_CTL);
	data |= PMT_CTL_ETH_PHY_RST_;
	lan743x_csr_write(adapter, PMT_CTL, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
				  (data & PMT_CTL_READY_)),
				  50000, 1000000);
}
  796. static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
  797. u8 duplex, u16 local_adv,
  798. u16 remote_adv)
  799. {
  800. struct lan743x_phy *phy = &adapter->phy;
  801. u8 cap;
  802. if (phy->fc_autoneg)
  803. cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
  804. else
  805. cap = phy->fc_request_control;
  806. lan743x_mac_flow_ctrl_set_enables(adapter,
  807. cap & FLOW_CTRL_TX,
  808. cap & FLOW_CTRL_RX);
  809. }
/* One-time PHY initialization: just reset the internal PHY. */
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
	return lan743x_phy_reset(adapter);
}
/* phylib link-change callback.
 * Logs the link status and, once the link is up (PHY_RUNNING), resolves
 * the negotiated pause capabilities from the local and link-partner
 * advertisement registers and applies them to the MAC.
 */
static void lan743x_phy_link_status_change(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	phy_print_status(phydev);
	if (phydev->state == PHY_RUNNING) {
		struct ethtool_link_ksettings ksettings;
		int remote_advertisement = 0;
		int local_advertisement = 0;

		memset(&ksettings, 0, sizeof(ksettings));
		phy_ethtool_get_link_ksettings(netdev, &ksettings);
		/* read both advertisement registers; bail silently if
		 * either MDIO read fails
		 */
		local_advertisement = phy_read(phydev, MII_ADVERTISE);
		if (local_advertisement < 0)
			return;
		remote_advertisement = phy_read(phydev, MII_LPA);
		if (remote_advertisement < 0)
			return;
		lan743x_phy_update_flowcontrol(adapter,
					       ksettings.base.duplex,
					       local_advertisement,
					       remote_advertisement);
	}
}
/* Stop and detach the PHY; the reverse of lan743x_phy_open. */
static void lan743x_phy_close(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* stop the PHY state machine before disconnecting it */
	phy_stop(netdev->phydev);
	phy_disconnect(netdev->phydev);
	netdev->phydev = NULL;
}
/* Find the first PHY on the MDIO bus and attach it to the net device.
 * Restricts the advertisement (no 1000BASE-T half duplex; pause bits
 * set per the requested flow control), records the autoneg setting for
 * later flow-control resolution, then starts the PHY state machine and
 * autonegotiation.
 * Returns 0 on success, -EIO if no PHY was found, or the
 * phy_connect_direct() error.
 */
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
	struct lan743x_phy *phy = &adapter->phy;
	struct phy_device *phydev;
	struct net_device *netdev;
	int ret = -EIO;
	u32 mii_adv;

	netdev = adapter->netdev;
	phydev = phy_find_first(adapter->mdiobus);
	if (!phydev)
		goto return_error;
	ret = phy_connect_direct(netdev, phydev,
				 lan743x_phy_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
	if (ret)
		goto return_error;

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	/* replace any existing pause advertisement with ours */
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	/* remembered for lan743x_phy_update_flowcontrol() */
	phy->fc_autoneg = phydev->autoneg;

	phy_start(phydev);
	phy_start_aneg(phydev);
	return 0;

return_error:
	return ret;
}
/* Install adapter->mac_address into RFE perfect-filter slot 0. */
static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
{
	u8 *mac_addr;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;

	/* Add mac address to perfect Filter */
	mac_addr = adapter->mac_address;
	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
		      (((u32)(mac_addr[1])) << 8) |
		      (((u32)(mac_addr[2])) << 16) |
		      (((u32)(mac_addr[3])) << 24));
	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
		      (((u32)(mac_addr[5])) << 8));
	/* LO is written first; the HI write carries the VALID bit that
	 * marks the filter entry in use
	 */
	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
}
/* Configure the receive filtering engine from netdev flags and the
 * multicast list:
 * - broadcast is always accepted;
 * - IFF_PROMISC accepts all unicast and multicast;
 * - IFF_ALLMULTI accepts all multicast;
 * - otherwise the first 32 multicast addresses go into perfect-filter
 *   slots 1..32 (slot 0 holds the device's own MAC address) and any
 *   remainder is folded into the 512-bit multicast hash table, which
 *   is uploaded through the data port.
 */
static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 hash_table[DP_SEL_VHF_HASH_LEN];
	u32 rfctl;
	u32 data;

	rfctl = lan743x_csr_read(adapter, RFE_CTL);
	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
		   RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
	rfctl |= RFE_CTL_AB_;	/* always accept broadcast */
	if (netdev->flags & IFF_PROMISC) {
		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rfctl |= RFE_CTL_AM_;
	}

	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
	if (netdev_mc_count(netdev)) {
		struct netdev_hw_addr *ha;
		int i;

		rfctl |= RFE_CTL_DA_PERFECT_;
		i = 1;	/* slot 0 is the device's own address */
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				/* invalidate slot before rewriting it */
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), 0);
				data = ha->addr[3];
				data = ha->addr[2] | (data << 8);
				data = ha->addr[1] | (data << 8);
				data = ha->addr[0] | (data << 8);
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_LO(i), data);
				data = ha->addr[5];
				data = ha->addr[4] | (data << 8);
				data |= RFE_ADDR_FILT_HI_VALID_;
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), data);
			} else {
				/* overflow entries use the hash filter;
				 * CRC bits 23..31 select one of 512 bits
				 */
				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
					     23) & 0x1FF;

				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
				rfctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	/* upload the (possibly empty) hash table via the data port */
	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
			 DP_SEL_VHF_VLAN_LEN,
			 DP_SEL_VHF_HASH_LEN, hash_table);
	lan743x_csr_write(adapter, RFE_CTL, rfctl);
}
/* Reset the DMA controller and program its global configuration:
 * descriptor spacing, RX-priority channel arbitration, maximum read
 * request size, interrupt coalescing (skipped on A0 silicon) and OBFF
 * thresholds.
 * Returns 0, or -EPERM if DEFAULT_DMA_DESCRIPTOR_SPACING is not one of
 * the supported values.
 */
static int lan743x_dmac_init(struct lan743x_adapter *adapter)
{
	u32 data = 0;

	/* software reset; wait (best effort) for the bit to self-clear */
	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
				 0, 1000, 20000, 100);
	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
	case DMA_DESCRIPTOR_SPACING_16:
		data = DMAC_CFG_MAX_DSPACE_16_;
		break;
	case DMA_DESCRIPTOR_SPACING_32:
		data = DMAC_CFG_MAX_DSPACE_32_;
		break;
	case DMA_DESCRIPTOR_SPACING_64:
		data = DMAC_CFG_MAX_DSPACE_64_;
		break;
	case DMA_DESCRIPTOR_SPACING_128:
		data = DMAC_CFG_MAX_DSPACE_128_;
		break;
	default:
		return -EPERM;
	}
	/* coalescing is not enabled on A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= DMAC_CFG_COAL_EN_;
	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
	lan743x_csr_write(adapter, DMAC_CFG, data);

	/* interrupt coalescing parameters */
	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
	data |= DMAC_COAL_CFG_TIMER_TX_START_;
	data |= DMAC_COAL_CFG_FLUSH_INTS_;
	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);

	/* OBFF thresholds */
	data = DMAC_OBFF_TX_THRES_SET_(0x08);
	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
	return 0;
}
  983. static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
  984. int tx_channel)
  985. {
  986. u32 dmac_cmd = 0;
  987. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  988. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  989. DMAC_CMD_START_T_(tx_channel)),
  990. (dmac_cmd &
  991. DMAC_CMD_STOP_T_(tx_channel)));
  992. }
  993. static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
  994. int tx_channel)
  995. {
  996. int timeout = 100;
  997. int result = 0;
  998. while (timeout &&
  999. ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
  1000. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1001. usleep_range(1000, 20000);
  1002. timeout--;
  1003. }
  1004. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1005. result = -ENODEV;
  1006. return result;
  1007. }
  1008. static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
  1009. int rx_channel)
  1010. {
  1011. u32 dmac_cmd = 0;
  1012. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  1013. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  1014. DMAC_CMD_START_R_(rx_channel)),
  1015. (dmac_cmd &
  1016. DMAC_CMD_STOP_R_(rx_channel)));
  1017. }
  1018. static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
  1019. int rx_channel)
  1020. {
  1021. int timeout = 100;
  1022. int result = 0;
  1023. while (timeout &&
  1024. ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
  1025. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1026. usleep_range(1000, 20000);
  1027. timeout--;
  1028. }
  1029. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1030. result = -ENODEV;
  1031. return result;
  1032. }
/* Release one TX descriptor and its bookkeeping.
 * For DATA descriptors the DMA buffer is unmapped (page unmap for skb
 * fragments, single unmap for the head buffer) and any attached skb is
 * freed; other descriptor types only get their ACTIVE flag cleared.
 * In all cases both buffer_info and the descriptor are zeroed at the
 * end. @cleanup is currently unused in this function.
 */
static void lan743x_tx_release_desc(struct lan743x_tx *tx,
				    int descriptor_index, bool cleanup)
{
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_tx_descriptor *descriptor = NULL;
	u32 descriptor_type = 0;

	descriptor = &tx->ring_cpu_ptr[descriptor_index];
	buffer_info = &tx->buffer_info[descriptor_index];
	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
		goto done;

	descriptor_type = (descriptor->data0) &
			  TX_DESC_DATA0_DTYPE_MASK_;
	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
		goto clean_up_data_descriptor;
	else
		goto clear_active;

clean_up_data_descriptor:
	if (buffer_info->dma_ptr) {
		/* fragments were mapped with skb_frag_dma_map(), the
		 * head buffer with dma_map_single()
		 */
		if (buffer_info->flags &
		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
			dma_unmap_page(&tx->adapter->pdev->dev,
				       buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_TO_DEVICE);
		} else {
			dma_unmap_single(&tx->adapter->pdev->dev,
					 buffer_info->dma_ptr,
					 buffer_info->buffer_length,
					 DMA_TO_DEVICE);
		}
		buffer_info->dma_ptr = 0;
		buffer_info->buffer_length = 0;
	}
	/* only the last descriptor of a frame carries the skb */
	if (buffer_info->skb) {
		dev_kfree_skb(buffer_info->skb);
		buffer_info->skb = NULL;
	}

clear_active:
	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;

done:
	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
}
  1076. static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
  1077. {
  1078. return ((++index) % tx->ring_size);
  1079. }
/* Reclaim descriptors the hardware has consumed: advance last_head up
 * to the device-written head pointer (*head_cpu_ptr), releasing each
 * descriptor on the way. Called under tx->ring_lock from the TX NAPI
 * poll path.
 */
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
	while ((*tx->head_cpu_ptr) != (tx->last_head)) {
		lan743x_tx_release_desc(tx, tx->last_head, false);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	}
}
/* Forcibly release every descriptor in the ring (cleanup mode), then
 * zero both the descriptor ring and the buffer_info array.
 */
static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
{
	u32 original_head = 0;

	/* walk the whole ring exactly once, starting at last_head */
	original_head = tx->last_head;
	do {
		lan743x_tx_release_desc(tx, tx->last_head, true);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	} while (tx->last_head != original_head);
	memset(tx->ring_cpu_ptr, 0,
	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
	memset(tx->buffer_info, 0,
	       sizeof(*tx->buffer_info) * (tx->ring_size));
}
  1100. static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
  1101. struct sk_buff *skb)
  1102. {
  1103. int result = 1; /* 1 for the main skb buffer */
  1104. int nr_frags = 0;
  1105. if (skb_is_gso(skb))
  1106. result++; /* requires an extension descriptor */
  1107. nr_frags = skb_shinfo(skb)->nr_frags;
  1108. result += nr_frags; /* 1 for each fragment buffer */
  1109. return result;
  1110. }
  1111. static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
  1112. {
  1113. int last_head = tx->last_head;
  1114. int last_tail = tx->last_tail;
  1115. if (last_tail >= last_head)
  1116. return tx->ring_size - last_tail + last_head - 1;
  1117. else
  1118. return last_head - last_tail - 1;
  1119. }
/* Begin assembling a TX frame: map the skb head buffer for DMA and
 * stage the first DATA descriptor (first-segment).
 * The descriptor's data0 word is deliberately NOT written yet; it is
 * accumulated in tx->frame_data0 and committed by a later assembler
 * step — presumably so the descriptor is not picked up by hardware
 * before the frame is complete (TODO confirm ownership semantics).
 * Returns 0 on success, -ENOMEM if the DMA mapping fails.
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool check_sum)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	/* fill everything except data0 (deferred, see above) */
	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (first_buffer_length &
			  TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FS_ |
			  TX_DESC_DATA0_FCS_;

	/* request hardware checksum insertion when the skb asked for it */
	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* Insert an LSO extension descriptor after the first data descriptor.
 * Commits the pending previous descriptor with EXT_ set (an extension
 * follows), then stages an extension descriptor carrying the full
 * payload length; its data0 is again deferred via tx->frame_data0.
 */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}
/* Append one skb page fragment to the frame under assembly.
 * Commits the pending previous descriptor, DMA-maps the fragment and
 * stages a new DATA descriptor for it. Zero-length fragments are
 * skipped. On DMA mapping failure the entire partially built frame is
 * released and -ENOMEM returned; in that case the caller must NOT call
 * lan743x_tx_frame_end. Returns 0 on success.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const struct skb_frag_struct *fragment,
					 unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		/* reset the frame-assembly state */
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	/* flag tells release path to use dma_unmap_page() */
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* Finish the frame under assembly and hand it to the hardware.
 * Marks the last descriptor LS (last segment) + IOC (interrupt on
 * completion), attaches the skb for later freeing, commits the deferred
 * data0 word, advances the tail, and writes the new tail to the
 * channel's TX_TAIL register. The dma_wmb() orders the descriptor
 * writes before the doorbell write; extra tail flags re-arm interrupt
 * enables when the vector uses the auto-set modes.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool ignore_sync)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_LS_;
	tx->frame_data0 |= TX_DESC_DATA0_IOC_;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	buffer_info->skb = skb;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor->data0 = tx->frame_data0;
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	/* descriptors must be visible before the tail doorbell */
	dma_wmb();

	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
		TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}
/* Transmit one skb on this TX channel.
 * Reserves ring space under tx->ring_lock, then assembles the frame:
 * start (head buffer) -> optional LSO extension -> one descriptor per
 * page fragment -> end (tail doorbell).
 * If the ring lacks space the skb is parked in tx->overflow_skb and
 * the queue is stopped (retried from the TX NAPI poll); an skb that
 * could never fit in the ring is dropped. Always returns NETDEV_TX_OK.
 */
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool ignore_sync = false;
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);

	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	if (required_number_of_descriptors >
	    lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			/* can never fit: drop */
			dev_kfree_skb(skb);
		} else {
			/* save to overflow buffer */
			tx->overflow_skb = skb;
			netif_stop_queue(tx->adapter->netdev);
		}
		goto unlock;
	}

	/* space available, transmit skb  */
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		/* for GSO the first descriptor carries the MSS (min 8) */
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb(skb);
		goto unlock;
	}

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length);

	if (nr_frags <= 0)
		goto finish;

	for (j = 0; j < nr_frags; j++) {
		const struct skb_frag_struct *frag;

		frag = &(skb_shinfo(skb)->frags[j]);
		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 * lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 * lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return NETDEV_TX_OK;
}
  1345. static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
  1346. {
  1347. struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
  1348. struct lan743x_adapter *adapter = tx->adapter;
  1349. bool start_transmitter = false;
  1350. unsigned long irq_flags = 0;
  1351. u32 ioc_bit = 0;
  1352. u32 int_sts = 0;
  1353. ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
  1354. int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  1355. if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
  1356. lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
  1357. spin_lock_irqsave(&tx->ring_lock, irq_flags);
  1358. /* clean up tx ring */
  1359. lan743x_tx_release_completed_descriptors(tx);
  1360. if (netif_queue_stopped(adapter->netdev)) {
  1361. if (tx->overflow_skb) {
  1362. if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
  1363. lan743x_tx_get_avail_desc(tx))
  1364. start_transmitter = true;
  1365. } else {
  1366. netif_wake_queue(adapter->netdev);
  1367. }
  1368. }
  1369. spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
  1370. if (start_transmitter) {
  1371. /* space is now available, transmit overflow skb */
  1372. lan743x_tx_xmit_frame(tx, tx->overflow_skb);
  1373. tx->overflow_skb = NULL;
  1374. netif_wake_queue(adapter->netdev);
  1375. }
  1376. if (!napi_complete_done(napi, weight))
  1377. goto done;
  1378. /* enable isr */
  1379. lan743x_csr_write(adapter, INT_EN_SET,
  1380. INT_BIT_DMA_TX_(tx->channel_number));
  1381. lan743x_csr_read(adapter, INT_STS);
  1382. done:
  1383. return weight;
  1384. }
/* Free the TX ring resources allocated by lan743x_tx_ring_init: the
 * head-pointer writeback word, the buffer_info array and the
 * descriptor ring. Safe to call on a partially initialized ring (each
 * piece is checked before freeing).
 */
static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
{
	if (tx->head_cpu_ptr) {
		pci_free_consistent(tx->adapter->pdev,
				    sizeof(*tx->head_cpu_ptr),
				    (void *)(tx->head_cpu_ptr),
				    tx->head_dma_ptr);
		tx->head_cpu_ptr = NULL;
		tx->head_dma_ptr = 0;
	}
	kfree(tx->buffer_info);
	tx->buffer_info = NULL;

	if (tx->ring_cpu_ptr) {
		pci_free_consistent(tx->adapter->pdev,
				    tx->ring_allocation_size,
				    tx->ring_cpu_ptr,
				    tx->ring_dma_ptr);
		tx->ring_allocation_size = 0;
		tx->ring_cpu_ptr = NULL;
		tx->ring_dma_ptr = 0;
	}
	tx->ring_size = 0;
}
/* lan743x_tx_ring_init - allocate host memory for a TX channel
 * @tx: TX channel context
 *
 * Allocates three pieces: the DMA descriptor ring (page aligned), the
 * host-side buffer_info array, and the single-word head write-back area
 * the device updates via DMA.
 *
 * Return: 0 on success, -EINVAL if the ring size does not fit the
 * hardware ring-length field, -ENOMEM on allocation failure or if the
 * write-back address is not 4-byte aligned.  Any failure tears the ring
 * back down via lan743x_tx_ring_cleanup().
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	/* ring length must be representable in TX_CFG_B's length field */
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* descriptor ring, rounded up to whole pages */
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;
	/* per-descriptor host-side bookkeeping */
	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	/* single word the device DMA-writes the current head index into */
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					sizeof(*tx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* a 32-bit aligned write-back address is required */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}
	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}
/* lan743x_tx_close - stop a TX channel and release its resources
 * @tx: TX channel context
 *
 * Shutdown order: stop the DMA channel and wait for it to halt, mask the
 * channel's interrupt sources, disable NAPI, disable and drain the TX
 * FIFO, release outstanding descriptors/skbs, and finally free the ring
 * memory.
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	/* stop the DMA channel and wait until it is actually idle */
	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	/* mask this channel's interrupt sources */
	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	/* disable the TX FIFO and wait for its enable bit to clear */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);

	/* drop any frame still parked from a previous ring-full condition */
	if (tx->overflow_skb) {
		dev_kfree_skb(tx->overflow_skb);
		tx->overflow_skb = NULL;
	}

	lan743x_tx_ring_cleanup(tx);
}
/* lan743x_tx_open - bring up a TX channel
 * @tx: TX channel context
 *
 * Allocates the ring, resets and enables the TX FIFO, soft-resets the
 * DMA channel, programs the ring base / head write-back addresses and
 * the TX_CFG_A/B/C registers, registers and enables NAPI, unmasks the
 * channel interrupts and finally starts the DMA engine.
 *
 * Return: 0 on success, or the error from lan743x_tx_ring_init().
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B (ring length; larger DMA burst on non-A0 parts) */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A (head write-back mode and, on non-A0 parts,
	 * prefetch/write-back thresholds)
	 */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head from the hardware's current head index */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	/* NAPI weight is ring_size - 1: at most a full ring per poll */
	netif_napi_add(adapter->netdev,
		       &tx->napi, lan743x_tx_napi_poll,
		       tx->ring_size - 1);
	napi_enable(&tx->napi);

	/* translate the vector auto-clear/R2C capabilities into TX_CFG_C */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}
  1569. static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
  1570. {
  1571. return ((++index) % rx->ring_size);
  1572. }
/* lan743x_rx_allocate_ring_element - attach a fresh receive buffer to a
 *	ring slot and hand the descriptor to the hardware
 * @rx: RX channel context
 * @index: ring slot to (re)fill
 *
 * The buffer is sized for a maximum-length frame plus the Ethernet
 * header, 4 bytes of FCS and RX_HEAD_PADDING.  The skb data pointer is
 * adjusted with skb_reserve() only after DMA mapping, so the device
 * writes from the start of the buffer while the skb skips the head
 * padding.
 *
 * Return: 0 on success, -ENOMEM if the skb allocation or DMA mapping
 * fails.
 */
static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
{
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;
	int length = 0;

	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	/* NOTE(review): GFP_DMA (ISA DMA zone) looks unnecessary for a
	 * PCI device with a proper DMA mask — confirm before changing
	 */
	buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
					      length,
					      GFP_ATOMIC | GFP_DMA);
	if (!(buffer_info->skb))
		return -ENOMEM;
	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
					      buffer_info->skb->data,
					      length,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&rx->adapter->pdev->dev,
			      buffer_info->dma_ptr)) {
		/* NOTE(review): buffer_info->skb is not freed here; it is
		 * expected to be released later via
		 * lan743x_rx_release_ring_element() — confirm callers
		 */
		buffer_info->dma_ptr = 0;
		return -ENOMEM;
	}
	buffer_info->buffer_length = length;
	descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
	descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
	descriptor->data3 = 0;
	/* writing data0 with the OWN bit hands the slot to the device */
	descriptor->data0 = (RX_DESC_DATA0_OWN_ |
			     (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
	skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
	return 0;
}
  1604. static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
  1605. {
  1606. struct lan743x_rx_buffer_info *buffer_info;
  1607. struct lan743x_rx_descriptor *descriptor;
  1608. descriptor = &rx->ring_cpu_ptr[index];
  1609. buffer_info = &rx->buffer_info[index];
  1610. descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
  1611. descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
  1612. descriptor->data3 = 0;
  1613. descriptor->data0 = (RX_DESC_DATA0_OWN_ |
  1614. ((buffer_info->buffer_length) &
  1615. RX_DESC_DATA0_BUF_LENGTH_MASK_));
  1616. }
  1617. static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
  1618. {
  1619. struct lan743x_rx_buffer_info *buffer_info;
  1620. struct lan743x_rx_descriptor *descriptor;
  1621. descriptor = &rx->ring_cpu_ptr[index];
  1622. buffer_info = &rx->buffer_info[index];
  1623. memset(descriptor, 0, sizeof(*descriptor));
  1624. if (buffer_info->dma_ptr) {
  1625. dma_unmap_single(&rx->adapter->pdev->dev,
  1626. buffer_info->dma_ptr,
  1627. buffer_info->buffer_length,
  1628. DMA_FROM_DEVICE);
  1629. buffer_info->dma_ptr = 0;
  1630. }
  1631. if (buffer_info->skb) {
  1632. dev_kfree_skb(buffer_info->skb);
  1633. buffer_info->skb = NULL;
  1634. }
  1635. memset(buffer_info, 0, sizeof(*buffer_info));
  1636. }
/* lan743x_rx_process_packet - consume at most one completed frame from
 *	the RX ring
 * @rx: RX channel context
 *
 * Walks the ring from rx->last_head toward the device-written head
 * index, looking for a descriptor run FS..LS, optionally followed by a
 * timestamp extension descriptor.  A single-buffer frame is passed up
 * via napi_gro_receive(); a frame spanning multiple buffers is dropped
 * (buffers are allocated at least jumbo-sized, so that is unexpected).
 *
 * Return: RX_PROCESS_RESULT_PACKET_RECEIVED, _PACKET_DROPPED, or
 * _NOTHING_TO_DO when no complete frame is available yet.
 */
static int lan743x_rx_process_packet(struct lan743x_rx *rx)
{
	struct skb_shared_hwtstamps *hwtstamps = NULL;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;
	int current_head_index = -1;
	int extension_index = -1;
	int first_index = -1;
	int last_index = -1;

	current_head_index = *rx->head_cpu_ptr;
	/* sanity-check both head indices against the ring bounds */
	if (current_head_index < 0 || current_head_index >= rx->ring_size)
		goto done;
	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
		goto done;

	if (rx->last_head != current_head_index) {
		/* the frame must start at last_head: released by the
		 * device (OWN clear) and flagged First Segment
		 */
		descriptor = &rx->ring_cpu_ptr[rx->last_head];
		if (descriptor->data0 & RX_DESC_DATA0_OWN_)
			goto done;
		if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
			goto done;
		first_index = rx->last_head;
		if (descriptor->data0 & RX_DESC_DATA0_LS_) {
			last_index = rx->last_head;
		} else {
			int index;

			/* scan forward for the Last Segment descriptor */
			index = lan743x_rx_next_index(rx, first_index);
			while (index != current_head_index) {
				descriptor = &rx->ring_cpu_ptr[index];
				if (descriptor->data0 & RX_DESC_DATA0_OWN_)
					goto done;
				if (descriptor->data0 & RX_DESC_DATA0_LS_) {
					last_index = index;
					break;
				}
				index = lan743x_rx_next_index(rx, index);
			}
		}
		if (last_index >= 0) {
			descriptor = &rx->ring_cpu_ptr[last_index];
			if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
				/* extension is expected to follow */
				int index = lan743x_rx_next_index(rx,
								  last_index);
				if (index != current_head_index) {
					descriptor = &rx->ring_cpu_ptr[index];
					if (descriptor->data0 &
					    RX_DESC_DATA0_OWN_) {
						goto done;
					}
					if (descriptor->data0 &
					    RX_DESC_DATA0_EXT_) {
						extension_index = index;
					} else {
						goto done;
					}
				} else {
					/* extension is not yet available */
					/* prevent processing of this packet */
					first_index = -1;
					last_index = -1;
				}
			}
		}
	}
	if (first_index >= 0 && last_index >= 0) {
		int real_last_index = last_index;
		struct sk_buff *skb = NULL;
		u32 ts_sec = 0;
		u32 ts_nsec = 0;

		/* packet is available */
		if (first_index == last_index) {
			/* single buffer packet */
			int packet_length;

			buffer_info = &rx->buffer_info[first_index];
			skb = buffer_info->skb;
			descriptor = &rx->ring_cpu_ptr[first_index];

			/* unmap from dma */
			if (buffer_info->dma_ptr) {
				dma_unmap_single(&rx->adapter->pdev->dev,
						 buffer_info->dma_ptr,
						 buffer_info->buffer_length,
						 DMA_FROM_DEVICE);
				buffer_info->dma_ptr = 0;
				buffer_info->buffer_length = 0;
			}
			buffer_info->skb = NULL;
			packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
					(descriptor->data0);
			/* trim the trailing 4-byte FCS included in the
			 * hardware frame length
			 */
			skb_put(skb, packet_length - 4);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);
			/* NOTE(review): return value ignored — if this
			 * allocation fails the slot is left without a
			 * buffer; confirm how that is recovered
			 */
			lan743x_rx_allocate_ring_element(rx, first_index);
		} else {
			int index = first_index;

			/* multi buffer packet not supported */
			/* this should not happen since
			 * buffers are allocated to be at least jumbo size
			 */

			/* clean up buffers */
			if (first_index <= last_index) {
				while ((index >= first_index) &&
				       (index <= last_index)) {
					lan743x_rx_release_ring_element(rx,
									index);
					lan743x_rx_allocate_ring_element(rx,
									 index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			} else {
				/* the span wraps around the end of the ring */
				while ((index >= first_index) ||
				       (index <= last_index)) {
					lan743x_rx_release_ring_element(rx,
									index);
					lan743x_rx_allocate_ring_element(rx,
									 index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			}
		}
		if (extension_index >= 0) {
			/* extract the hardware timestamp, then recycle the
			 * extension descriptor's slot
			 */
			descriptor = &rx->ring_cpu_ptr[extension_index];
			buffer_info = &rx->buffer_info[extension_index];

			ts_sec = descriptor->data1;
			ts_nsec = (descriptor->data2 &
				  RX_DESC_DATA2_TS_NS_MASK_);
			lan743x_rx_reuse_ring_element(rx, extension_index);
			real_last_index = extension_index;
		}
		if (!skb) {
			result = RX_PROCESS_RESULT_PACKET_DROPPED;
			goto move_forward;
		}
		if (extension_index < 0)
			goto pass_packet_to_os;
		hwtstamps = skb_hwtstamps(skb);
		if (hwtstamps)
			hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);

pass_packet_to_os:
		/* pass packet to OS */
		napi_gro_receive(&rx->napi, skb);
		result = RX_PROCESS_RESULT_PACKET_RECEIVED;

move_forward:
		/* push tail and head forward */
		rx->last_tail = real_last_index;
		rx->last_head = lan743x_rx_next_index(rx, real_last_index);
	}
done:
	return result;
}
/* lan743x_rx_napi_poll - NAPI poll handler for an RX channel
 * @napi: NAPI context embedded in struct lan743x_rx
 * @weight: NAPI budget
 *
 * Processes up to @weight received frames.  When the budget is not
 * exhausted and napi_complete_done() succeeds, re-enables the channel
 * interrupt (via RX_TAIL flags where the vector supports auto-set,
 * otherwise via INT_EN_SET) and pushes the new tail to the hardware.
 *
 * Return: number of frames counted against the budget.
 */
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	count = 0;
	while (count < weight) {
		int rx_process_result = -1;

		rx_process_result = lan743x_rx_process_packet(rx);
		if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
			count++;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_NOTHING_TO_DO) {
			break;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_PACKET_DROPPED) {
			/* NOTE(review): dropped packets consume ring slots
			 * but are not counted against the NAPI budget —
			 * confirm this is intended
			 */
			continue;
		}
	}
	rx->frame_count += count;
	/* budget exhausted: stay in polling mode, leave irq masked */
	if (count == weight)
		goto done;
	if (!napi_complete_done(napi, count))
		goto done;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	/* update RX_TAIL */
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx_tail_flags | rx->last_tail);
done:
	return count;
}
/* lan743x_rx_ring_cleanup - release all buffers and host memory backing
 *	an RX channel
 * @rx: RX channel context
 *
 * Unmaps and frees every receive buffer, then frees the head write-back
 * word, the buffer_info array and the descriptor ring.  Every step is
 * guarded, so this is safe on a partially initialized ring.
 */
static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
{
	if (rx->buffer_info && rx->ring_cpu_ptr) {
		int index;

		for (index = 0; index < rx->ring_size; index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		pci_free_consistent(rx->adapter->pdev,
				    sizeof(*rx->head_cpu_ptr),
				    rx->head_cpu_ptr,
				    rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		pci_free_consistent(rx->adapter->pdev,
				    rx->ring_allocation_size,
				    rx->ring_cpu_ptr,
				    rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}
/* lan743x_rx_ring_init - allocate and fill an RX channel's ring
 * @rx: RX channel context
 *
 * Allocates the descriptor ring (page aligned), the buffer_info array
 * and the head write-back word, then attaches a receive buffer to every
 * slot so the whole ring is hardware-owned at start.
 *
 * Return: 0 on success, -EINVAL for an invalid ring size, -ENOMEM on
 * allocation failure or a misaligned write-back address.  Any failure
 * tears the ring back down via lan743x_rx_ring_cleanup().
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	/* the head/tail scheme needs at least two slots */
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must be representable in RX_CFG_B's length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* descriptor ring, rounded up to whole pages */
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;
	/* per-descriptor host-side bookkeeping */
	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	/* single word the device DMA-writes the current head index into */
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					sizeof(*rx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* a 32-bit aligned write-back address is required */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->last_head = 0;
	/* give every slot a receive buffer before the channel starts */
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_allocate_ring_element(rx, index);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	lan743x_rx_ring_cleanup(rx);
	return ret;
}
/* lan743x_rx_close - stop an RX channel and free its resources
 * @rx: RX channel context
 *
 * Shutdown order: disable the RX FIFO and wait for it to quiesce, stop
 * the DMA channel, mask the channel's interrupt sources, shut down NAPI
 * and finally free the ring.
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	/* disable the RX FIFO and wait for its enable bit to clear */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* stop the DMA channel and wait until it is actually idle */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	/* mask this channel's interrupt sources */
	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);
	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}
/* lan743x_rx_open - bring up an RX channel
 * @rx: RX channel context
 *
 * Allocates and fills the ring, soft-resets the DMA channel, programs
 * the ring base / write-back addresses and RX_CFG_A/B/C, primes RX_TAIL,
 * enables NAPI and interrupts, starts the DMA channel, then resets and
 * enables the RX FIFO.
 *
 * Return: 0 on success; negative errno on failure, with NAPI
 * unregistered and the ring cleaned up.
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	/* NAPI weight is ring_size - 1: at most a full ring per poll */
	netif_napi_add(adapter->netdev,
		       &rx->napi, lan743x_rx_napi_poll,
		       rx->ring_size - 1);

	/* soft-reset the RX DMA channel and wait for completion */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	/* extra write-back/prefetch thresholds on non-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B; the pad setting must agree with RX_HEAD_PADDING
	 * used when sizing and reserving the receive buffers
	 */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	data |= RX_CFG_B_TS_ALL_RX_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;
	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C from the vector auto-clear/R2C capabilities */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	/* tail starts at ring_size - 1 (one behind head 0), so every
	 * descriptor is owned by the hardware
	 */
	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	/* after the soft reset the hardware head must read back as zero */
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}
	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}
  2046. static int lan743x_netdev_close(struct net_device *netdev)
  2047. {
  2048. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2049. int index;
  2050. lan743x_tx_close(&adapter->tx[0]);
  2051. for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
  2052. lan743x_rx_close(&adapter->rx[index]);
  2053. lan743x_phy_close(adapter);
  2054. lan743x_mac_close(adapter);
  2055. lan743x_intr_close(adapter);
  2056. return 0;
  2057. }
/* lan743x_netdev_open - ndo_open: bring the interface up
 * @netdev: network device
 *
 * Acquires resources in dependency order — interrupts, MAC, PHY, RX
 * channels, TX channel — and unwinds them in reverse on failure.
 *
 * Return: 0 on success, negative errno from the first failing stage.
 */
static int lan743x_netdev_open(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;
	int ret;

	ret = lan743x_intr_open(adapter);
	if (ret)
		goto return_error;
	ret = lan743x_mac_open(adapter);
	if (ret)
		goto close_intr;
	ret = lan743x_phy_open(adapter);
	if (ret)
		goto close_mac;
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		ret = lan743x_rx_open(&adapter->rx[index]);
		if (ret)
			goto close_rx;
	}
	ret = lan743x_tx_open(&adapter->tx[0]);
	if (ret)
		goto close_rx;
	return 0;

close_rx:
	/* only channels that actually opened have a ring allocated */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		if (adapter->rx[index].ring_cpu_ptr)
			lan743x_rx_close(&adapter->rx[index]);
	}
	lan743x_phy_close(adapter);
close_mac:
	lan743x_mac_close(adapter);
close_intr:
	lan743x_intr_close(adapter);
return_error:
	netif_warn(adapter, ifup, adapter->netdev,
		   "Error opening LAN743x\n");
	return ret;
}
  2096. static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
  2097. struct net_device *netdev)
  2098. {
  2099. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2100. return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
  2101. }
  2102. static int lan743x_netdev_ioctl(struct net_device *netdev,
  2103. struct ifreq *ifr, int cmd)
  2104. {
  2105. if (!netif_running(netdev))
  2106. return -EINVAL;
  2107. return phy_mii_ioctl(netdev->phydev, ifr, cmd);
  2108. }
  2109. static void lan743x_netdev_set_multicast(struct net_device *netdev)
  2110. {
  2111. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2112. lan743x_rfe_set_multicast(adapter);
  2113. }
  2114. static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
  2115. {
  2116. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2117. int ret = 0;
  2118. ret = lan743x_mac_set_mtu(adapter, new_mtu);
  2119. if (!ret)
  2120. netdev->mtu = new_mtu;
  2121. return ret;
  2122. }
  2123. static void lan743x_netdev_get_stats64(struct net_device *netdev,
  2124. struct rtnl_link_stats64 *stats)
  2125. {
  2126. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2127. stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
  2128. stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
  2129. stats->rx_bytes = lan743x_csr_read(adapter,
  2130. STAT_RX_UNICAST_BYTE_COUNT) +
  2131. lan743x_csr_read(adapter,
  2132. STAT_RX_BROADCAST_BYTE_COUNT) +
  2133. lan743x_csr_read(adapter,
  2134. STAT_RX_MULTICAST_BYTE_COUNT);
  2135. stats->tx_bytes = lan743x_csr_read(adapter,
  2136. STAT_TX_UNICAST_BYTE_COUNT) +
  2137. lan743x_csr_read(adapter,
  2138. STAT_TX_BROADCAST_BYTE_COUNT) +
  2139. lan743x_csr_read(adapter,
  2140. STAT_TX_MULTICAST_BYTE_COUNT);
  2141. stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
  2142. lan743x_csr_read(adapter,
  2143. STAT_RX_ALIGNMENT_ERRORS) +
  2144. lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
  2145. lan743x_csr_read(adapter,
  2146. STAT_RX_UNDERSIZE_FRAME_ERRORS) +
  2147. lan743x_csr_read(adapter,
  2148. STAT_RX_OVERSIZE_FRAME_ERRORS);
  2149. stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
  2150. lan743x_csr_read(adapter,
  2151. STAT_TX_EXCESS_DEFERRAL_ERRORS) +
  2152. lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
  2153. stats->rx_dropped = lan743x_csr_read(adapter,
  2154. STAT_RX_DROPPED_FRAMES);
  2155. stats->tx_dropped = lan743x_csr_read(adapter,
  2156. STAT_TX_EXCESSIVE_COLLISION);
  2157. stats->multicast = lan743x_csr_read(adapter,
  2158. STAT_RX_MULTICAST_FRAMES) +
  2159. lan743x_csr_read(adapter,
  2160. STAT_TX_MULTICAST_FRAMES);
  2161. stats->collisions = lan743x_csr_read(adapter,
  2162. STAT_TX_SINGLE_COLLISIONS) +
  2163. lan743x_csr_read(adapter,
  2164. STAT_TX_MULTIPLE_COLLISIONS) +
  2165. lan743x_csr_read(adapter,
  2166. STAT_TX_LATE_COLLISIONS);
  2167. }
  2168. static int lan743x_netdev_set_mac_address(struct net_device *netdev,
  2169. void *addr)
  2170. {
  2171. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2172. struct sockaddr *sock_addr = addr;
  2173. int ret;
  2174. ret = eth_prepare_mac_addr_change(netdev, sock_addr);
  2175. if (ret)
  2176. return ret;
  2177. ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
  2178. lan743x_mac_set_address(adapter, sock_addr->sa_data);
  2179. lan743x_rfe_update_mac_address(adapter);
  2180. return 0;
  2181. }
/* net_device callbacks; open/stop perform the full bring-up/teardown of
 * interrupts, MAC, PHY and DMA channels.
 */
static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open = lan743x_netdev_open,
	.ndo_stop = lan743x_netdev_close,
	.ndo_start_xmit = lan743x_netdev_xmit_frame,
	.ndo_do_ioctl = lan743x_netdev_ioctl,
	.ndo_set_rx_mode = lan743x_netdev_set_multicast,
	.ndo_change_mtu = lan743x_netdev_change_mtu,
	.ndo_get_stats64 = lan743x_netdev_get_stats64,
	.ndo_set_mac_address = lan743x_netdev_set_mac_address,
};
  2192. static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
  2193. {
  2194. lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
  2195. }
  2196. static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
  2197. {
  2198. mdiobus_unregister(adapter->mdiobus);
  2199. }
  2200. static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
  2201. {
  2202. unregister_netdev(adapter->netdev);
  2203. lan743x_mdiobus_cleanup(adapter);
  2204. lan743x_hardware_cleanup(adapter);
  2205. lan743x_pci_cleanup(adapter);
  2206. }
/* lan743x_hardware_init - one-time hardware and channel-context setup
 * @adapter: adapter context
 * @pdev: PCI device
 *
 * Masks all interrupts, initializes the MAC, PHY and DMA controller,
 * and seeds the per-channel RX/TX context structures.
 *
 * Return: 0 on success, or the first failing sub-init's error code.
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	int index;
	int ret;

	adapter->intr.irq = adapter->pdev->irq;
	/* mask all interrupts until the channels are opened */
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
	mutex_init(&adapter->dp_lock);

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_phy_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	/* bind channel contexts to the adapter and their channel numbers */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	tx = &adapter->tx[0];
	tx->adapter = adapter;
	tx->channel_number = 0;
	spin_lock_init(&tx->ring_lock);
	return 0;
}
  2236. static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
  2237. {
  2238. int ret;
  2239. adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
  2240. if (!(adapter->mdiobus)) {
  2241. ret = -ENOMEM;
  2242. goto return_error;
  2243. }
  2244. adapter->mdiobus->priv = (void *)adapter;
  2245. adapter->mdiobus->read = lan743x_mdiobus_read;
  2246. adapter->mdiobus->write = lan743x_mdiobus_write;
  2247. adapter->mdiobus->name = "lan743x-mdiobus";
  2248. snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
  2249. "pci-%s", pci_name(adapter->pdev));
  2250. /* set to internal PHY id */
  2251. adapter->mdiobus->phy_mask = ~(u32)BIT(1);
  2252. /* register mdiobus */
  2253. ret = mdiobus_register(adapter->mdiobus);
  2254. if (ret < 0)
  2255. goto return_error;
  2256. return 0;
  2257. return_error:
  2258. return ret;
  2259. }
/* lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * Initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
  2270. static int lan743x_pcidev_probe(struct pci_dev *pdev,
  2271. const struct pci_device_id *id)
  2272. {
  2273. struct lan743x_adapter *adapter = NULL;
  2274. struct net_device *netdev = NULL;
  2275. int ret = -ENODEV;
  2276. netdev = devm_alloc_etherdev(&pdev->dev,
  2277. sizeof(struct lan743x_adapter));
  2278. if (!netdev)
  2279. goto return_error;
  2280. SET_NETDEV_DEV(netdev, &pdev->dev);
  2281. pci_set_drvdata(pdev, netdev);
  2282. adapter = netdev_priv(netdev);
  2283. adapter->netdev = netdev;
  2284. adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  2285. NETIF_MSG_LINK | NETIF_MSG_IFUP |
  2286. NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
  2287. netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
  2288. ret = lan743x_pci_init(adapter, pdev);
  2289. if (ret)
  2290. goto return_error;
  2291. ret = lan743x_csr_init(adapter);
  2292. if (ret)
  2293. goto cleanup_pci;
  2294. ret = lan743x_hardware_init(adapter, pdev);
  2295. if (ret)
  2296. goto cleanup_pci;
  2297. ret = lan743x_mdiobus_init(adapter);
  2298. if (ret)
  2299. goto cleanup_hardware;
  2300. adapter->netdev->netdev_ops = &lan743x_netdev_ops;
  2301. adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
  2302. adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
  2303. adapter->netdev->hw_features = adapter->netdev->features;
  2304. /* carrier off reporting is important to ethtool even BEFORE open */
  2305. netif_carrier_off(netdev);
  2306. ret = register_netdev(adapter->netdev);
  2307. if (ret < 0)
  2308. goto cleanup_mdiobus;
  2309. return 0;
  2310. cleanup_mdiobus:
  2311. lan743x_mdiobus_cleanup(adapter);
  2312. cleanup_hardware:
  2313. lan743x_hardware_cleanup(adapter);
  2314. cleanup_pci:
  2315. lan743x_pci_cleanup(adapter);
  2316. return_error:
  2317. pr_warn("Initialization failed\n");
  2318. return ret;
  2319. }
  2320. /**
  2321. * lan743x_pcidev_remove - Device Removal Routine
  2322. * @pdev: PCI device information struct
  2323. *
  2324. * this is called by the PCI subsystem to alert the driver
  2325. * that it should release a PCI device. This could be caused by a
  2326. * Hot-Plug event, or because the driver is going to be removed from
  2327. * memory.
  2328. **/
  2329. static void lan743x_pcidev_remove(struct pci_dev *pdev)
  2330. {
  2331. struct net_device *netdev = pci_get_drvdata(pdev);
  2332. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2333. lan743x_full_cleanup(adapter);
  2334. }
/* lan743x_pcidev_shutdown - quiesce the device for system shutdown/suspend
 * @pdev: PCI device information struct
 *
 * Detaches the netdev, closes it if it was running, saves PCI config
 * state (PM builds only), and cleans up the hardware blocks.
 * Also called from lan743x_pm_suspend().
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* rtnl lock required by netif_device_detach/netdev close path */
	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	/* save config space so lan743x_pm_resume() can restore it */
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}
  2354. #ifdef CONFIG_PM
  2355. static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
  2356. {
  2357. return bitrev16(crc16(0xFFFF, buf, len));
  2358. }
/* lan743x_pm_set_wol - program Wake-on-LAN registers from adapter->wolopts
 * @adapter: driver private structure
 *
 * Clears all wake-up frame (WUF) filters, then enables PHY, magic-packet,
 * unicast, broadcast, multicast and ARP wake sources as selected in
 * adapter->wolopts, writing MAC_WUCSR, PMT_CTL and MAC_RX last.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	/* first bytes of IPv4 (01:00:5E) and IPv6 (33:33) multicast MACs,
	 * and the Ethertype of ARP (0x0806), used as WUF match patterns
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	int mask_index;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	/* disable every wake-up frame filter before reprogramming */
	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		    PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		    PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	if (adapter->wolopts & WAKE_PHY) {
		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
	}
	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast: match 3-byte prefix at offset 0
		 * (MASK0 = 7 selects the first three bytes)
		 */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast: match 2-byte prefix (MASK0 = 3) */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	/* commit the accumulated settings; MAC_RX last so RX is only
	 * (re-)enabled after wake sources are armed
	 */
	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}
  2453. static int lan743x_pm_suspend(struct device *dev)
  2454. {
  2455. struct pci_dev *pdev = to_pci_dev(dev);
  2456. struct net_device *netdev = pci_get_drvdata(pdev);
  2457. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2458. int ret;
  2459. lan743x_pcidev_shutdown(pdev);
  2460. /* clear all wakes */
  2461. lan743x_csr_write(adapter, MAC_WUCSR, 0);
  2462. lan743x_csr_write(adapter, MAC_WUCSR2, 0);
  2463. lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
  2464. if (adapter->wolopts)
  2465. lan743x_pm_set_wol(adapter);
  2466. /* Host sets PME_En, put D3hot */
  2467. ret = pci_prepare_to_sleep(pdev);
  2468. return 0;
  2469. }
/* lan743x_pm_resume - system-sleep resume callback
 * @dev: generic device, backed by our pci_dev
 *
 * Restores PCI state, re-runs hardware init, reopens the netdev if it
 * was running at suspend time, and reattaches it. Always returns 0;
 * a hardware-init failure is only logged.
 */
static int lan743x_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* re-save so a subsequent suspend restores from fresh state */
	pci_save_state(pdev);

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret) {
		/* NOTE(review): failure is logged but resume continues and
		 * still reports success — confirm this best-effort behavior
		 * is intended rather than returning ret here
		 */
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
	}

	/* open netdev when netdev is at running state while resume.
	 * For instance, it is true when system wakesup after pm-suspend
	 * However, it is false when system wakes up after suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_open(netdev);

	netif_device_attach(netdev);

	return 0;
}
/* system sleep (suspend/resume) callbacks exposed to the PCI PM core */
const struct dev_pm_ops lan743x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
  2496. #endif /*CONFIG_PM */
  2497. static const struct pci_device_id lan743x_pcidev_tbl[] = {
  2498. { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
  2499. { 0, }
  2500. };
/* PCI driver registration: probe/remove/shutdown plus PM ops when built
 * with CONFIG_PM
 */
static struct pci_driver lan743x_pcidev_driver = {
	.name     = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe    = lan743x_pcidev_probe,
	.remove   = lan743x_pcidev_remove,
#ifdef CONFIG_PM
	.driver.pm = &lan743x_pm_ops,
#endif
	.shutdown = lan743x_pcidev_shutdown,
};
/* generate module init/exit that register/unregister the PCI driver */
module_pci_driver(lan743x_pcidev_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");