lan743x_main.c 75 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781
  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. /* Copyright (C) 2018 Microchip Technology Inc. */
  3. #include <linux/module.h>
  4. #include <linux/pci.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/etherdevice.h>
  7. #include <linux/crc32.h>
  8. #include <linux/microchipphy.h>
  9. #include <linux/net_tstamp.h>
  10. #include <linux/phy.h>
  11. #include <linux/rtnetlink.h>
  12. #include <linux/iopoll.h>
  13. #include "lan743x_main.h"
  14. static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  15. {
  16. pci_release_selected_regions(adapter->pdev,
  17. pci_select_bars(adapter->pdev,
  18. IORESOURCE_MEM));
  19. pci_disable_device(adapter->pdev);
  20. }
  21. static int lan743x_pci_init(struct lan743x_adapter *adapter,
  22. struct pci_dev *pdev)
  23. {
  24. unsigned long bars = 0;
  25. int ret;
  26. adapter->pdev = pdev;
  27. ret = pci_enable_device_mem(pdev);
  28. if (ret)
  29. goto return_error;
  30. netif_info(adapter, probe, adapter->netdev,
  31. "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
  32. pdev->vendor, pdev->device);
  33. bars = pci_select_bars(pdev, IORESOURCE_MEM);
  34. if (!test_bit(0, &bars))
  35. goto disable_device;
  36. ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
  37. if (ret)
  38. goto disable_device;
  39. pci_set_master(pdev);
  40. return 0;
  41. disable_device:
  42. pci_disable_device(adapter->pdev);
  43. return_error:
  44. return ret;
  45. }
  46. u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
  47. {
  48. return ioread32(&adapter->csr.csr_address[offset]);
  49. }
  50. void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data)
  51. {
  52. iowrite32(data, &adapter->csr.csr_address[offset]);
  53. }
/* Helper so readx_poll_timeout() can re-read a CSR each poll iteration;
 * relies on a local variable named 'adapter' being in scope at the call
 * site.
 */
#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)

/* Issue a "light" chip reset: set HW_CFG.LRST and poll until the hardware
 * clears it again.
 *
 * Return: 0 on success, or -ETIMEDOUT if LRST does not self-clear within
 * 10 s (polled every 100 ms).
 */
static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	data = lan743x_csr_read(adapter, HW_CFG);
	data |= HW_CFG_LRST_;
	lan743x_csr_write(adapter, HW_CFG, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
				  !(data & HW_CFG_LRST_), 100000, 10000000);
}
/* Poll register @offset until the masked bits match @target_value
 * (1 = at least one bit of @bit_mask set, 0 = all clear).
 *
 * Argument mapping into readx_poll_timeout(): @usleep_max is used as the
 * per-iteration sleep and @usleep_min * @count as the total timeout, both
 * in microseconds.  NOTE(review): the min/max parameter names do not match
 * that usage — presumably historical; verify against callers before
 * changing.
 *
 * Return: 0 when the condition is met, -ETIMEDOUT otherwise.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == ((data & bit_mask) ? 1 : 0),
				  usleep_max, usleep_min * count);
}
/* Map BAR 0 (the CSR window), read and validate the chip ID, record
 * revision-dependent capability flags, then perform a light reset.
 *
 * Return: 0 on success, -ENOMEM if the BAR cannot be ioremapped, -ENODEV
 * if the chip ID is not recognised, or the lan743x_csr_light_reset()
 * error code.
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;
	int result;

	/* BAR 0 is the control/status register window */
	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address) {
		result = -ENOMEM;
		goto clean_up;
	}

	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
		result = -ENODEV;
		goto clean_up;
	}

	/* Assume interrupt auto set/clear support by default; revision A0
	 * lacks it and has it stripped below.
	 */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}

	result = lan743x_csr_light_reset(adapter);
	if (result)
		goto clean_up;
	return 0;

clean_up:
	/* the devm_ioremap() mapping is released automatically on teardown */
	return result;
}
  114. static void lan743x_intr_software_isr(void *context)
  115. {
  116. struct lan743x_adapter *adapter = context;
  117. struct lan743x_intr *intr = &adapter->intr;
  118. u32 int_sts;
  119. int_sts = lan743x_csr_read(adapter, INT_STS);
  120. if (int_sts & INT_BIT_SW_GP_) {
  121. lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
  122. intr->software_isr_flag = 1;
  123. }
  124. }
  125. static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
  126. {
  127. struct lan743x_tx *tx = context;
  128. struct lan743x_adapter *adapter = tx->adapter;
  129. bool enable_flag = true;
  130. u32 int_en = 0;
  131. int_en = lan743x_csr_read(adapter, INT_EN_SET);
  132. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
  133. lan743x_csr_write(adapter, INT_EN_CLR,
  134. INT_BIT_DMA_TX_(tx->channel_number));
  135. }
  136. if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
  137. u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
  138. u32 dmac_int_sts;
  139. u32 dmac_int_en;
  140. if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
  141. dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  142. else
  143. dmac_int_sts = ioc_bit;
  144. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
  145. dmac_int_en = lan743x_csr_read(adapter,
  146. DMAC_INT_EN_SET);
  147. else
  148. dmac_int_en = ioc_bit;
  149. dmac_int_en &= ioc_bit;
  150. dmac_int_sts &= dmac_int_en;
  151. if (dmac_int_sts & ioc_bit) {
  152. napi_schedule(&tx->napi);
  153. enable_flag = false;/* poll func will enable later */
  154. }
  155. }
  156. if (enable_flag)
  157. /* enable isr */
  158. lan743x_csr_write(adapter, INT_EN_SET,
  159. INT_BIT_DMA_TX_(tx->channel_number));
  160. }
  161. static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
  162. {
  163. struct lan743x_rx *rx = context;
  164. struct lan743x_adapter *adapter = rx->adapter;
  165. bool enable_flag = true;
  166. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
  167. lan743x_csr_write(adapter, INT_EN_CLR,
  168. INT_BIT_DMA_RX_(rx->channel_number));
  169. }
  170. if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
  171. u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
  172. u32 dmac_int_sts;
  173. u32 dmac_int_en;
  174. if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
  175. dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
  176. else
  177. dmac_int_sts = rx_frame_bit;
  178. if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
  179. dmac_int_en = lan743x_csr_read(adapter,
  180. DMAC_INT_EN_SET);
  181. else
  182. dmac_int_en = rx_frame_bit;
  183. dmac_int_en &= rx_frame_bit;
  184. dmac_int_sts &= dmac_int_en;
  185. if (dmac_int_sts & rx_frame_bit) {
  186. napi_schedule(&rx->napi);
  187. enable_flag = false;/* poll funct will enable later */
  188. }
  189. }
  190. if (enable_flag) {
  191. /* enable isr */
  192. lan743x_csr_write(adapter, INT_EN_SET,
  193. INT_BIT_DMA_RX_(rx->channel_number));
  194. }
  195. }
/* Shared-vector dispatcher: fans the summary status word out to the
 * per-channel RX and TX handlers and the software-GP handler, then masks
 * any interrupt sources that remain unhandled.
 *
 * @context: the lan743x_adapter.
 * @int_sts: INT_STS summary bits for this vector.
 * @flags:   LAN743X_VECTOR_FLAG_* forwarded to the source handlers.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
	}
	/* anything left has no handler here: mask it so it cannot storm */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}
/* Top-level IRQ handler attached to every vector by
 * lan743x_intr_register_isr().  Reads or derives the interrupt status,
 * optionally masks the vector/master interrupt while dispatching to the
 * vector's source handler, then re-enables according to the vector flags.
 *
 * Return: IRQ_HANDLED if any enabled, owned source was serviced,
 * IRQ_NONE otherwise (needed for correct shared-IRQ accounting).
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		/* read-to-clear register acks sources as a side effect */
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}

	/* master enable not set: this interrupt cannot be ours */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}

	/* dispatch only sources that are both enabled and owned by us */
	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* no handler registered: disable these sources */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));

irq_done:
	return result;
}
  288. static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
  289. {
  290. struct lan743x_intr *intr = &adapter->intr;
  291. int result = -ENODEV;
  292. int timeout = 10;
  293. intr->software_isr_flag = 0;
  294. /* enable interrupt */
  295. lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
  296. /* activate interrupt here */
  297. lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
  298. while ((timeout > 0) && (!(intr->software_isr_flag))) {
  299. usleep_range(1000, 20000);
  300. timeout--;
  301. }
  302. if (intr->software_isr_flag)
  303. result = 0;
  304. /* disable interrupts */
  305. lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
  306. return result;
  307. }
  308. static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
  309. int vector_index, u32 flags,
  310. u32 int_mask,
  311. lan743x_vector_handler handler,
  312. void *context)
  313. {
  314. struct lan743x_vector *vector = &adapter->intr.vector_list
  315. [vector_index];
  316. int ret;
  317. vector->adapter = adapter;
  318. vector->flags = flags;
  319. vector->vector_index = vector_index;
  320. vector->int_mask = int_mask;
  321. vector->handler = handler;
  322. vector->context = context;
  323. ret = request_irq(vector->irq,
  324. lan743x_intr_entry_isr,
  325. (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
  326. IRQF_SHARED : 0, DRIVER_NAME, vector);
  327. if (ret) {
  328. vector->handler = NULL;
  329. vector->context = NULL;
  330. vector->int_mask = 0;
  331. vector->flags = 0;
  332. }
  333. return ret;
  334. }
  335. static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
  336. int vector_index)
  337. {
  338. struct lan743x_vector *vector = &adapter->intr.vector_list
  339. [vector_index];
  340. free_irq(vector->irq, vector);
  341. vector->handler = NULL;
  342. vector->context = NULL;
  343. vector->int_mask = 0;
  344. vector->flags = 0;
  345. }
  346. static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
  347. u32 int_mask)
  348. {
  349. int index;
  350. for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
  351. if (adapter->intr.vector_list[index].int_mask & int_mask)
  352. return adapter->intr.vector_list[index].flags;
  353. }
  354. return 0;
  355. }
/* Tear down interrupt handling: mask the master and all vector
 * interrupts, free every requested IRQ, then disable MSI or MSI-X if it
 * was enabled.  Safe on a partially initialised intr state — used as the
 * error-unwind path of lan743x_intr_open().
 */
static void lan743x_intr_close(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int index = 0;

	/* mask everything before handlers go away */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);

	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
			lan743x_intr_unregister_isr(adapter, index);
			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
		}
	}

	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
	}

	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
		pci_disable_msix(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
	}
}
/* Set up interrupt delivery.  Tries MSI-X first (up to one vector per
 * TX/RX channel plus one shared vector), then MSI, then a legacy shared
 * pin.  Vector 0 always carries the "other" sources plus any channels
 * without a dedicated vector; surplus vectors are given to TX channels
 * first, then RX channels.  Finishes with a self-test of the interrupt
 * path via lan743x_intr_test_isr().
 *
 * Return: 0 on success or a negative errno; on failure everything
 * acquired so far is released through lan743x_intr_close().
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	u32 int_vec_en_auto_clr = 0;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
		msix_entries[index].entry = index;
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + LAN743X_USED_TX_CHANNELS +
				    LAN743X_USED_RX_CHANNELS);
	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		/* revision A0 is excluded from MSI here */
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);

	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		/* legacy pin may be shared; mask via the master enable */
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}

	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		/* hardware supports read-to-clear; drop the manual
		 * status-read/ack/enable-toggle steps
		 */
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	/* vector 0 initially owns all RX, TX and "other" sources */
	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);

	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	/* interrupt moderation is not configured on revision A0 */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* give each TX channel its own vector when enough are available */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}

	/* remaining vectors (after the TX ones) go to RX channels */
	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   LAN743X_USED_TX_CHANNELS - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}
/* lan743x_dp_write - write a block of u32 words to an internal data port RAM
 * @adapter: device context
 * @select: data port RAM select (e.g. DP_SEL_RFE_RAM)
 * @addr: start address within the selected RAM
 * @length: number of u32 words to write from @buf
 * @buf: source buffer
 *
 * Serialized by adapter->dp_lock. Every word requires a DPRDY ready
 * handshake before the next command may be issued.
 *
 * Return: 0 on success, -EIO on any handshake timeout.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	int ret = -EIO;
	u32 dp_sel;
	int i;

	mutex_lock(&adapter->dp_lock);
	/* data port must be idle (DPRDY set) before selecting a RAM */
	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		goto unlock;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		/* wait for this word's write to complete before the next */
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			goto unlock;
	}
	ret = 0;

unlock:
	mutex_unlock(&adapter->dp_lock);
	return ret;
}
  604. static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
  605. {
  606. u32 ret;
  607. ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
  608. MAC_MII_ACC_PHY_ADDR_MASK_;
  609. ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
  610. MAC_MII_ACC_MIIRINDA_MASK_;
  611. if (read)
  612. ret |= MAC_MII_ACC_MII_READ_;
  613. else
  614. ret |= MAC_MII_ACC_MII_WRITE_;
  615. ret |= MAC_MII_ACC_MII_BUSY_;
  616. return ret;
  617. }
  618. static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
  619. {
  620. u32 data;
  621. return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
  622. !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
  623. }
/* lan743x_mdiobus_read - MDIO bus read callback
 * @bus: mii bus; priv holds the lan743x_adapter
 * @phy_id: PHY address on the bus
 * @index: PHY register index
 *
 * Return: register value (low 16 bits) on success, negative errno on
 * busy-wait timeout.
 */
static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}
/* lan743x_mdiobus_write - MDIO bus write callback
 * @bus: mii bus; priv holds the lan743x_adapter
 * @phy_id: PHY address on the bus
 * @index: PHY register index
 * @regval: value to write
 *
 * Return: 0 on success, negative errno on busy-wait timeout.
 */
static int lan743x_mdiobus_write(struct mii_bus *bus,
				 int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	/* data must be loaded before the command word triggers the write */
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}
  660. static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
  661. u8 *addr)
  662. {
  663. u32 addr_lo, addr_hi;
  664. addr_lo = addr[0] |
  665. addr[1] << 8 |
  666. addr[2] << 16 |
  667. addr[3] << 24;
  668. addr_hi = addr[4] |
  669. addr[5] << 8;
  670. lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
  671. lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
  672. ether_addr_copy(adapter->mac_address, addr);
  673. netif_info(adapter, drv, adapter->netdev,
  674. "MAC address set to %pM\n", addr);
  675. }
  676. static int lan743x_mac_init(struct lan743x_adapter *adapter)
  677. {
  678. bool mac_address_valid = true;
  679. struct net_device *netdev;
  680. u32 mac_addr_hi = 0;
  681. u32 mac_addr_lo = 0;
  682. u32 data;
  683. int ret;
  684. netdev = adapter->netdev;
  685. lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
  686. ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
  687. 0, 1000, 20000, 100);
  688. if (ret)
  689. return ret;
  690. /* setup auto duplex, and speed detection */
  691. data = lan743x_csr_read(adapter, MAC_CR);
  692. data |= MAC_CR_ADD_ | MAC_CR_ASD_;
  693. data |= MAC_CR_CNTR_RST_;
  694. lan743x_csr_write(adapter, MAC_CR, data);
  695. mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
  696. mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
  697. adapter->mac_address[0] = mac_addr_lo & 0xFF;
  698. adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
  699. adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
  700. adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
  701. adapter->mac_address[4] = mac_addr_hi & 0xFF;
  702. adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
  703. if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
  704. mac_addr_lo == 0xFFFFFFFF) {
  705. mac_address_valid = false;
  706. } else if (!is_valid_ether_addr(adapter->mac_address)) {
  707. mac_address_valid = false;
  708. }
  709. if (!mac_address_valid)
  710. random_ether_addr(adapter->mac_address);
  711. lan743x_mac_set_address(adapter, adapter->mac_address);
  712. ether_addr_copy(netdev->dev_addr, adapter->mac_address);
  713. return 0;
  714. }
  715. static int lan743x_mac_open(struct lan743x_adapter *adapter)
  716. {
  717. int ret = 0;
  718. u32 temp;
  719. temp = lan743x_csr_read(adapter, MAC_RX);
  720. lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
  721. temp = lan743x_csr_read(adapter, MAC_TX);
  722. lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
  723. return ret;
  724. }
  725. static void lan743x_mac_close(struct lan743x_adapter *adapter)
  726. {
  727. u32 temp;
  728. temp = lan743x_csr_read(adapter, MAC_TX);
  729. temp &= ~MAC_TX_TXEN_;
  730. lan743x_csr_write(adapter, MAC_TX, temp);
  731. lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
  732. 1, 1000, 20000, 100);
  733. temp = lan743x_csr_read(adapter, MAC_RX);
  734. temp &= ~MAC_RX_RXEN_;
  735. lan743x_csr_write(adapter, MAC_RX, temp);
  736. lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
  737. 1, 1000, 20000, 100);
  738. }
  739. static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
  740. bool tx_enable, bool rx_enable)
  741. {
  742. u32 flow_setting = 0;
  743. /* set maximum pause time because when fifo space frees
  744. * up a zero value pause frame will be sent to release the pause
  745. */
  746. flow_setting = MAC_FLOW_CR_FCPT_MASK_;
  747. if (tx_enable)
  748. flow_setting |= MAC_FLOW_CR_TX_FCEN_;
  749. if (rx_enable)
  750. flow_setting |= MAC_FLOW_CR_RX_FCEN_;
  751. lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
  752. }
/* lan743x_mac_set_mtu - program the MAC RX maximum frame size
 * @adapter: device context
 * @new_mtu: new MTU; programmed max frame size is new_mtu + ETH_HLEN + 4
 *
 * If the receiver is enabled it is quiesced first: the RXD "disabled"
 * latch handling below appears to use write-1-to-clear semantics --
 * NOTE(review): confirm against the LAN743x datasheet. The receiver
 * is re-enabled afterwards iff it was enabled on entry.
 *
 * Always returns 0.
 */
static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
{
	int enabled = 0;
	u32 mac_rx = 0;

	mac_rx = lan743x_csr_read(adapter, MAC_RX);
	if (mac_rx & MAC_RX_RXEN_) {
		enabled = 1;
		if (mac_rx & MAC_RX_RXD_) {
			/* clear a stale disabled latch before disabling */
			lan743x_csr_write(adapter, MAC_RX, mac_rx);
			mac_rx &= ~MAC_RX_RXD_;
		}
		mac_rx &= ~MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
		/* wait until the receiver reports disabled */
		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
					 1, 1000, 20000, 100);
		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
	}

	/* program the new maximum frame size */
	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
		  MAC_RX_MAX_SIZE_MASK_);
	lan743x_csr_write(adapter, MAC_RX, mac_rx);

	if (enabled) {
		mac_rx |= MAC_RX_RXEN_;
		lan743x_csr_write(adapter, MAC_RX, mac_rx);
	}
	return 0;
}
  780. /* PHY */
  781. static int lan743x_phy_reset(struct lan743x_adapter *adapter)
  782. {
  783. u32 data;
  784. /* Only called with in probe, and before mdiobus_register */
  785. data = lan743x_csr_read(adapter, PMT_CTL);
  786. data |= PMT_CTL_ETH_PHY_RST_;
  787. lan743x_csr_write(adapter, PMT_CTL, data);
  788. return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
  789. (!(data & PMT_CTL_ETH_PHY_RST_) &&
  790. (data & PMT_CTL_READY_)),
  791. 50000, 1000000);
  792. }
  793. static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
  794. u8 duplex, u16 local_adv,
  795. u16 remote_adv)
  796. {
  797. struct lan743x_phy *phy = &adapter->phy;
  798. u8 cap;
  799. if (phy->fc_autoneg)
  800. cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
  801. else
  802. cap = phy->fc_request_control;
  803. lan743x_mac_flow_ctrl_set_enables(adapter,
  804. cap & FLOW_CTRL_TX,
  805. cap & FLOW_CTRL_RX);
  806. }
  807. static int lan743x_phy_init(struct lan743x_adapter *adapter)
  808. {
  809. struct net_device *netdev;
  810. int ret;
  811. netdev = adapter->netdev;
  812. ret = lan743x_phy_reset(adapter);
  813. if (ret)
  814. return ret;
  815. return 0;
  816. }
  817. static void lan743x_phy_link_status_change(struct net_device *netdev)
  818. {
  819. struct lan743x_adapter *adapter = netdev_priv(netdev);
  820. struct phy_device *phydev = netdev->phydev;
  821. phy_print_status(phydev);
  822. if (phydev->state == PHY_RUNNING) {
  823. struct ethtool_link_ksettings ksettings;
  824. struct lan743x_phy *phy = NULL;
  825. int remote_advertisement = 0;
  826. int local_advertisement = 0;
  827. phy = &adapter->phy;
  828. memset(&ksettings, 0, sizeof(ksettings));
  829. phy_ethtool_get_link_ksettings(netdev, &ksettings);
  830. local_advertisement = phy_read(phydev, MII_ADVERTISE);
  831. if (local_advertisement < 0)
  832. return;
  833. remote_advertisement = phy_read(phydev, MII_LPA);
  834. if (remote_advertisement < 0)
  835. return;
  836. lan743x_phy_update_flowcontrol(adapter,
  837. ksettings.base.duplex,
  838. local_advertisement,
  839. remote_advertisement);
  840. }
  841. }
  842. static void lan743x_phy_close(struct lan743x_adapter *adapter)
  843. {
  844. struct net_device *netdev = adapter->netdev;
  845. phy_stop(netdev->phydev);
  846. phy_disconnect(netdev->phydev);
  847. netdev->phydev = NULL;
  848. }
  849. static int lan743x_phy_open(struct lan743x_adapter *adapter)
  850. {
  851. struct lan743x_phy *phy = &adapter->phy;
  852. struct phy_device *phydev;
  853. struct net_device *netdev;
  854. int ret = -EIO;
  855. u32 mii_adv;
  856. netdev = adapter->netdev;
  857. phydev = phy_find_first(adapter->mdiobus);
  858. if (!phydev)
  859. goto return_error;
  860. ret = phy_connect_direct(netdev, phydev,
  861. lan743x_phy_link_status_change,
  862. PHY_INTERFACE_MODE_GMII);
  863. if (ret)
  864. goto return_error;
  865. /* MAC doesn't support 1000T Half */
  866. phydev->supported &= ~SUPPORTED_1000baseT_Half;
  867. /* support both flow controls */
  868. phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
  869. phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
  870. mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
  871. phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
  872. phy->fc_autoneg = phydev->autoneg;
  873. phy_start(phydev);
  874. phy_start_aneg(phydev);
  875. return 0;
  876. return_error:
  877. return ret;
  878. }
  879. static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
  880. {
  881. u8 *mac_addr;
  882. u32 mac_addr_hi = 0;
  883. u32 mac_addr_lo = 0;
  884. /* Add mac address to perfect Filter */
  885. mac_addr = adapter->mac_address;
  886. mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
  887. (((u32)(mac_addr[1])) << 8) |
  888. (((u32)(mac_addr[2])) << 16) |
  889. (((u32)(mac_addr[3])) << 24));
  890. mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
  891. (((u32)(mac_addr[5])) << 8));
  892. lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
  893. lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
  894. mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
  895. }
/* lan743x_rfe_set_multicast - program RX filtering from netdev state
 * @adapter: device context
 *
 * Rebuilds RFE_CTL and the address filters: broadcast is always
 * accepted; IFF_PROMISC accepts all unicast and multicast;
 * IFF_ALLMULTI accepts all multicast. Otherwise the first 32
 * multicast addresses go into perfect-filter slots 1-32 (slot 0 holds
 * the device MAC) and any remaining addresses are folded into the
 * 512-bit multicast hash, written through the RFE data port.
 */
static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 hash_table[DP_SEL_VHF_HASH_LEN];
	u32 rfctl;
	u32 data;

	rfctl = lan743x_csr_read(adapter, RFE_CTL);
	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
		   RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
	rfctl |= RFE_CTL_AB_;
	if (netdev->flags & IFF_PROMISC) {
		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rfctl |= RFE_CTL_AM_;
	}

	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
	if (netdev_mc_count(netdev)) {
		struct netdev_hw_addr *ha;
		int i;

		rfctl |= RFE_CTL_DA_PERFECT_;
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				/* invalidate the slot before rewriting it */
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), 0);
				data = ha->addr[3];
				data = ha->addr[2] | (data << 8);
				data = ha->addr[1] | (data << 8);
				data = ha->addr[0] | (data << 8);
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_LO(i), data);
				data = ha->addr[5];
				data = ha->addr[4] | (data << 8);
				data |= RFE_ADDR_FILT_HI_VALID_;
				lan743x_csr_write(adapter,
						  RFE_ADDR_FILT_HI(i), data);
			} else {
				/* overflow: hash on bits 23..31 of the CRC */
				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
					      23) & 0x1FF;
				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
				rfctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
			 DP_SEL_VHF_VLAN_LEN,
			 DP_SEL_VHF_HASH_LEN, hash_table);
	lan743x_csr_write(adapter, RFE_CTL, rfctl);
}
/* lan743x_dmac_init - reset and configure the DMA controller
 * @adapter: device context
 *
 * Issues a DMAC soft reset, programs the descriptor spacing (from the
 * compile-time DEFAULT_DMA_DESCRIPTOR_SPACING), enables interrupt
 * coalescing on non-A0 silicon, and sets coalescing/OBFF thresholds.
 *
 * Return: 0 on success, -EPERM for an unsupported descriptor spacing.
 */
static int lan743x_dmac_init(struct lan743x_adapter *adapter)
{
	u32 data = 0;

	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
				 0, 1000, 20000, 100);
	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
	case DMA_DESCRIPTOR_SPACING_16:
		data = DMAC_CFG_MAX_DSPACE_16_;
		break;
	case DMA_DESCRIPTOR_SPACING_32:
		data = DMAC_CFG_MAX_DSPACE_32_;
		break;
	case DMA_DESCRIPTOR_SPACING_64:
		data = DMAC_CFG_MAX_DSPACE_64_;
		break;
	case DMA_DESCRIPTOR_SPACING_128:
		data = DMAC_CFG_MAX_DSPACE_128_;
		break;
	default:
		return -EPERM;
	}
	/* coalescing is only enabled on non-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= DMAC_CFG_COAL_EN_;
	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
	lan743x_csr_write(adapter, DMAC_CFG, data);

	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
	data |= DMAC_COAL_CFG_TIMER_TX_START_;
	data |= DMAC_COAL_CFG_FLUSH_INTS_;
	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);

	data = DMAC_OBFF_TX_THRES_SET_(0x08);
	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
	return 0;
}
  988. static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
  989. int tx_channel)
  990. {
  991. u32 dmac_cmd = 0;
  992. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  993. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  994. DMAC_CMD_START_T_(tx_channel)),
  995. (dmac_cmd &
  996. DMAC_CMD_STOP_T_(tx_channel)));
  997. }
  998. static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
  999. int tx_channel)
  1000. {
  1001. int timeout = 100;
  1002. int result = 0;
  1003. while (timeout &&
  1004. ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
  1005. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1006. usleep_range(1000, 20000);
  1007. timeout--;
  1008. }
  1009. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1010. result = -ENODEV;
  1011. return result;
  1012. }
  1013. static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
  1014. int rx_channel)
  1015. {
  1016. u32 dmac_cmd = 0;
  1017. dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
  1018. return DMAC_CHANNEL_STATE_SET((dmac_cmd &
  1019. DMAC_CMD_START_R_(rx_channel)),
  1020. (dmac_cmd &
  1021. DMAC_CMD_STOP_R_(rx_channel)));
  1022. }
  1023. static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
  1024. int rx_channel)
  1025. {
  1026. int timeout = 100;
  1027. int result = 0;
  1028. while (timeout &&
  1029. ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
  1030. DMAC_CHANNEL_STATE_STOP_PENDING)) {
  1031. usleep_range(1000, 20000);
  1032. timeout--;
  1033. }
  1034. if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
  1035. result = -ENODEV;
  1036. return result;
  1037. }
  1038. static void lan743x_tx_release_desc(struct lan743x_tx *tx,
  1039. int descriptor_index, bool cleanup)
  1040. {
  1041. struct lan743x_tx_buffer_info *buffer_info = NULL;
  1042. struct lan743x_tx_descriptor *descriptor = NULL;
  1043. u32 descriptor_type = 0;
  1044. descriptor = &tx->ring_cpu_ptr[descriptor_index];
  1045. buffer_info = &tx->buffer_info[descriptor_index];
  1046. if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
  1047. goto done;
  1048. descriptor_type = (descriptor->data0) &
  1049. TX_DESC_DATA0_DTYPE_MASK_;
  1050. if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
  1051. goto clean_up_data_descriptor;
  1052. else
  1053. goto clear_active;
  1054. clean_up_data_descriptor:
  1055. if (buffer_info->dma_ptr) {
  1056. if (buffer_info->flags &
  1057. TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
  1058. dma_unmap_page(&tx->adapter->pdev->dev,
  1059. buffer_info->dma_ptr,
  1060. buffer_info->buffer_length,
  1061. DMA_TO_DEVICE);
  1062. } else {
  1063. dma_unmap_single(&tx->adapter->pdev->dev,
  1064. buffer_info->dma_ptr,
  1065. buffer_info->buffer_length,
  1066. DMA_TO_DEVICE);
  1067. }
  1068. buffer_info->dma_ptr = 0;
  1069. buffer_info->buffer_length = 0;
  1070. }
  1071. if (buffer_info->skb) {
  1072. dev_kfree_skb(buffer_info->skb);
  1073. buffer_info->skb = NULL;
  1074. }
  1075. clear_active:
  1076. buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
  1077. done:
  1078. memset(buffer_info, 0, sizeof(*buffer_info));
  1079. memset(descriptor, 0, sizeof(*descriptor));
  1080. }
  1081. static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
  1082. {
  1083. return ((++index) % tx->ring_size);
  1084. }
  1085. static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
  1086. {
  1087. while ((*tx->head_cpu_ptr) != (tx->last_head)) {
  1088. lan743x_tx_release_desc(tx, tx->last_head, false);
  1089. tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
  1090. }
  1091. }
  1092. static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
  1093. {
  1094. u32 original_head = 0;
  1095. original_head = tx->last_head;
  1096. do {
  1097. lan743x_tx_release_desc(tx, tx->last_head, true);
  1098. tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
  1099. } while (tx->last_head != original_head);
  1100. memset(tx->ring_cpu_ptr, 0,
  1101. sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
  1102. memset(tx->buffer_info, 0,
  1103. sizeof(*tx->buffer_info) * (tx->ring_size));
  1104. }
  1105. static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
  1106. struct sk_buff *skb)
  1107. {
  1108. int result = 1; /* 1 for the main skb buffer */
  1109. int nr_frags = 0;
  1110. if (skb_is_gso(skb))
  1111. result++; /* requires an extension descriptor */
  1112. nr_frags = skb_shinfo(skb)->nr_frags;
  1113. result += nr_frags; /* 1 for each fragment buffer */
  1114. return result;
  1115. }
  1116. static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
  1117. {
  1118. int last_head = tx->last_head;
  1119. int last_tail = tx->last_tail;
  1120. if (last_tail >= last_head)
  1121. return tx->ring_size - last_tail + last_head - 1;
  1122. else
  1123. return last_head - last_tail - 1;
  1124. }
/* lan743x_tx_frame_start - begin frame assembly with the head buffer
 * @tx: TX channel
 * @first_buffer: virtual address of the skb head data
 * @first_buffer_length: bytes in the head buffer
 * @frame_length: value for the descriptor frame-length/MSS field
 *	(for GSO this carries the MSS, see lan743x_tx_xmit_frame)
 * @check_sum: request hardware IP/TCP checksum insertion
 *
 * called only from within lan743x_tx_xmit_frame.
 * assuming tx->ring_lock has already been acquired.
 *
 * Maps the head buffer for DMA and fills the first descriptor; data0
 * is staged in tx->frame_data0 and written by a later assembler step
 * so the hardware never sees a half-built descriptor.
 *
 * Return: 0 on success, -ENOMEM if the DMA mapping fails.
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool check_sum)
{
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	/* first segment: request FCS insertion, optional checksum offload */
	tx->frame_data0 = (first_buffer_length &
			   TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FS_ |
			  TX_DESC_DATA0_FCS_;
	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* lan743x_tx_frame_add_lso - insert the LSO extension descriptor
 * @tx: TX channel
 * @frame_length: total payload length carried in the extension descriptor
 *
 * called only from within lan743x_tx_xmit_frame.
 * assuming tx->ring_lock has already been acquired.
 *
 * Publishes the previous descriptor's data0 (with the EXT flag set)
 * and stages an extension descriptor announcing large-send offload.
 */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length)
{
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor (no buffer attached) */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}
/* lan743x_tx_frame_add_fragment - append one skb page fragment
 * @tx: TX channel
 * @fragment: the skb fragment to DMA-map and attach
 * @frame_length: value for the descriptor frame-length/MSS field
 *
 * called only from within lan743x_tx_xmit_frame
 * assuming tx->ring_lock has already been acquired
 *
 * Zero-length fragments are skipped. On DMA-mapping failure the whole
 * partially built frame (from tx->frame_first) is released and the
 * assembly state is reset; the caller must NOT call
 * lan743x_tx_frame_end in that case.
 *
 * Return: 0 on success, -ENOMEM on mapping failure.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const struct skb_frag_struct *fragment,
					 unsigned int frame_length)
{
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = tx->frame_data0;

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
	tx_descriptor->data3 = (frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}
/* lan743x_tx_frame_end - finish frame assembly and kick the DMA
 * @tx: TX channel
 * @skb: the skb, attached to the last descriptor for later freeing
 * @ignore_sync: mark the buffer so completion skips sync handling
 *
 * called only from within lan743x_tx_xmit_frame
 * assuming tx->ring_lock has already been acquired
 *
 * Sets last-segment and interrupt-on-completion on the final
 * descriptor, publishes data0, then (after a DMA write barrier)
 * writes the new tail to the hardware, folding in the auto-set
 * interrupt enables when those vector flags are active.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool ignore_sync)
{
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_LS_;
	tx->frame_data0 |= TX_DESC_DATA0_IOC_;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	buffer_info->skb = skb;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor->data0 = tx->frame_data0;
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	/* descriptors must be visible to the device before the tail write */
	dma_wmb();

	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
				 TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}
/* lan743x_tx_xmit_frame - assemble and queue one skb on the TX ring
 * @tx: TX channel
 * @skb: frame to transmit; ownership is taken in all paths
 *
 * If the ring lacks space the skb is parked in tx->overflow_skb and
 * the queue is stopped (or the skb is dropped when it could never
 * fit the ring at all). Otherwise the frame is built through the
 * frame-assembler helpers: head buffer, optional LSO extension
 * descriptor, then each page fragment. For GSO frames the descriptor
 * length field carries the MSS (clamped to a minimum of 8).
 *
 * Always returns NETDEV_TX_OK; error paths free the skb internally.
 */
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool ignore_sync = false;	/* never set here; no sync handling yet */
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);

	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	if (required_number_of_descriptors >
	    lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			/* could never fit, even in an empty ring: drop */
			dev_kfree_skb(skb);
		} else {
			/* save to overflow buffer */
			tx->overflow_skb = skb;
			netif_stop_queue(tx->adapter->netdev);
		}
		goto unlock;
	}

	/* space available, transmit skb  */
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb(skb);
		goto unlock;
	}

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length);

	if (nr_frags <= 0)
		goto finish;

	for (j = 0; j < nr_frags; j++) {
		const struct skb_frag_struct *frag;

		frag = &(skb_shinfo(skb)->frags[j]);
		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 * lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 * lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return NETDEV_TX_OK;
}
/* lan743x_tx_napi_poll - TX completion NAPI handler
 * @napi: embedded in the lan743x_tx channel
 * @weight: NAPI budget
 *
 * Acknowledges the IOC interrupt (when W2C status is in use),
 * reclaims completed descriptors under the ring lock, and restarts
 * the queue -- or retransmits the saved overflow skb -- once ring
 * space is available again. Finally re-enables the TX interrupt if
 * NAPI completed.
 *
 * NOTE(review): this always reports @weight as work done and calls
 * napi_complete_done(napi, weight); the usual NAPI contract is to
 * report work < budget before completing -- confirm against current
 * mainline, which reworked this handler.
 */
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
	struct lan743x_adapter *adapter = tx->adapter;
	bool start_transmitter = false;
	unsigned long irq_flags = 0;
	u32 ioc_bit = 0;
	u32 int_sts = 0;

	adapter = tx->adapter;
	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
	int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);

	/* clean up tx ring */
	lan743x_tx_release_completed_descriptors(tx);
	if (netif_queue_stopped(adapter->netdev)) {
		if (tx->overflow_skb) {
			/* wake only once the parked skb fits */
			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
			    lan743x_tx_get_avail_desc(tx))
				start_transmitter = true;
		} else {
			netif_wake_queue(adapter->netdev);
		}
	}
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);

	if (start_transmitter) {
		/* space is now available, transmit overflow skb */
		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
		tx->overflow_skb = NULL;
		netif_wake_queue(adapter->netdev);
	}

	if (!napi_complete_done(napi, weight))
		goto done;

	/* enable isr */
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_read(adapter, INT_STS);

done:
	return weight;
}
  1391. static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
  1392. {
  1393. if (tx->head_cpu_ptr) {
  1394. pci_free_consistent(tx->adapter->pdev,
  1395. sizeof(*tx->head_cpu_ptr),
  1396. (void *)(tx->head_cpu_ptr),
  1397. tx->head_dma_ptr);
  1398. tx->head_cpu_ptr = NULL;
  1399. tx->head_dma_ptr = 0;
  1400. }
  1401. kfree(tx->buffer_info);
  1402. tx->buffer_info = NULL;
  1403. if (tx->ring_cpu_ptr) {
  1404. pci_free_consistent(tx->adapter->pdev,
  1405. tx->ring_allocation_size,
  1406. tx->ring_cpu_ptr,
  1407. tx->ring_dma_ptr);
  1408. tx->ring_allocation_size = 0;
  1409. tx->ring_cpu_ptr = NULL;
  1410. tx->ring_dma_ptr = 0;
  1411. }
  1412. tx->ring_size = 0;
  1413. }
/* lan743x_tx_ring_init - allocate TX descriptor ring resources
 * @tx: TX channel
 *
 * Allocates the page-aligned DMA-coherent descriptor ring, the
 * buffer_info array, and the DMA-coherent head writeback word. On
 * any failure everything already allocated is released via
 * lan743x_tx_ring_cleanup.
 *
 * Return: 0 on success; -EINVAL if the ring size does not fit the
 * TX_CFG_B length field; -ENOMEM on allocation failure or when the
 * head pointer is not 4-byte aligned.
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
					sizeof(*tx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* the hardware writes the head index back here; require 4-byte
	 * alignment
	 */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}
/* lan743x_tx_close - stop a TX channel and free its resources
 * @tx: TX channel to shut down
 *
 * Sequence: stop the DMAC channel and wait for it to halt, mask the
 * channel's interrupts, tear down NAPI, disable the TX FIFO and wait
 * for the disable to take effect, then release all in-flight
 * descriptors/skbs and the ring memory.  The order mirrors the reverse
 * of lan743x_tx_open().
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	/* stop the DMA engine first so no new descriptors are fetched */
	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	/* mask this channel's interrupt sources */
	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	/* disable the TX FIFO and wait for the enable bit to clear */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);

	/* drop any skb that was parked waiting for ring space */
	if (tx->overflow_skb) {
		dev_kfree_skb(tx->overflow_skb);
		tx->overflow_skb = NULL;
	}
	lan743x_tx_ring_cleanup(tx);
}
/* lan743x_tx_open - allocate and start a TX channel
 * @tx: TX channel to bring up
 *
 * Allocates the ring, resets and enables the TX FIFO, soft-resets the
 * DMAC channel, programs ring base/length, head write-back address and
 * interrupt behaviour, registers NAPI and finally starts the DMA
 * engine.  The register write order follows the hardware bring-up
 * sequence and should not be reordered.
 *
 * Return: 0 on success, negative errno from lan743x_tx_ring_init() on
 * failure.
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B: program ring length; wider DMA bursts on
	 * silicon revisions after A0
	 */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A: enable head-pointer write-back; extra
	 * thresholds are only available on post-A0 silicon
	 */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head from the hardware's current head index */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_napi_add(adapter->netdev,
		       &tx->napi, lan743x_tx_napi_poll,
		       tx->ring_size - 1);
	napi_enable(&tx->napi);

	/* translate the interrupt vector capabilities into TX_CFG_C */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	/* only enable the top-level interrupt here if hardware does not
	 * auto-set it on tail writes
	 */
	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}
  1575. static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
  1576. {
  1577. return ((++index) % rx->ring_size);
  1578. }
  1579. static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
  1580. {
  1581. struct lan743x_rx_buffer_info *buffer_info;
  1582. struct lan743x_rx_descriptor *descriptor;
  1583. int length = 0;
  1584. length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
  1585. descriptor = &rx->ring_cpu_ptr[index];
  1586. buffer_info = &rx->buffer_info[index];
  1587. buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
  1588. length,
  1589. GFP_ATOMIC | GFP_DMA);
  1590. if (!(buffer_info->skb))
  1591. return -ENOMEM;
  1592. buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
  1593. buffer_info->skb->data,
  1594. length,
  1595. DMA_FROM_DEVICE);
  1596. if (dma_mapping_error(&rx->adapter->pdev->dev,
  1597. buffer_info->dma_ptr)) {
  1598. buffer_info->dma_ptr = 0;
  1599. return -ENOMEM;
  1600. }
  1601. buffer_info->buffer_length = length;
  1602. descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
  1603. descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
  1604. descriptor->data3 = 0;
  1605. descriptor->data0 = (RX_DESC_DATA0_OWN_ |
  1606. (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
  1607. skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
  1608. return 0;
  1609. }
  1610. static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
  1611. {
  1612. struct lan743x_rx_buffer_info *buffer_info;
  1613. struct lan743x_rx_descriptor *descriptor;
  1614. descriptor = &rx->ring_cpu_ptr[index];
  1615. buffer_info = &rx->buffer_info[index];
  1616. descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
  1617. descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
  1618. descriptor->data3 = 0;
  1619. descriptor->data0 = (RX_DESC_DATA0_OWN_ |
  1620. ((buffer_info->buffer_length) &
  1621. RX_DESC_DATA0_BUF_LENGTH_MASK_));
  1622. }
  1623. static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
  1624. {
  1625. struct lan743x_rx_buffer_info *buffer_info;
  1626. struct lan743x_rx_descriptor *descriptor;
  1627. descriptor = &rx->ring_cpu_ptr[index];
  1628. buffer_info = &rx->buffer_info[index];
  1629. memset(descriptor, 0, sizeof(*descriptor));
  1630. if (buffer_info->dma_ptr) {
  1631. dma_unmap_single(&rx->adapter->pdev->dev,
  1632. buffer_info->dma_ptr,
  1633. buffer_info->buffer_length,
  1634. DMA_FROM_DEVICE);
  1635. buffer_info->dma_ptr = 0;
  1636. }
  1637. if (buffer_info->skb) {
  1638. dev_kfree_skb(buffer_info->skb);
  1639. buffer_info->skb = NULL;
  1640. }
  1641. memset(buffer_info, 0, sizeof(*buffer_info));
  1642. }
/* lan743x_rx_process_packet - consume at most one packet from the ring
 * @rx: RX channel to poll
 *
 * Walks descriptors from rx->last_head toward the hardware head index
 * (written back by the device), locating a first-segment (FS) through
 * last-segment (LS) span plus an optional timestamp extension
 * descriptor.  Single-buffer packets are passed to the stack via
 * napi_gro_receive(); multi-buffer spans are dropped and their slots
 * recycled (buffers are sized to hold a full frame, so this is not
 * expected).
 *
 * Return: RX_PROCESS_RESULT_PACKET_RECEIVED when a packet was delivered,
 * RX_PROCESS_RESULT_PACKET_DROPPED when a span was discarded, or
 * RX_PROCESS_RESULT_NOTHING_TO_DO when no complete packet is available.
 */
static int lan743x_rx_process_packet(struct lan743x_rx *rx)
{
	struct skb_shared_hwtstamps *hwtstamps = NULL;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;
	int current_head_index = -1;
	int extension_index = -1;
	int first_index = -1;
	int last_index = -1;

	current_head_index = *rx->head_cpu_ptr;
	/* sanity-check both indices before using them as ring offsets */
	if (current_head_index < 0 || current_head_index >= rx->ring_size)
		goto done;
	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
		goto done;
	if (rx->last_head != current_head_index) {
		/* desc still owned by hardware? then nothing is ready */
		descriptor = &rx->ring_cpu_ptr[rx->last_head];
		if (descriptor->data0 & RX_DESC_DATA0_OWN_)
			goto done;
		/* a packet must start with a first-segment descriptor */
		if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
			goto done;
		first_index = rx->last_head;
		if (descriptor->data0 & RX_DESC_DATA0_LS_) {
			last_index = rx->last_head;
		} else {
			int index;

			/* scan forward for the last-segment descriptor */
			index = lan743x_rx_next_index(rx, first_index);
			while (index != current_head_index) {
				descriptor = &rx->ring_cpu_ptr[index];
				if (descriptor->data0 & RX_DESC_DATA0_OWN_)
					goto done;
				if (descriptor->data0 & RX_DESC_DATA0_LS_) {
					last_index = index;
					break;
				}
				index = lan743x_rx_next_index(rx, index);
			}
		}
		if (last_index >= 0) {
			descriptor = &rx->ring_cpu_ptr[last_index];
			if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
				/* extension is expected to follow */
				int index = lan743x_rx_next_index(rx,
								  last_index);
				if (index != current_head_index) {
					descriptor = &rx->ring_cpu_ptr[index];
					if (descriptor->data0 &
					    RX_DESC_DATA0_OWN_) {
						goto done;
					}
					if (descriptor->data0 &
					    RX_DESC_DATA0_EXT_) {
						extension_index = index;
					} else {
						goto done;
					}
				} else {
					/* extension is not yet available */
					/* prevent processing of this packet */
					first_index = -1;
					last_index = -1;
				}
			}
		}
	}
	if (first_index >= 0 && last_index >= 0) {
		int real_last_index = last_index;
		struct sk_buff *skb = NULL;
		u32 ts_sec = 0;
		u32 ts_nsec = 0;

		/* packet is available */
		if (first_index == last_index) {
			/* single buffer packet */
			int packet_length;

			buffer_info = &rx->buffer_info[first_index];
			skb = buffer_info->skb;
			descriptor = &rx->ring_cpu_ptr[first_index];

			/* unmap from dma */
			if (buffer_info->dma_ptr) {
				dma_unmap_single(&rx->adapter->pdev->dev,
						 buffer_info->dma_ptr,
						 buffer_info->buffer_length,
						 DMA_FROM_DEVICE);
				buffer_info->dma_ptr = 0;
				buffer_info->buffer_length = 0;
			}
			buffer_info->skb = NULL;
			packet_length =	RX_DESC_DATA0_FRAME_LENGTH_GET_
					(descriptor->data0);
			/* trailing 4 bytes are the FCS - strip them.
			 * NOTE(review): assumes frame length >= 4; verify
			 * the hardware never reports a shorter frame.
			 */
			skb_put(skb, packet_length - 4);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);
			/* NOTE(review): return value ignored - on -ENOMEM
			 * this slot is left without a buffer; confirm
			 * intended behaviour.
			 */
			lan743x_rx_allocate_ring_element(rx, first_index);
		} else {
			int index = first_index;

			/* multi buffer packet not supported */
			/* this should not happen since
			 * buffers are allocated to be at least jumbo size
			 */

			/* clean up buffers */
			if (first_index <= last_index) {
				while ((index >= first_index) &&
				       (index <= last_index)) {
					lan743x_rx_release_ring_element(rx,
									index);
					lan743x_rx_allocate_ring_element(rx,
									 index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			} else {
				/* span wraps around the end of the ring */
				while ((index >= first_index) ||
				       (index <= last_index)) {
					lan743x_rx_release_ring_element(rx,
									index);
					lan743x_rx_allocate_ring_element(rx,
									 index);
					index = lan743x_rx_next_index(rx,
								      index);
				}
			}
		}
		if (extension_index >= 0) {
			/* extension descriptor carries the RX timestamp */
			descriptor = &rx->ring_cpu_ptr[extension_index];
			buffer_info = &rx->buffer_info[extension_index];

			ts_sec = descriptor->data1;
			ts_nsec = (descriptor->data2 &
				  RX_DESC_DATA2_TS_NS_MASK_);
			lan743x_rx_reuse_ring_element(rx, extension_index);
			real_last_index = extension_index;
		}

		if (!skb) {
			result = RX_PROCESS_RESULT_PACKET_DROPPED;
			goto move_forward;
		}

		if (extension_index < 0)
			goto pass_packet_to_os;
		hwtstamps = skb_hwtstamps(skb);
		if (hwtstamps)
			hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);

pass_packet_to_os:
		/* pass packet to OS */
		napi_gro_receive(&rx->napi, skb);
		result = RX_PROCESS_RESULT_PACKET_RECEIVED;

move_forward:
		/* push tail and head forward */
		rx->last_tail = real_last_index;
		rx->last_head = lan743x_rx_next_index(rx, real_last_index);
	}
done:
	return result;
}
/* lan743x_rx_napi_poll - NAPI poll handler for an RX channel
 * @napi: napi context embedded in struct lan743x_rx
 * @weight: NAPI budget (max packets to deliver this poll)
 *
 * Delivers up to @weight packets via lan743x_rx_process_packet().  If
 * the budget was exhausted, returns without completing NAPI so the core
 * polls again.  Otherwise completes NAPI, re-enables the channel
 * interrupt as dictated by vector_flags, and writes RX_TAIL to rearm
 * the hardware.
 *
 * Return: number of packets received this poll.
 */
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	count = 0;
	while (count < weight) {
		int rx_process_result = -1;

		rx_process_result = lan743x_rx_process_packet(rx);
		if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
			count++;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_NOTHING_TO_DO) {
			break;
		} else if (rx_process_result ==
			RX_PROCESS_RESULT_PACKET_DROPPED) {
			/* dropped packets do not consume budget */
			continue;
		}
	}
	rx->frame_count += count;
	/* budget exhausted: stay in polling mode, do not rearm the irq */
	if (count == weight)
		goto done;

	if (!napi_complete_done(napi, count))
		goto done;

	/* re-enable interrupts; auto-set capable vectors are rearmed via
	 * flag bits in the RX_TAIL write itself
	 */
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	/* update RX_TAIL */
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx_tail_flags | rx->last_tail);
done:
	return count;
}
/* lan743x_rx_ring_cleanup - free all memory backing an RX DMA ring
 * @rx: RX channel whose ring resources are freed
 *
 * Releases every per-slot buffer (skb + DMA mapping), then the
 * head-pointer write-back word, the buffer_info array, and the
 * descriptor ring.  All pointers/sizes are reset so a repeat call (or a
 * call on a partially initialized ring) is safe.
 */
static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
{
	/* per-slot buffers exist only if both arrays were allocated */
	if (rx->buffer_info && rx->ring_cpu_ptr) {
		int index;

		for (index = 0; index < rx->ring_size; index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		pci_free_consistent(rx->adapter->pdev,
				    sizeof(*rx->head_cpu_ptr),
				    rx->head_cpu_ptr,
				    rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		pci_free_consistent(rx->adapter->pdev,
				    rx->ring_allocation_size,
				    rx->ring_cpu_ptr,
				    rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}
/* lan743x_rx_ring_init - allocate DMA memory and buffers for an RX channel
 * @rx: RX channel to initialize
 *
 * Allocates the descriptor ring (page aligned, coherent), the
 * buffer_info array and the head write-back word, then pre-fills every
 * ring slot with a mapped receive buffer.  On any failure all partial
 * allocations are undone via lan743x_rx_ring_cleanup().
 *
 * Return: 0 on success, -EINVAL for an unusable ring size, -ENOMEM on
 * allocation failure or misaligned write-back address.
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must fit in the RX_CFG_B ring-length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					ring_allocation_size, &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;
	/* one buffer_info entry per descriptor, zero initialized */
	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	/* single coherent word the hardware writes its head index into */
	cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
					sizeof(*rx->head_cpu_ptr), &dma_ptr);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* write-back address must be 4-byte aligned for the hardware */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->last_head = 0;
	/* pre-populate every slot with a mapped receive buffer */
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_allocate_ring_element(rx, index);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	lan743x_rx_ring_cleanup(rx);
	return ret;
}
/* lan743x_rx_close - stop an RX channel and free its resources
 * @rx: RX channel to shut down
 *
 * Sequence: disable the RX FIFO and wait for it to drain, stop the
 * DMAC channel and wait for it to halt, mask the channel's interrupts,
 * tear down NAPI, and release the ring memory.  The order mirrors the
 * reverse of lan743x_rx_open().
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	/* disable the FIFO first so no new frames are queued to DMA */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	/* mask this channel's interrupt sources */
	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);
	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}
/* lan743x_rx_open - allocate and start an RX channel
 * @rx: RX channel to bring up
 *
 * Allocates the ring, soft-resets the DMAC channel, programs base and
 * write-back addresses, RX_CFG_A/B/C, tail pointer and interrupt
 * behaviour, then enables NAPI, interrupts, the DMA engine and finally
 * the RX FIFO.  The register write order follows the hardware bring-up
 * sequence and should not be reordered.
 *
 * Return: 0 on success, -EIO if the hardware head index is nonzero
 * after reset, or a negative errno from lan743x_rx_ring_init().
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	netif_napi_add(adapter->netdev,
		       &rx->napi, lan743x_rx_napi_poll,
		       rx->ring_size - 1);

	/* soft reset the rx dma channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));

	/* extra write-back/prefetch thresholds exist only after A0 */
	data = RX_CFG_A_RX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B: head padding, ring length, timestamping, burst */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	data |= RX_CFG_B_TS_ALL_RX_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;
	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C from the interrupt vector capabilities */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	/* tail starts one behind head: the whole ring is available */
	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	/* after reset the hardware head index must read back as zero */
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}

	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}
  2052. static int lan743x_netdev_close(struct net_device *netdev)
  2053. {
  2054. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2055. int index;
  2056. lan743x_tx_close(&adapter->tx[0]);
  2057. for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
  2058. lan743x_rx_close(&adapter->rx[index]);
  2059. lan743x_phy_close(adapter);
  2060. lan743x_mac_close(adapter);
  2061. lan743x_intr_close(adapter);
  2062. return 0;
  2063. }
/* lan743x_netdev_open - ndo_open handler
 * @netdev: network device being brought up
 *
 * Brings the device up in dependency order: interrupts, MAC, PHY, all
 * RX channels, then the TX channel.  On failure, everything already
 * opened is unwound in reverse via the goto ladder.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int lan743x_netdev_open(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;
	int ret;

	ret = lan743x_intr_open(adapter);
	if (ret)
		goto return_error;

	ret = lan743x_mac_open(adapter);
	if (ret)
		goto close_intr;

	ret = lan743x_phy_open(adapter);
	if (ret)
		goto close_mac;

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		ret = lan743x_rx_open(&adapter->rx[index]);
		if (ret)
			goto close_rx;
	}

	ret = lan743x_tx_open(&adapter->tx[0]);
	if (ret)
		goto close_rx;
	return 0;

close_rx:
	/* only close RX channels whose rings were actually allocated */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		if (adapter->rx[index].ring_cpu_ptr)
			lan743x_rx_close(&adapter->rx[index]);
	}
	lan743x_phy_close(adapter);

close_mac:
	lan743x_mac_close(adapter);

close_intr:
	lan743x_intr_close(adapter);

return_error:
	netif_warn(adapter, ifup, adapter->netdev,
		   "Error opening LAN743x\n");
	return ret;
}
  2102. static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
  2103. struct net_device *netdev)
  2104. {
  2105. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2106. return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
  2107. }
  2108. static int lan743x_netdev_ioctl(struct net_device *netdev,
  2109. struct ifreq *ifr, int cmd)
  2110. {
  2111. if (!netif_running(netdev))
  2112. return -EINVAL;
  2113. return phy_mii_ioctl(netdev->phydev, ifr, cmd);
  2114. }
  2115. static void lan743x_netdev_set_multicast(struct net_device *netdev)
  2116. {
  2117. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2118. lan743x_rfe_set_multicast(adapter);
  2119. }
  2120. static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
  2121. {
  2122. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2123. int ret = 0;
  2124. ret = lan743x_mac_set_mtu(adapter, new_mtu);
  2125. if (!ret)
  2126. netdev->mtu = new_mtu;
  2127. return ret;
  2128. }
/* lan743x_netdev_get_stats64 - ndo_get_stats64 handler
 * @netdev: network device being queried
 * @stats: structure to fill with counter values
 *
 * Populates @stats directly from the hardware statistics registers;
 * byte/error totals are sums of several per-category counters.
 *
 * NOTE(review): each field is a raw 32-bit register read - verify
 * whether these counters are clear-on-read or free-running, since
 * either way 64-bit accumulation across wraps is not performed here.
 */
static void lan743x_netdev_get_stats64(struct net_device *netdev,
				       struct rtnl_link_stats64 *stats)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
	/* byte counts: unicast + broadcast + multicast */
	stats->rx_bytes = lan743x_csr_read(adapter,
					   STAT_RX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_MULTICAST_BYTE_COUNT);
	stats->tx_bytes = lan743x_csr_read(adapter,
					   STAT_TX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_MULTICAST_BYTE_COUNT);
	/* error totals aggregate the individual error categories */
	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_ALIGNMENT_ERRORS) +
			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_OVERSIZE_FRAME_ERRORS);
	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
	stats->rx_dropped = lan743x_csr_read(adapter,
					     STAT_RX_DROPPED_FRAMES);
	stats->tx_dropped = lan743x_csr_read(adapter,
					     STAT_TX_EXCESSIVE_COLLISION);
	/* multicast combines RX and TX multicast frame counts */
	stats->multicast = lan743x_csr_read(adapter,
					    STAT_RX_MULTICAST_FRAMES) +
			   lan743x_csr_read(adapter,
					    STAT_TX_MULTICAST_FRAMES);
	stats->collisions = lan743x_csr_read(adapter,
					     STAT_TX_SINGLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_MULTIPLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_LATE_COLLISIONS);
}
  2174. static int lan743x_netdev_set_mac_address(struct net_device *netdev,
  2175. void *addr)
  2176. {
  2177. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2178. struct sockaddr *sock_addr = addr;
  2179. int ret;
  2180. ret = eth_prepare_mac_addr_change(netdev, sock_addr);
  2181. if (ret)
  2182. return ret;
  2183. ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
  2184. lan743x_mac_set_address(adapter, sock_addr->sa_data);
  2185. lan743x_rfe_update_mac_address(adapter);
  2186. return 0;
  2187. }
/* net_device operations dispatched to this driver by the core stack */
static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open		= lan743x_netdev_open,
	.ndo_stop		= lan743x_netdev_close,
	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
	.ndo_do_ioctl		= lan743x_netdev_ioctl,
	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
	.ndo_change_mtu		= lan743x_netdev_change_mtu,
	.ndo_get_stats64	= lan743x_netdev_get_stats64,
	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
};
/* lan743x_hardware_cleanup - quiesce the device
 * @adapter: adapter to quiesce
 *
 * Masks every interrupt source so the hardware stops asserting IRQs.
 */
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}
/* lan743x_mdiobus_cleanup - unregister the MDIO bus
 * @adapter: adapter whose bus is removed
 *
 * The bus structure itself is devm-allocated and freed automatically.
 */
static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}
/* lan743x_full_cleanup - undo everything lan743x_pcidev_probe() set up
 * @adapter: adapter being torn down
 *
 * Order is the reverse of probe: netdev, MDIO bus, interrupt masking,
 * PCI resources.
 */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);
	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}
/* lan743x_hardware_init - one-time hardware and channel-state setup
 * @adapter: adapter to initialize
 * @pdev: backing PCI device
 *
 * Masks all interrupts, initializes the MAC, PHY and DMA controller,
 * programs the receive filter with the current MAC address, and fills
 * in the per-channel (RX and TX) bookkeeping structures.
 *
 * Return: 0 on success, negative errno from a failed sub-init.
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	int index;
	int ret;

	adapter->intr.irq = adapter->pdev->irq;
	/* mask everything until the interrupt paths are configured */
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
	mutex_init(&adapter->dp_lock);

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_phy_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	/* bind each RX channel struct to its channel number */
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	/* only a single TX channel is used by this driver */
	tx = &adapter->tx[0];
	tx->adapter = adapter;
	tx->channel_number = 0;
	spin_lock_init(&tx->ring_lock);
	return 0;
}
  2242. static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
  2243. {
  2244. int ret;
  2245. adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
  2246. if (!(adapter->mdiobus)) {
  2247. ret = -ENOMEM;
  2248. goto return_error;
  2249. }
  2250. adapter->mdiobus->priv = (void *)adapter;
  2251. adapter->mdiobus->read = lan743x_mdiobus_read;
  2252. adapter->mdiobus->write = lan743x_mdiobus_write;
  2253. adapter->mdiobus->name = "lan743x-mdiobus";
  2254. snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
  2255. "pci-%s", pci_name(adapter->pdev));
  2256. /* set to internal PHY id */
  2257. adapter->mdiobus->phy_mask = ~(u32)BIT(1);
  2258. /* register mdiobus */
  2259. ret = mdiobus_register(adapter->mdiobus);
  2260. if (ret < 0)
  2261. goto return_error;
  2262. return 0;
  2263. return_error:
  2264. return ret;
  2265. }
/**
 * lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int lan743x_pcidev_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct lan743x_adapter *adapter = NULL;
	struct net_device *netdev = NULL;
	int ret = -ENODEV;

	/* devm allocation: the netdev is freed automatically on detach */
	netdev = devm_alloc_etherdev(&pdev->dev,
				     sizeof(struct lan743x_adapter));
	if (!netdev)
		goto return_error;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;

	ret = lan743x_pci_init(adapter, pdev);
	if (ret)
		goto return_error;

	ret = lan743x_csr_init(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_mdiobus_init(adapter);
	if (ret)
		goto cleanup_hardware;

	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
	adapter->netdev->hw_features = adapter->netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = register_netdev(adapter->netdev);
	if (ret < 0)
		goto cleanup_mdiobus;
	return 0;

cleanup_mdiobus:
	lan743x_mdiobus_cleanup(adapter);

cleanup_hardware:
	lan743x_hardware_cleanup(adapter);

cleanup_pci:
	lan743x_pci_cleanup(adapter);

return_error:
	pr_warn("Initialization failed\n");
	return ret;
}
  2325. /**
  2326. * lan743x_pcidev_remove - Device Removal Routine
  2327. * @pdev: PCI device information struct
  2328. *
  2329. * this is called by the PCI subsystem to alert the driver
  2330. * that it should release a PCI device. This could be caused by a
  2331. * Hot-Plug event, or because the driver is going to be removed from
  2332. * memory.
  2333. **/
  2334. static void lan743x_pcidev_remove(struct pci_dev *pdev)
  2335. {
  2336. struct net_device *netdev = pci_get_drvdata(pdev);
  2337. struct lan743x_adapter *adapter = netdev_priv(netdev);
  2338. lan743x_full_cleanup(adapter);
  2339. }
/* Quiesce the device for system shutdown/reboot: detach the netdev,
 * close it if it is up, then halt the hardware.
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* detach/close must run under RTNL to serialize against
	 * concurrent ndo_open/ndo_stop.
	 */
	rtnl_lock();
	netif_device_detach(netdev);
	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();
	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}
  2356. static const struct pci_device_id lan743x_pcidev_tbl[] = {
  2357. { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
  2358. { 0, }
  2359. };
/* PCI driver glue: binds the ID table to the probe/remove/shutdown
 * callbacks above.
 */
static struct pci_driver lan743x_pcidev_driver = {
	.name = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe = lan743x_pcidev_probe,
	.remove = lan743x_pcidev_remove,
	.shutdown = lan743x_pcidev_shutdown,
};
/* Generates the module init/exit boilerplate that registers and
 * unregisters the PCI driver.
 */
module_pci_driver(lan743x_pcidev_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");