/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
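
/* Check that every testable bit in each register can be set and cleared
 * in isolation; the original register contents are restored afterwards.
 */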
int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}

/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}
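
/* Pin the TX descriptor ring in the buffer table and push the TXQ
 * pointer-table entry (event queue, label, size and checksum settings)
 * to the NIC.
 */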
void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}
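
/* Pin the RX descriptor ring and push the RXQ pointer-table entry to the
 * NIC, enabling scatter/jumbo mode as appropriate for this NIC revision.
 */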
void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible). In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
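
/* Check whether the TX queues have actually finished flushing, draining
 * any queue whose flush completed without a completion event.  Returns
 * true only if every TX queue flush has completed.
 */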
static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool i = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
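
/* Flush and shut down all TX and RX DMA queues, skipping the flush if DMA
 * is disabled or if we are in EEH recovery.
 */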
int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete())
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
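
/* Generate a driver event carrying the given magic value on this channel */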
static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct ef4_rx_queue *rx_queue;
	struct ef4_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = ef4_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EF4_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EF4_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EF4_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EF4_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_tx_queue *tx_queue;
	int qid;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = ef4_get_channel(efx, qid);
	if (!ef4_channel_has_rx_queue(channel))
		return;
	rx_queue = ef4_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
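
/* A queue has drained; decrement the count of active queues and wake any
 * flush waiter once the wake condition is met.
 */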
static void
ef4_farch_handle_drain_event(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
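
/* Handle driver-generated (magic) events: event queue tests, deferred RX
 * refills and RX/TX drain notifications.
 */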
static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
					     ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_queue *rx_queue =
		ef4_channel_has_rx_queue(channel) ?
		ef4_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EF4_CHANNEL_MAGIC_CODE(magic);

	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so ef4_process_channel() won't refill the
		 * queue. Refill it here */
		ef4_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		ef4_farch_handle_drain_event(channel);
	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
		ef4_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EF4_QWORD_FMT"\n",
			  channel->channel, EF4_QWORD_VAL(*event));
	}
}
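
/* Handle NIC-generated driver events: queue flush completions, EVQ init,
 * SRAM updates, wakeups, timer expiries and descriptor fetch errors.
 */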
static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx,
				   EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
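
/* Process events on this channel's event queue, up to the given budget.
 * Returns the number of RX events handled.
 */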
int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int read_ptr;
	ef4_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = ef4_event(channel, read_ptr);
		event = *p_event;

		if (!ef4_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EF4_QWORD_FMT"\n",
			   channel->channel, EF4_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EF4_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			ef4_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += ef4_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			ef4_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			ef4_farch_handle_driver_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EF4_QWORD_FMT ")\n", channel->channel,
				  ev_code, EF4_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int ef4_farch_ev_probe(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return ef4_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(ef4_qword_t));
}
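
/* Pin the event queue buffer, fill it with empty (all-ones) events and
 * point the hardware EVQ pointer-table entry at it.
 */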
int ef4_farch_ev_init(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	ef4_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void ef4_farch_ev_fini(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	/* Remove event queue from card */
	EF4_ZERO_OWORD(reg);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	ef4_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{
	ef4_free_special_buffer(channel->efx, &channel->eventq);
}

void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{
	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
  1209. /* Enable/disable/generate interrupts */
  1210. static inline void ef4_farch_interrupts(struct ef4_nic *efx,
  1211. bool enabled, bool force)
  1212. {
  1213. ef4_oword_t int_en_reg_ker;
  1214. EF4_POPULATE_OWORD_3(int_en_reg_ker,
  1215. FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
  1216. FRF_AZ_KER_INT_KER, force,
  1217. FRF_AZ_DRV_INT_EN_KER, enabled);
  1218. ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
  1219. }
  1220. void ef4_farch_irq_enable_master(struct ef4_nic *efx)
  1221. {
  1222. EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
  1223. wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
  1224. ef4_farch_interrupts(efx, true, false);
  1225. }
  1226. void ef4_farch_irq_disable_master(struct ef4_nic *efx)
  1227. {
  1228. /* Disable interrupts */
  1229. ef4_farch_interrupts(efx, false, false);
  1230. }
  1231. /* Generate a test interrupt
  1232. * Interrupt must already have been enabled, otherwise nasty things
  1233. * may happen.
  1234. */
  1235. int ef4_farch_irq_test_generate(struct ef4_nic *efx)
  1236. {
  1237. ef4_farch_interrupts(efx, true, true);
  1238. return 0;
  1239. }
  1240. /* Process a fatal interrupt
  1241. * Disable bus mastering ASAP and schedule a reset
  1242. */
  1243. irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
  1244. {
  1245. struct falcon_nic_data *nic_data = efx->nic_data;
  1246. ef4_oword_t *int_ker = efx->irq_status.addr;
  1247. ef4_oword_t fatal_intr;
  1248. int error, mem_perr;
  1249. ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
  1250. error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
  1251. netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
  1252. EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
  1253. EF4_OWORD_VAL(fatal_intr),
  1254. error ? "disabling bus mastering" : "no recognised error");
	/* If this is a memory parity error, dump which blocks are offending */
  1256. mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
  1257. EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
  1258. if (mem_perr) {
  1259. ef4_oword_t reg;
  1260. ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
  1261. netif_err(efx, hw, efx->net_dev,
  1262. "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
  1263. EF4_OWORD_VAL(reg));
  1264. }
  1265. /* Disable both devices */
  1266. pci_clear_master(efx->pci_dev);
  1267. if (ef4_nic_is_dual_func(efx))
  1268. pci_clear_master(nic_data->pci_dev2);
  1269. ef4_farch_irq_disable_master(efx);
  1270. /* Count errors and reset or disable the NIC accordingly */
  1271. if (efx->int_error_count == 0 ||
  1272. time_after(jiffies, efx->int_error_expire)) {
  1273. efx->int_error_count = 0;
  1274. efx->int_error_expire =
  1275. jiffies + EF4_INT_ERROR_EXPIRE * HZ;
  1276. }
  1277. if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
  1278. netif_err(efx, hw, efx->net_dev,
  1279. "SYSTEM ERROR - reset scheduled\n");
  1280. ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
  1281. } else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
  1285. ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
  1286. }
  1287. return IRQ_HANDLED;
  1288. }
/* Handle a legacy interrupt
 * Acknowledge the interrupt and schedule event queue processing.
 */
  1292. irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
  1293. {
  1294. struct ef4_nic *efx = dev_id;
  1295. bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
  1296. ef4_oword_t *int_ker = efx->irq_status.addr;
  1297. irqreturn_t result = IRQ_NONE;
  1298. struct ef4_channel *channel;
  1299. ef4_dword_t reg;
  1300. u32 queues;
  1301. int syserr;
  1302. /* Read the ISR which also ACKs the interrupts */
  1303. ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
  1304. queues = EF4_EXTRACT_DWORD(reg, 0, 31);
  1305. /* Legacy interrupts are disabled too late by the EEH kernel
  1306. * code. Disable them earlier.
  1307. * If an EEH error occurred, the read will have returned all ones.
  1308. */
  1309. if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
  1310. !efx->eeh_disabled_legacy_irq) {
  1311. disable_irq_nosync(efx->legacy_irq);
  1312. efx->eeh_disabled_legacy_irq = true;
  1313. }
  1314. /* Handle non-event-queue sources */
  1315. if (queues & (1U << efx->irq_level) && soft_enabled) {
  1316. syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
  1317. if (unlikely(syserr))
  1318. return ef4_farch_fatal_interrupt(efx);
  1319. efx->last_irq_cpu = raw_smp_processor_id();
  1320. }
  1321. if (queues != 0) {
  1322. efx->irq_zero_count = 0;
  1323. /* Schedule processing of any interrupting queues */
  1324. if (likely(soft_enabled)) {
  1325. ef4_for_each_channel(channel, efx) {
  1326. if (queues & 1)
  1327. ef4_schedule_channel_irq(channel);
  1328. queues >>= 1;
  1329. }
  1330. }
  1331. result = IRQ_HANDLED;
  1332. } else {
  1333. ef4_qword_t *event;
  1334. /* Legacy ISR read can return zero once (SF bug 15783) */
  1335. /* We can't return IRQ_HANDLED more than once on seeing ISR=0
  1336. * because this might be a shared interrupt. */
  1337. if (efx->irq_zero_count++ == 0)
  1338. result = IRQ_HANDLED;
  1339. /* Ensure we schedule or rearm all event queues */
  1340. if (likely(soft_enabled)) {
  1341. ef4_for_each_channel(channel, efx) {
  1342. event = ef4_event(channel,
  1343. channel->eventq_read_ptr);
  1344. if (ef4_event_present(event))
  1345. ef4_schedule_channel_irq(channel);
  1346. else
  1347. ef4_farch_ev_read_ack(channel);
  1348. }
  1349. }
  1350. }
  1351. if (result == IRQ_HANDLED)
  1352. netif_vdbg(efx, intr, efx->net_dev,
  1353. "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
  1354. irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
  1355. return result;
  1356. }
  1357. /* Handle an MSI interrupt
  1358. *
  1359. * Handle an MSI hardware interrupt. This routine schedules event
  1360. * queue processing. No interrupt acknowledgement cycle is necessary.
  1361. * Also, we never need to check that the interrupt is for us, since
  1362. * MSI interrupts cannot be shared.
  1363. */
  1364. irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
  1365. {
  1366. struct ef4_msi_context *context = dev_id;
  1367. struct ef4_nic *efx = context->efx;
  1368. ef4_oword_t *int_ker = efx->irq_status.addr;
  1369. int syserr;
  1370. netif_vdbg(efx, intr, efx->net_dev,
  1371. "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
  1372. irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
  1373. if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
  1374. return IRQ_HANDLED;
  1375. /* Handle non-event-queue sources */
  1376. if (context->index == efx->irq_level) {
  1377. syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
  1378. if (unlikely(syserr))
  1379. return ef4_farch_fatal_interrupt(efx);
  1380. efx->last_irq_cpu = raw_smp_processor_id();
  1381. }
  1382. /* Schedule processing of the channel */
  1383. ef4_schedule_channel_irq(efx->channel[context->index]);
  1384. return IRQ_HANDLED;
  1385. }
  1386. /* Setup RSS indirection table.
  1387. * This maps from the hash value of the packet to RXQ
  1388. */
  1389. void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
  1390. {
  1391. size_t i = 0;
  1392. ef4_dword_t dword;
  1393. BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
  1394. BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
  1395. FR_BZ_RX_INDIRECTION_TBL_ROWS);
  1396. for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
  1397. EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
  1398. efx->rx_indir_table[i]);
  1399. ef4_writed(efx, &dword,
  1400. FR_BZ_RX_INDIRECTION_TBL +
  1401. FR_BZ_RX_INDIRECTION_TBL_STEP * i);
  1402. }
  1403. }
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 *     0                          buftbl entries for channels
 *     efx->vf_buftbl_base        buftbl entries for SR-IOV
 *     efx->rx_dc_base            RX descriptor caches
 *     efx->tx_dc_base            TX descriptor caches
 */
  1413. void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
  1414. {
  1415. unsigned vi_count, buftbl_min;
  1416. /* Account for the buffer table entries backing the datapath channels
  1417. * and the descriptor caches for those channels.
  1418. */
  1419. buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
  1420. efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
  1421. efx->n_channels * EF4_MAX_EVQ_SIZE)
  1422. * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
  1423. vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
  1424. efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
  1425. efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
  1426. }
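/* Summary of the arithmetic above (no new behaviour): the TX descriptor
 * caches occupy the top vi_count * TX_DC_ENTRIES qwords of usable SRAM, the
 * RX descriptor caches sit immediately below them, and everything from 0 up
 * to rx_dc_base remains available for buffer table entries.
 */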
  1427. u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
  1428. {
  1429. ef4_oword_t altera_build;
  1430. ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
  1431. return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
  1432. }
  1433. void ef4_farch_init_common(struct ef4_nic *efx)
  1434. {
  1435. ef4_oword_t temp;
  1436. /* Set positions of descriptor caches in SRAM. */
  1437. EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
  1438. ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
  1439. EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
  1440. ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
  1441. /* Set TX descriptor cache size. */
  1442. BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
  1443. EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
  1444. ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
  1445. /* Set RX descriptor cache size. Set low watermark to size-8, as
  1446. * this allows most efficient prefetching.
  1447. */
  1448. BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
  1449. EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
  1450. ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
  1451. EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
  1452. ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
  1453. /* Program INT_KER address */
  1454. EF4_POPULATE_OWORD_2(temp,
  1455. FRF_AZ_NORM_INT_VEC_DIS_KER,
  1456. EF4_INT_MODE_USE_MSI(efx),
  1457. FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
  1458. ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
  1459. /* Use a valid MSI-X vector */
  1460. efx->irq_level = 0;
  1461. /* Enable all the genuinely fatal interrupts. (They are still
  1462. * masked by the overall interrupt mask, controlled by
  1463. * falcon_interrupts()).
  1464. *
  1465. * Note: All other fatal interrupts are enabled
  1466. */
  1467. EF4_POPULATE_OWORD_3(temp,
  1468. FRF_AZ_ILL_ADR_INT_KER_EN, 1,
  1469. FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
  1470. FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
  1471. EF4_INVERT_OWORD(temp);
  1472. ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
  1473. /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
  1474. * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
  1475. */
  1476. ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
  1477. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
  1478. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
  1479. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
  1480. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
  1481. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
  1482. /* Enable SW_EV to inherit in char driver - assume harmless here */
  1483. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
  1484. /* Prefetch threshold 2 => fetch when descriptor cache half empty */
  1485. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
  1486. /* Disable hardware watchdog which can misfire */
  1487. EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
  1488. /* Squash TX of packets of 16 bytes or less */
  1489. if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
  1490. EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
  1491. ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
  1492. if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
  1493. EF4_POPULATE_OWORD_4(temp,
  1494. /* Default values */
  1495. FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
  1496. FRF_BZ_TX_PACE_SB_AF, 0xb,
  1497. FRF_BZ_TX_PACE_FB_BASE, 0,
  1498. /* Allow large pace values in the
  1499. * fast bin. */
  1500. FRF_BZ_TX_PACE_BIN_TH,
  1501. FFE_BZ_TX_PACE_RESERVED);
  1502. ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
  1503. }
  1504. }
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD	3
#define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL	1

/* Hard maximum search limit. Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
 * table is full.
 */
#define EF4_FARCH_FILTER_CTL_SRCH_MAX		200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX	5
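/* For illustration only: when these limits are pushed to hardware in
 * ef4_farch_filter_push_rx_config()/ef4_farch_filter_push_tx_limits(), the
 * value actually programmed is the per-type search_limit plus the matching
 * fudge factor, e.g.
 *
 *	table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
 *		EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL
 */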
enum ef4_farch_filter_type {
	EF4_FARCH_FILTER_TCP_FULL = 0,
	EF4_FARCH_FILTER_TCP_WILD,
	EF4_FARCH_FILTER_UDP_FULL,
	EF4_FARCH_FILTER_UDP_WILD,
	EF4_FARCH_FILTER_MAC_FULL = 4,
	EF4_FARCH_FILTER_MAC_WILD,
	EF4_FARCH_FILTER_UC_DEF = 8,
	EF4_FARCH_FILTER_MC_DEF,
	EF4_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum ef4_farch_filter_table_id {
	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
	EF4_FARCH_FILTER_TABLE_RX_MAC,
	EF4_FARCH_FILTER_TABLE_RX_DEF,
	EF4_FARCH_FILTER_TABLE_TX_MAC,
	EF4_FARCH_FILTER_TABLE_COUNT,
};

enum ef4_farch_filter_index {
	EF4_FARCH_FILTER_INDEX_UC_DEF,
	EF4_FARCH_FILTER_INDEX_MC_DEF,
	EF4_FARCH_FILTER_SIZE_RX_DEF,
};

struct ef4_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

struct ef4_farch_filter_table {
	enum ef4_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct ef4_farch_filter_spec *spec;
	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
};

struct ef4_farch_filter_state {
	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
};
  1568. static void
  1569. ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
  1570. struct ef4_farch_filter_table *table,
  1571. unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 ef4_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 ef4_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
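/* Taken together, the two helpers above define the open-addressing probe
 * sequence used by ef4_farch_filter_insert(): the n-th slot examined
 * (n = 0, 1, 2, ...) is, in effect,
 *
 *	(ef4_farch_filter_hash(key) + n * ef4_farch_filter_increment(key))
 *		& (table->size - 1)
 *
 * and since the increment is always odd, the sequence can reach every slot
 * of a power-of-two sized table.  (A sketch of the existing loop, not extra
 * driver logic.)
 */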
  1592. static enum ef4_farch_filter_table_id
  1593. ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
  1594. {
  1595. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
  1596. (EF4_FARCH_FILTER_TCP_FULL >> 2));
  1597. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
  1598. (EF4_FARCH_FILTER_TCP_WILD >> 2));
  1599. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
  1600. (EF4_FARCH_FILTER_UDP_FULL >> 2));
  1601. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
  1602. (EF4_FARCH_FILTER_UDP_WILD >> 2));
  1603. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
  1604. (EF4_FARCH_FILTER_MAC_FULL >> 2));
  1605. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
  1606. (EF4_FARCH_FILTER_MAC_WILD >> 2));
  1607. BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
  1608. EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
  1609. return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
  1610. }
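/* In plainer terms (derived from the mapping above): IP filter types 0-3
 * select the RX_IP table, MAC types 4-5 the RX_MAC table, default types 8-9
 * the RX_DEF table (type >> 2), and EF4_FILTER_FLAG_TX shifts a MAC spec two
 * tables along to TX_MAC.
 */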
  1611. static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
  1612. {
  1613. struct ef4_farch_filter_state *state = efx->filter_state;
  1614. struct ef4_farch_filter_table *table;
  1615. ef4_oword_t filter_ctl;
  1616. ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
  1617. table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
  1618. EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
  1619. table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
  1620. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
  1621. EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
  1622. table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
  1623. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
  1624. EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
  1625. table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
  1626. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
  1627. EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
  1628. table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
  1629. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
  1630. table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
  1631. if (table->size) {
  1632. EF4_SET_OWORD_FIELD(
  1633. filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
  1634. table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
  1635. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
  1636. EF4_SET_OWORD_FIELD(
  1637. filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
  1638. table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
  1639. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
  1640. }
  1641. table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
  1642. if (table->size) {
  1643. EF4_SET_OWORD_FIELD(
  1644. filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
  1645. table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
  1646. EF4_SET_OWORD_FIELD(
  1647. filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
  1648. !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
  1649. EF4_FILTER_FLAG_RX_RSS));
  1650. EF4_SET_OWORD_FIELD(
  1651. filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
  1652. table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
  1653. EF4_SET_OWORD_FIELD(
  1654. filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
  1655. !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
  1656. EF4_FILTER_FLAG_RX_RSS));
  1657. /* There is a single bit to enable RX scatter for all
  1658. * unmatched packets. Only set it if scatter is
  1659. * enabled in both filter specs.
  1660. */
  1661. EF4_SET_OWORD_FIELD(
  1662. filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
  1663. !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
  1664. table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
  1665. EF4_FILTER_FLAG_RX_SCATTER));
  1666. } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
  1667. /* We don't expose 'default' filters because unmatched
  1668. * packets always go to the queue number found in the
  1669. * RSS table. But we still need to set the RX scatter
  1670. * bit here.
  1671. */
  1672. EF4_SET_OWORD_FIELD(
  1673. filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
  1674. efx->rx_scatter);
  1675. }
  1676. ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
  1677. }
  1678. static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
  1679. {
  1680. struct ef4_farch_filter_state *state = efx->filter_state;
  1681. struct ef4_farch_filter_table *table;
  1682. ef4_oword_t tx_cfg;
  1683. ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
  1684. table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
  1685. if (table->size) {
  1686. EF4_SET_OWORD_FIELD(
  1687. tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
  1688. table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
  1689. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
  1690. EF4_SET_OWORD_FIELD(
  1691. tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
  1692. table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
  1693. EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
  1694. }
  1695. ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
  1696. }
  1697. static int
  1698. ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
  1699. const struct ef4_filter_spec *gen_spec)
  1700. {
  1701. bool is_full = false;
  1702. if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
  1703. gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
  1704. return -EINVAL;
  1705. spec->priority = gen_spec->priority;
  1706. spec->flags = gen_spec->flags;
  1707. spec->dmaq_id = gen_spec->dmaq_id;
  1708. switch (gen_spec->match_flags) {
  1709. case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
  1710. EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
  1711. EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
  1712. is_full = true;
  1713. /* fall through */
  1714. case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
  1715. EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
  1716. __be32 rhost, host1, host2;
  1717. __be16 rport, port1, port2;
  1718. EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
  1719. if (gen_spec->ether_type != htons(ETH_P_IP))
  1720. return -EPROTONOSUPPORT;
  1721. if (gen_spec->loc_port == 0 ||
  1722. (is_full && gen_spec->rem_port == 0))
  1723. return -EADDRNOTAVAIL;
  1724. switch (gen_spec->ip_proto) {
  1725. case IPPROTO_TCP:
  1726. spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
  1727. EF4_FARCH_FILTER_TCP_WILD);
  1728. break;
  1729. case IPPROTO_UDP:
  1730. spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
  1731. EF4_FARCH_FILTER_UDP_WILD);
  1732. break;
  1733. default:
  1734. return -EPROTONOSUPPORT;
  1735. }
  1736. /* Filter is constructed in terms of source and destination,
  1737. * with the odd wrinkle that the ports are swapped in a UDP
  1738. * wildcard filter. We need to convert from local and remote
  1739. * (= zero for wildcard) addresses.
  1740. */
  1741. rhost = is_full ? gen_spec->rem_host[0] : 0;
  1742. rport = is_full ? gen_spec->rem_port : 0;
  1743. host1 = rhost;
  1744. host2 = gen_spec->loc_host[0];
  1745. if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
  1746. port1 = gen_spec->loc_port;
  1747. port2 = rport;
  1748. } else {
  1749. port1 = rport;
  1750. port2 = gen_spec->loc_port;
  1751. }
  1752. spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
  1753. spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
  1754. spec->data[2] = ntohl(host2);
  1755. break;
  1756. }
  1757. case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
  1758. is_full = true;
  1759. /* fall through */
  1760. case EF4_FILTER_MATCH_LOC_MAC:
  1761. spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
  1762. EF4_FARCH_FILTER_MAC_WILD);
  1763. spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
  1764. spec->data[1] = (gen_spec->loc_mac[2] << 24 |
  1765. gen_spec->loc_mac[3] << 16 |
  1766. gen_spec->loc_mac[4] << 8 |
  1767. gen_spec->loc_mac[5]);
  1768. spec->data[2] = (gen_spec->loc_mac[0] << 8 |
  1769. gen_spec->loc_mac[1]);
  1770. break;
  1771. case EF4_FILTER_MATCH_LOC_MAC_IG:
  1772. spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
  1773. EF4_FARCH_FILTER_MC_DEF :
  1774. EF4_FARCH_FILTER_UC_DEF);
  1775. memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
  1776. break;
  1777. default:
  1778. return -EPROTONOSUPPORT;
  1779. }
  1780. return 0;
  1781. }
  1782. static void
  1783. ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
  1784. const struct ef4_farch_filter_spec *spec)
  1785. {
  1786. bool is_full = false;
  1787. /* *gen_spec should be completely initialised, to be consistent
  1788. * with ef4_filter_init_{rx,tx}() and in case we want to copy
  1789. * it back to userland.
  1790. */
  1791. memset(gen_spec, 0, sizeof(*gen_spec));
  1792. gen_spec->priority = spec->priority;
  1793. gen_spec->flags = spec->flags;
  1794. gen_spec->dmaq_id = spec->dmaq_id;
  1795. switch (spec->type) {
  1796. case EF4_FARCH_FILTER_TCP_FULL:
  1797. case EF4_FARCH_FILTER_UDP_FULL:
  1798. is_full = true;
  1799. /* fall through */
  1800. case EF4_FARCH_FILTER_TCP_WILD:
  1801. case EF4_FARCH_FILTER_UDP_WILD: {
  1802. __be32 host1, host2;
  1803. __be16 port1, port2;
  1804. gen_spec->match_flags =
  1805. EF4_FILTER_MATCH_ETHER_TYPE |
  1806. EF4_FILTER_MATCH_IP_PROTO |
  1807. EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
  1808. if (is_full)
  1809. gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
  1810. EF4_FILTER_MATCH_REM_PORT);
  1811. gen_spec->ether_type = htons(ETH_P_IP);
  1812. gen_spec->ip_proto =
  1813. (spec->type == EF4_FARCH_FILTER_TCP_FULL ||
  1814. spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
  1815. IPPROTO_TCP : IPPROTO_UDP;
  1816. host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
  1817. port1 = htons(spec->data[0]);
  1818. host2 = htonl(spec->data[2]);
  1819. port2 = htons(spec->data[1] >> 16);
  1820. if (spec->flags & EF4_FILTER_FLAG_TX) {
  1821. gen_spec->loc_host[0] = host1;
  1822. gen_spec->rem_host[0] = host2;
  1823. } else {
  1824. gen_spec->loc_host[0] = host2;
  1825. gen_spec->rem_host[0] = host1;
  1826. }
  1827. if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
  1828. (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
  1829. gen_spec->loc_port = port1;
  1830. gen_spec->rem_port = port2;
  1831. } else {
  1832. gen_spec->loc_port = port2;
  1833. gen_spec->rem_port = port1;
  1834. }
  1835. break;
  1836. }
  1837. case EF4_FARCH_FILTER_MAC_FULL:
  1838. is_full = true;
  1839. /* fall through */
  1840. case EF4_FARCH_FILTER_MAC_WILD:
  1841. gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
  1842. if (is_full)
  1843. gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
  1844. gen_spec->loc_mac[0] = spec->data[2] >> 8;
  1845. gen_spec->loc_mac[1] = spec->data[2];
  1846. gen_spec->loc_mac[2] = spec->data[1] >> 24;
  1847. gen_spec->loc_mac[3] = spec->data[1] >> 16;
  1848. gen_spec->loc_mac[4] = spec->data[1] >> 8;
  1849. gen_spec->loc_mac[5] = spec->data[1];
  1850. gen_spec->outer_vid = htons(spec->data[0]);
  1851. break;
  1852. case EF4_FARCH_FILTER_UC_DEF:
  1853. case EF4_FARCH_FILTER_MC_DEF:
  1854. gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
  1855. gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
  1856. break;
  1857. default:
  1858. WARN_ON(1);
  1859. break;
  1860. }
  1861. }
  1862. static void
  1863. ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
  1864. struct ef4_farch_filter_spec *spec)
  1865. {
  1866. /* If there's only one channel then disable RSS for non VF
  1867. * traffic, thereby allowing VFs to use RSS when the PF can't.
  1868. */
  1869. spec->priority = EF4_FILTER_PRI_AUTO;
  1870. spec->flags = (EF4_FILTER_FLAG_RX |
  1871. (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
  1872. (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
  1873. spec->dmaq_id = 0;
  1874. }
  1875. /* Build a filter entry and return its n-tuple key. */
  1876. static u32 ef4_farch_filter_build(ef4_oword_t *filter,
  1877. struct ef4_farch_filter_spec *spec)
  1878. {
  1879. u32 data3;
  1880. switch (ef4_farch_filter_spec_table_id(spec)) {
  1881. case EF4_FARCH_FILTER_TABLE_RX_IP: {
  1882. bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
  1883. spec->type == EF4_FARCH_FILTER_UDP_WILD);
  1884. EF4_POPULATE_OWORD_7(
  1885. *filter,
  1886. FRF_BZ_RSS_EN,
  1887. !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
  1888. FRF_BZ_SCATTER_EN,
  1889. !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
  1890. FRF_BZ_TCP_UDP, is_udp,
  1891. FRF_BZ_RXQ_ID, spec->dmaq_id,
  1892. EF4_DWORD_2, spec->data[2],
  1893. EF4_DWORD_1, spec->data[1],
  1894. EF4_DWORD_0, spec->data[0]);
  1895. data3 = is_udp;
  1896. break;
  1897. }
  1898. case EF4_FARCH_FILTER_TABLE_RX_MAC: {
  1899. bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
  1900. EF4_POPULATE_OWORD_7(
  1901. *filter,
  1902. FRF_CZ_RMFT_RSS_EN,
  1903. !!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
  1904. FRF_CZ_RMFT_SCATTER_EN,
  1905. !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
  1906. FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
  1907. FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
  1908. FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
  1909. FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
  1910. FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
  1911. data3 = is_wild;
  1912. break;
  1913. }
  1914. case EF4_FARCH_FILTER_TABLE_TX_MAC: {
  1915. bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
  1916. EF4_POPULATE_OWORD_5(*filter,
  1917. FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
  1918. FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
  1919. FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
  1920. FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
  1921. FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
  1922. data3 = is_wild | spec->dmaq_id << 1;
  1923. break;
  1924. }
  1925. default:
  1926. BUG();
  1927. }
  1928. return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
  1929. }
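/* Within this file, the key returned by ef4_farch_filter_build() is consumed
 * only by ef4_farch_filter_hash()/ef4_farch_filter_increment() when choosing
 * a slot in ef4_farch_filter_insert(); the key itself is never written to
 * hardware.
 */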
  1930. static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
  1931. const struct ef4_farch_filter_spec *right)
  1932. {
  1933. if (left->type != right->type ||
  1934. memcmp(left->data, right->data, sizeof(left->data)))
  1935. return false;
  1936. if (left->flags & EF4_FILTER_FLAG_TX &&
  1937. left->dmaq_id != right->dmaq_id)
  1938. return false;
  1939. return true;
  1940. }
/*
 * Construct/deconstruct external filter IDs. At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
 * accept user-provided IDs.
 */
  1949. #define EF4_FARCH_FILTER_MATCH_PRI_COUNT 5
  1950. static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
  1951. [EF4_FARCH_FILTER_TCP_FULL] = 0,
  1952. [EF4_FARCH_FILTER_UDP_FULL] = 0,
  1953. [EF4_FARCH_FILTER_TCP_WILD] = 1,
  1954. [EF4_FARCH_FILTER_UDP_WILD] = 1,
  1955. [EF4_FARCH_FILTER_MAC_FULL] = 2,
  1956. [EF4_FARCH_FILTER_MAC_WILD] = 3,
  1957. [EF4_FARCH_FILTER_UC_DEF] = 4,
  1958. [EF4_FARCH_FILTER_MC_DEF] = 4,
  1959. };
  1960. static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
  1961. EF4_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
  1962. EF4_FARCH_FILTER_TABLE_RX_IP,
  1963. EF4_FARCH_FILTER_TABLE_RX_MAC,
  1964. EF4_FARCH_FILTER_TABLE_RX_MAC,
  1965. EF4_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
  1966. EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
  1967. EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
  1968. };
  1969. #define EF4_FARCH_FILTER_INDEX_WIDTH 13
  1970. #define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
  1971. static inline u32
  1972. ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
  1973. unsigned int index)
  1974. {
  1975. unsigned int range;
  1976. range = ef4_farch_filter_type_match_pri[spec->type];
  1977. if (!(spec->flags & EF4_FILTER_FLAG_RX))
  1978. range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
  1979. return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
  1980. }
  1981. static inline enum ef4_farch_filter_table_id
  1982. ef4_farch_filter_id_table_id(u32 id)
  1983. {
  1984. unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
  1985. if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
  1986. return ef4_farch_filter_range_table[range];
  1987. else
  1988. return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
  1989. }
  1990. static inline unsigned int ef4_farch_filter_id_index(u32 id)
  1991. {
  1992. return id & EF4_FARCH_FILTER_INDEX_MASK;
  1993. }
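/* Putting these helpers together (a sketch of the encoding above, not a new
 * format): an external filter ID is
 *
 *	range << EF4_FARCH_FILTER_INDEX_WIDTH | index
 *
 * where "range" is the match priority (plus EF4_FARCH_FILTER_MATCH_PRI_COUNT
 * for TX filters) and "index" is the slot in the underlying table, so an RX
 * default filter at index 1 maps to (4 << 13) | 1.
 */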
  1994. u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
  1995. {
  1996. struct ef4_farch_filter_state *state = efx->filter_state;
  1997. unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
  1998. enum ef4_farch_filter_table_id table_id;
  1999. do {
  2000. table_id = ef4_farch_filter_range_table[range];
  2001. if (state->table[table_id].size != 0)
  2002. return range << EF4_FARCH_FILTER_INDEX_WIDTH |
  2003. state->table[table_id].size;
  2004. } while (range--);
  2005. return 0;
  2006. }
  2007. s32 ef4_farch_filter_insert(struct ef4_nic *efx,
  2008. struct ef4_filter_spec *gen_spec,
  2009. bool replace_equal)
  2010. {
  2011. struct ef4_farch_filter_state *state = efx->filter_state;
  2012. struct ef4_farch_filter_table *table;
  2013. struct ef4_farch_filter_spec spec;
  2014. ef4_oword_t filter;
  2015. int rep_index, ins_index;
  2016. unsigned int depth = 0;
  2017. int rc;
  2018. rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
  2019. if (rc)
  2020. return rc;
  2021. table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
  2022. if (table->size == 0)
  2023. return -EINVAL;
  2024. netif_vdbg(efx, hw, efx->net_dev,
  2025. "%s: type %d search_limit=%d", __func__, spec.type,
  2026. table->search_limit[spec.type]);
  2027. if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
  2028. /* One filter spec per type */
  2029. BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
  2030. BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
  2031. EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
  2032. rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
  2033. ins_index = rep_index;
  2034. spin_lock_bh(&efx->filter_lock);
  2035. } else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
  2051. u32 key = ef4_farch_filter_build(&filter, &spec);
  2052. unsigned int hash = ef4_farch_filter_hash(key);
  2053. unsigned int incr = ef4_farch_filter_increment(key);
  2054. unsigned int max_rep_depth = table->search_limit[spec.type];
  2055. unsigned int max_ins_depth =
  2056. spec.priority <= EF4_FILTER_PRI_HINT ?
  2057. EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
  2058. EF4_FARCH_FILTER_CTL_SRCH_MAX;
  2059. unsigned int i = hash & (table->size - 1);
  2060. ins_index = -1;
  2061. depth = 1;
  2062. spin_lock_bh(&efx->filter_lock);
  2063. for (;;) {
  2064. if (!test_bit(i, table->used_bitmap)) {
  2065. if (ins_index < 0)
  2066. ins_index = i;
  2067. } else if (ef4_farch_filter_equal(&spec,
  2068. &table->spec[i])) {
  2069. /* Case (a) */
  2070. if (ins_index < 0)
  2071. ins_index = i;
  2072. rep_index = i;
  2073. break;
  2074. }
  2075. if (depth >= max_rep_depth &&
  2076. (ins_index >= 0 || depth >= max_ins_depth)) {
  2077. /* Case (b) */
  2078. if (ins_index < 0) {
  2079. rc = -EBUSY;
  2080. goto out;
  2081. }
  2082. rep_index = -1;
  2083. break;
  2084. }
  2085. i = (i + incr) & (table->size - 1);
  2086. ++depth;
  2087. }
  2088. }
  2089. /* If we found a filter to be replaced, check whether we
  2090. * should do so
  2091. */
  2092. if (rep_index >= 0) {
  2093. struct ef4_farch_filter_spec *saved_spec =
  2094. &table->spec[rep_index];
  2095. if (spec.priority == saved_spec->priority && !replace_equal) {
  2096. rc = -EEXIST;
  2097. goto out;
  2098. }
  2099. if (spec.priority < saved_spec->priority) {
  2100. rc = -EPERM;
  2101. goto out;
  2102. }
  2103. if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
  2104. saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
  2105. spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
  2106. }
  2107. /* Insert the filter */
  2108. if (ins_index != rep_index) {
  2109. __set_bit(ins_index, table->used_bitmap);
  2110. ++table->used;
  2111. }
  2112. table->spec[ins_index] = spec;
  2113. if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
  2114. ef4_farch_filter_push_rx_config(efx);
  2115. } else {
  2116. if (table->search_limit[spec.type] < depth) {
  2117. table->search_limit[spec.type] = depth;
  2118. if (spec.flags & EF4_FILTER_FLAG_TX)
  2119. ef4_farch_filter_push_tx_limits(efx);
  2120. else
  2121. ef4_farch_filter_push_rx_config(efx);
  2122. }
  2123. ef4_writeo(efx, &filter,
  2124. table->offset + table->step * ins_index);
  2125. /* If we were able to replace a filter by inserting
  2126. * at a lower depth, clear the replaced filter
  2127. */
  2128. if (ins_index != rep_index && rep_index >= 0)
  2129. ef4_farch_filter_table_clear_entry(efx, table,
  2130. rep_index);
  2131. }
  2132. netif_vdbg(efx, hw, efx->net_dev,
  2133. "%s: filter type %d index %d rxq %u set",
  2134. __func__, spec.type, ins_index, spec.dmaq_id);
  2135. rc = ef4_farch_filter_make_id(&spec, ins_index);
  2136. out:
  2137. spin_unlock_bh(&efx->filter_lock);
  2138. return rc;
  2139. }
  2140. static void
  2141. ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
  2142. struct ef4_farch_filter_table *table,
  2143. unsigned int filter_idx)
  2144. {
  2145. static ef4_oword_t filter;
  2146. EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
  2147. BUG_ON(table->offset == 0); /* can't clear MAC default filters */
  2148. __clear_bit(filter_idx, table->used_bitmap);
  2149. --table->used;
  2150. memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
  2151. ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
  2152. /* If this filter required a greater search depth than
  2153. * any other, the search limit for its type can now be
  2154. * decreased. However, it is hard to determine that
  2155. * unless the table has become completely empty - in
  2156. * which case, all its search limits can be set to 0.
  2157. */
  2158. if (unlikely(table->used == 0)) {
  2159. memset(table->search_limit, 0, sizeof(table->search_limit));
  2160. if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
  2161. ef4_farch_filter_push_tx_limits(efx);
  2162. else
  2163. ef4_farch_filter_push_rx_config(efx);
  2164. }
  2165. }
  2166. static int ef4_farch_filter_remove(struct ef4_nic *efx,
  2167. struct ef4_farch_filter_table *table,
  2168. unsigned int filter_idx,
  2169. enum ef4_filter_priority priority)
  2170. {
  2171. struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
  2172. if (!test_bit(filter_idx, table->used_bitmap) ||
  2173. spec->priority != priority)
  2174. return -ENOENT;
  2175. if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
  2176. ef4_farch_filter_init_rx_auto(efx, spec);
  2177. ef4_farch_filter_push_rx_config(efx);
  2178. } else {
  2179. ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
  2180. }
  2181. return 0;
  2182. }
  2183. int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
  2184. enum ef4_filter_priority priority,
  2185. u32 filter_id)
  2186. {
  2187. struct ef4_farch_filter_state *state = efx->filter_state;
  2188. enum ef4_farch_filter_table_id table_id;
  2189. struct ef4_farch_filter_table *table;
  2190. unsigned int filter_idx;
  2191. struct ef4_farch_filter_spec *spec;
  2192. int rc;
  2193. table_id = ef4_farch_filter_id_table_id(filter_id);
  2194. if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
  2195. return -ENOENT;
  2196. table = &state->table[table_id];
  2197. filter_idx = ef4_farch_filter_id_index(filter_id);
  2198. if (filter_idx >= table->size)
  2199. return -ENOENT;
  2200. spec = &table->spec[filter_idx];
  2201. spin_lock_bh(&efx->filter_lock);
  2202. rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
  2203. spin_unlock_bh(&efx->filter_lock);
  2204. return rc;
  2205. }
  2206. int ef4_farch_filter_get_safe(struct ef4_nic *efx,
  2207. enum ef4_filter_priority priority,
  2208. u32 filter_id, struct ef4_filter_spec *spec_buf)
  2209. {
  2210. struct ef4_farch_filter_state *state = efx->filter_state;
  2211. enum ef4_farch_filter_table_id table_id;
  2212. struct ef4_farch_filter_table *table;
  2213. struct ef4_farch_filter_spec *spec;
  2214. unsigned int filter_idx;
  2215. int rc;
  2216. table_id = ef4_farch_filter_id_table_id(filter_id);
  2217. if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
  2218. return -ENOENT;
  2219. table = &state->table[table_id];
  2220. filter_idx = ef4_farch_filter_id_index(filter_id);
  2221. if (filter_idx >= table->size)
  2222. return -ENOENT;
  2223. spec = &table->spec[filter_idx];
  2224. spin_lock_bh(&efx->filter_lock);
  2225. if (test_bit(filter_idx, table->used_bitmap) &&
  2226. spec->priority == priority) {
  2227. ef4_farch_filter_to_gen_spec(spec_buf, spec);
  2228. rc = 0;
  2229. } else {
  2230. rc = -ENOENT;
  2231. }
  2232. spin_unlock_bh(&efx->filter_lock);
  2233. return rc;
  2234. }
  2235. static void
  2236. ef4_farch_filter_table_clear(struct ef4_nic *efx,
  2237. enum ef4_farch_filter_table_id table_id,
  2238. enum ef4_filter_priority priority)
  2239. {
  2240. struct ef4_farch_filter_state *state = efx->filter_state;
  2241. struct ef4_farch_filter_table *table = &state->table[table_id];
  2242. unsigned int filter_idx;
  2243. spin_lock_bh(&efx->filter_lock);
  2244. for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
  2245. if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
  2246. ef4_farch_filter_remove(efx, table,
  2247. filter_idx, priority);
  2248. }
  2249. spin_unlock_bh(&efx->filter_lock);
  2250. }
  2251. int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
  2252. enum ef4_filter_priority priority)
  2253. {
  2254. ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
  2255. priority);
  2256. ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
  2257. priority);
  2258. ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
  2259. priority);
  2260. return 0;
  2261. }
  2262. u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
  2263. enum ef4_filter_priority priority)
  2264. {
  2265. struct ef4_farch_filter_state *state = efx->filter_state;
  2266. enum ef4_farch_filter_table_id table_id;
  2267. struct ef4_farch_filter_table *table;
  2268. unsigned int filter_idx;
  2269. u32 count = 0;
  2270. spin_lock_bh(&efx->filter_lock);
  2271. for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
  2272. table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
  2273. table_id++) {
  2274. table = &state->table[table_id];
  2275. for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
  2276. if (test_bit(filter_idx, table->used_bitmap) &&
  2277. table->spec[filter_idx].priority == priority)
  2278. ++count;
  2279. }
  2280. }
  2281. spin_unlock_bh(&efx->filter_lock);
  2282. return count;
  2283. }
  2284. s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
  2285. enum ef4_filter_priority priority,
  2286. u32 *buf, u32 size)
  2287. {
  2288. struct ef4_farch_filter_state *state = efx->filter_state;
  2289. enum ef4_farch_filter_table_id table_id;
  2290. struct ef4_farch_filter_table *table;
  2291. unsigned int filter_idx;
  2292. s32 count = 0;
  2293. spin_lock_bh(&efx->filter_lock);
  2294. for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
  2295. table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
  2296. table_id++) {
  2297. table = &state->table[table_id];
  2298. for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
  2299. if (test_bit(filter_idx, table->used_bitmap) &&
  2300. table->spec[filter_idx].priority == priority) {
  2301. if (count == size) {
  2302. count = -EMSGSIZE;
  2303. goto out;
  2304. }
  2305. buf[count++] = ef4_farch_filter_make_id(
  2306. &table->spec[filter_idx], filter_idx);
  2307. }
  2308. }
  2309. }
  2310. out:
  2311. spin_unlock_bh(&efx->filter_lock);
  2312. return count;
  2313. }
/* Restore filter state after reset */
  2315. void ef4_farch_filter_table_restore(struct ef4_nic *efx)
  2316. {
  2317. struct ef4_farch_filter_state *state = efx->filter_state;
  2318. enum ef4_farch_filter_table_id table_id;
  2319. struct ef4_farch_filter_table *table;
  2320. ef4_oword_t filter;
  2321. unsigned int filter_idx;
  2322. spin_lock_bh(&efx->filter_lock);
  2323. for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
  2324. table = &state->table[table_id];
  2325. /* Check whether this is a regular register table */
  2326. if (table->step == 0)
  2327. continue;
  2328. for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
  2329. if (!test_bit(filter_idx, table->used_bitmap))
  2330. continue;
  2331. ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
  2332. ef4_writeo(efx, &filter,
  2333. table->offset + table->step * filter_idx);
  2334. }
  2335. }
  2336. ef4_farch_filter_push_rx_config(efx);
  2337. ef4_farch_filter_push_tx_limits(efx);
  2338. spin_unlock_bh(&efx->filter_lock);
  2339. }
  2340. void ef4_farch_filter_table_remove(struct ef4_nic *efx)
  2341. {
  2342. struct ef4_farch_filter_state *state = efx->filter_state;
  2343. enum ef4_farch_filter_table_id table_id;
  2344. for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
  2345. kfree(state->table[table_id].used_bitmap);
  2346. vfree(state->table[table_id].spec);
  2347. }
  2348. kfree(state);
  2349. }
  2350. int ef4_farch_filter_table_probe(struct ef4_nic *efx)
  2351. {
  2352. struct ef4_farch_filter_state *state;
  2353. struct ef4_farch_filter_table *table;
  2354. unsigned table_id;
  2355. state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
  2356. if (!state)
  2357. return -ENOMEM;
  2358. efx->filter_state = state;
  2359. if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
  2360. table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
  2361. table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
  2362. table->offset = FR_BZ_RX_FILTER_TBL0;
  2363. table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
  2364. table->step = FR_BZ_RX_FILTER_TBL0_STEP;
  2365. }
  2366. for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
  2367. table = &state->table[table_id];
  2368. if (table->size == 0)
  2369. continue;
  2370. table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
  2371. sizeof(unsigned long),
  2372. GFP_KERNEL);
  2373. if (!table->used_bitmap)
  2374. goto fail;
  2375. table->spec = vzalloc(table->size * sizeof(*table->spec));
  2376. if (!table->spec)
  2377. goto fail;
  2378. }
  2379. table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
  2380. if (table->size) {
  2381. /* RX default filters must always exist */
  2382. struct ef4_farch_filter_spec *spec;
  2383. unsigned i;
  2384. for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
  2385. spec = &table->spec[i];
  2386. spec->type = EF4_FARCH_FILTER_UC_DEF + i;
  2387. ef4_farch_filter_init_rx_auto(efx, spec);
  2388. __set_bit(i, table->used_bitmap);
  2389. }
  2390. }
  2391. ef4_farch_filter_push_rx_config(efx);
  2392. return 0;
  2393. fail:
  2394. ef4_farch_filter_table_remove(efx);
  2395. return -ENOMEM;
  2396. }
  2397. /* Update scatter enable flags for filters pointing to our own RX queues */
  2398. void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
  2399. {
  2400. struct ef4_farch_filter_state *state = efx->filter_state;
  2401. enum ef4_farch_filter_table_id table_id;
  2402. struct ef4_farch_filter_table *table;
  2403. ef4_oword_t filter;
  2404. unsigned int filter_idx;
  2405. spin_lock_bh(&efx->filter_lock);
  2406. for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
  2407. table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
  2408. table_id++) {
  2409. table = &state->table[table_id];
  2410. for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
  2411. if (!test_bit(filter_idx, table->used_bitmap) ||
  2412. table->spec[filter_idx].dmaq_id >=
  2413. efx->n_rx_channels)
  2414. continue;
  2415. if (efx->rx_scatter)
  2416. table->spec[filter_idx].flags |=
  2417. EF4_FILTER_FLAG_RX_SCATTER;
  2418. else
  2419. table->spec[filter_idx].flags &=
  2420. ~EF4_FILTER_FLAG_RX_SCATTER;
  2421. if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
  2422. /* Pushed by ef4_farch_filter_push_rx_config() */
  2423. continue;
  2424. ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
  2425. ef4_writeo(efx, &filter,
  2426. table->offset + table->step * filter_idx);
  2427. }
  2428. }
  2429. ef4_farch_filter_push_rx_config(efx);
  2430. spin_unlock_bh(&efx->filter_lock);
  2431. }
  2432. #ifdef CONFIG_RFS_ACCEL
  2433. s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
  2434. struct ef4_filter_spec *gen_spec)
  2435. {
  2436. return ef4_farch_filter_insert(efx, gen_spec, true);
  2437. }
  2438. bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
  2439. unsigned int index)
  2440. {
  2441. struct ef4_farch_filter_state *state = efx->filter_state;
  2442. struct ef4_farch_filter_table *table =
  2443. &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
  2444. if (test_bit(index, table->used_bitmap) &&
  2445. table->spec[index].priority == EF4_FILTER_PRI_HINT &&
  2446. rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
  2447. flow_id, index)) {
  2448. ef4_farch_filter_table_clear_entry(efx, table, index);
  2449. return true;
  2450. }
  2451. return false;
  2452. }
  2453. #endif /* CONFIG_RFS_ACCEL */
  2454. void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
  2455. {
  2456. struct net_device *net_dev = efx->net_dev;
  2457. struct netdev_hw_addr *ha;
  2458. union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
  2459. u32 crc;
  2460. int bit;
  2461. if (!ef4_dev_registered(efx))
  2462. return;
  2463. netif_addr_lock_bh(net_dev);
  2464. efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
  2465. /* Build multicast hash table */
  2466. if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
  2467. memset(mc_hash, 0xff, sizeof(*mc_hash));
  2468. } else {
  2469. memset(mc_hash, 0x00, sizeof(*mc_hash));
  2470. netdev_for_each_mc_addr(ha, net_dev) {
  2471. crc = ether_crc_le(ETH_ALEN, ha->addr);
  2472. bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
  2473. __set_bit_le(bit, mc_hash);
  2474. }
  2475. /* Broadcast packets go through the multicast hash filter.
  2476. * ether_crc_le() of the broadcast address is 0xbe2612ff
  2477. * so we always add bit 0xff to the mask.
  2478. */
  2479. __set_bit_le(0xff, mc_hash);
  2480. }
  2481. netif_addr_unlock_bh(net_dev);
  2482. }