  1. /*
  2. * Copyright 2015 Amazon.com, Inc. or its affiliates.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include "ena_com.h"
  33. /*****************************************************************************/
  34. /*****************************************************************************/
  35. /* Timeout in micro-sec */
  36. #define ADMIN_CMD_TIMEOUT_US (3000000)
  37. #define ENA_ASYNC_QUEUE_DEPTH 16
  38. #define ENA_ADMIN_QUEUE_DEPTH 32
  39. #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
  40. ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
  41. | (ENA_COMMON_SPEC_VERSION_MINOR))
  42. #define ENA_CTRL_MAJOR 0
  43. #define ENA_CTRL_MINOR 0
  44. #define ENA_CTRL_SUB_MINOR 1
  45. #define MIN_ENA_CTRL_VER \
  46. (((ENA_CTRL_MAJOR) << \
  47. (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  48. ((ENA_CTRL_MINOR) << \
  49. (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  50. (ENA_CTRL_SUB_MINOR))
  51. #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
  52. #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
  53. #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  54. #define ENA_REGS_ADMIN_INTR_MASK 1
  55. #define ENA_POLL_MS 5
  56. /*****************************************************************************/
  57. /*****************************************************************************/
  58. /*****************************************************************************/
  59. enum ena_cmd_status {
  60. ENA_CMD_SUBMITTED,
  61. ENA_CMD_COMPLETED,
  62. /* Abort - canceled by the driver */
  63. ENA_CMD_ABORTED,
  64. };
  65. struct ena_comp_ctx {
  66. struct completion wait_event;
  67. struct ena_admin_acq_entry *user_cqe;
  68. u32 comp_size;
  69. enum ena_cmd_status status;
  70. /* status from the device */
  71. u8 comp_status;
  72. u8 cmd_opcode;
  73. bool occupied;
  74. };
  75. struct ena_com_stats_ctx {
  76. struct ena_admin_aq_get_stats_cmd get_cmd;
  77. struct ena_admin_acq_get_stats_resp get_resp;
  78. };
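/* Validate that a DMA address fits within the device's supported address
* width and split it into the low/high fields of the ena_common_mem_addr
* descriptor.
*/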
  79. static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  80. struct ena_common_mem_addr *ena_addr,
  81. dma_addr_t addr)
  82. {
  83. if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
  84. pr_err("dma address has more bits that the device supports\n");
  85. return -EINVAL;
  86. }
  87. ena_addr->mem_addr_low = lower_32_bits(addr);
  88. ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
  89. return 0;
  90. }
  91. static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
  92. {
  93. struct ena_com_admin_sq *sq = &queue->sq;
  94. u16 size = ADMIN_SQ_SIZE(queue->q_depth);
  95. sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
  96. GFP_KERNEL);
  97. if (!sq->entries) {
  98. pr_err("memory allocation failed");
  99. return -ENOMEM;
  100. }
  101. sq->head = 0;
  102. sq->tail = 0;
  103. sq->phase = 1;
  104. sq->db_addr = NULL;
  105. return 0;
  106. }
  107. static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
  108. {
  109. struct ena_com_admin_cq *cq = &queue->cq;
  110. u16 size = ADMIN_CQ_SIZE(queue->q_depth);
  111. cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
  112. GFP_KERNEL);
  113. if (!cq->entries) {
  114. pr_err("memory allocation failed");
  115. return -ENOMEM;
  116. }
  117. cq->head = 0;
  118. cq->phase = 1;
  119. return 0;
  120. }
  121. static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
  122. struct ena_aenq_handlers *aenq_handlers)
  123. {
  124. struct ena_com_aenq *aenq = &dev->aenq;
  125. u32 addr_low, addr_high, aenq_caps;
  126. u16 size;
  127. dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
  128. size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
  129. aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
  130. GFP_KERNEL);
  131. if (!aenq->entries) {
  132. pr_err("memory allocation failed");
  133. return -ENOMEM;
  134. }
  135. aenq->head = aenq->q_depth;
  136. aenq->phase = 1;
  137. addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
  138. addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
  139. writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
  140. writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
  141. aenq_caps = 0;
  142. aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
  143. aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
  144. << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
  145. ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
  146. writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
  147. if (unlikely(!aenq_handlers)) {
  148. pr_err("aenq handlers pointer is NULL\n");
  149. return -EINVAL;
  150. }
  151. aenq->aenq_handlers = aenq_handlers;
  152. return 0;
  153. }
  154. static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
  155. struct ena_comp_ctx *comp_ctx)
  156. {
  157. comp_ctx->occupied = false;
  158. atomic_dec(&queue->outstanding_cmds);
  159. }
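/* Return the completion context for a given command id, optionally marking
* it as occupied and counting it as an outstanding command. Returns NULL if
* the id is out of range or the context is already in use.
*/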
  160. static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
  161. u16 command_id, bool capture)
  162. {
  163. if (unlikely(command_id >= queue->q_depth)) {
  164. pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
  165. command_id, queue->q_depth);
  166. return NULL;
  167. }
  168. if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
  169. pr_err("Completion context is occupied\n");
  170. return NULL;
  171. }
  172. if (capture) {
  173. atomic_inc(&queue->outstanding_cmds);
  174. queue->comp_ctx[command_id].occupied = true;
  175. }
  176. return &queue->comp_ctx[command_id];
  177. }
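/* Copy the command into the next free admin SQ entry and ring the
* submission doorbell. Called with the admin queue lock held; returns the
* completion context tracking the command, or an ERR_PTR on failure.
*/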
  178. static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  179. struct ena_admin_aq_entry *cmd,
  180. size_t cmd_size_in_bytes,
  181. struct ena_admin_acq_entry *comp,
  182. size_t comp_size_in_bytes)
  183. {
  184. struct ena_comp_ctx *comp_ctx;
  185. u16 tail_masked, cmd_id;
  186. u16 queue_size_mask;
  187. u16 cnt;
  188. queue_size_mask = admin_queue->q_depth - 1;
  189. tail_masked = admin_queue->sq.tail & queue_size_mask;
  190. /* In case of queue FULL */
  191. cnt = atomic_read(&admin_queue->outstanding_cmds);
  192. if (cnt >= admin_queue->q_depth) {
  193. pr_debug("admin queue is full.\n");
  194. admin_queue->stats.out_of_space++;
  195. return ERR_PTR(-ENOSPC);
  196. }
  197. cmd_id = admin_queue->curr_cmd_id;
  198. cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
  199. ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
  200. cmd->aq_common_descriptor.command_id |= cmd_id &
  201. ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
  202. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
  203. if (unlikely(!comp_ctx))
  204. return ERR_PTR(-EINVAL);
  205. comp_ctx->status = ENA_CMD_SUBMITTED;
  206. comp_ctx->comp_size = (u32)comp_size_in_bytes;
  207. comp_ctx->user_cqe = comp;
  208. comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
  209. reinit_completion(&comp_ctx->wait_event);
  210. memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
  211. admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
  212. queue_size_mask;
  213. admin_queue->sq.tail++;
  214. admin_queue->stats.submitted_cmd++;
  215. if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
  216. admin_queue->sq.phase = !admin_queue->sq.phase;
  217. writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
  218. return comp_ctx;
  219. }
  220. static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
  221. {
  222. size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
  223. struct ena_comp_ctx *comp_ctx;
  224. u16 i;
  225. queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
  226. if (unlikely(!queue->comp_ctx)) {
  227. pr_err("memory allocation failed");
  228. return -ENOMEM;
  229. }
  230. for (i = 0; i < queue->q_depth; i++) {
  231. comp_ctx = get_comp_ctxt(queue, i, false);
  232. if (comp_ctx)
  233. init_completion(&comp_ctx->wait_event);
  234. }
  235. return 0;
  236. }
  237. static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  238. struct ena_admin_aq_entry *cmd,
  239. size_t cmd_size_in_bytes,
  240. struct ena_admin_acq_entry *comp,
  241. size_t comp_size_in_bytes)
  242. {
  243. unsigned long flags;
  244. struct ena_comp_ctx *comp_ctx;
  245. spin_lock_irqsave(&admin_queue->q_lock, flags);
  246. if (unlikely(!admin_queue->running_state)) {
  247. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  248. return ERR_PTR(-ENODEV);
  249. }
  250. comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
  251. cmd_size_in_bytes,
  252. comp,
  253. comp_size_in_bytes);
  254. if (IS_ERR(comp_ctx))
  255. admin_queue->running_state = false;
  256. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  257. return comp_ctx;
  258. }
  259. static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
  260. struct ena_com_create_io_ctx *ctx,
  261. struct ena_com_io_sq *io_sq)
  262. {
  263. size_t size;
  264. int dev_node = 0;
  265. memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
  266. io_sq->desc_entry_size =
  267. (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  268. sizeof(struct ena_eth_io_tx_desc) :
  269. sizeof(struct ena_eth_io_rx_desc);
  270. size = io_sq->desc_entry_size * io_sq->q_depth;
  271. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  272. dev_node = dev_to_node(ena_dev->dmadev);
  273. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  274. io_sq->desc_addr.virt_addr =
  275. dma_zalloc_coherent(ena_dev->dmadev, size,
  276. &io_sq->desc_addr.phys_addr,
  277. GFP_KERNEL);
  278. set_dev_node(ena_dev->dmadev, dev_node);
  279. if (!io_sq->desc_addr.virt_addr) {
  280. io_sq->desc_addr.virt_addr =
  281. dma_zalloc_coherent(ena_dev->dmadev, size,
  282. &io_sq->desc_addr.phys_addr,
  283. GFP_KERNEL);
  284. }
  285. } else {
  286. dev_node = dev_to_node(ena_dev->dmadev);
  287. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  288. io_sq->desc_addr.virt_addr =
  289. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  290. set_dev_node(ena_dev->dmadev, dev_node);
  291. if (!io_sq->desc_addr.virt_addr) {
  292. io_sq->desc_addr.virt_addr =
  293. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  294. }
  295. }
  296. if (!io_sq->desc_addr.virt_addr) {
  297. pr_err("memory allocation failed");
  298. return -ENOMEM;
  299. }
  300. io_sq->tail = 0;
  301. io_sq->next_to_comp = 0;
  302. io_sq->phase = 1;
  303. return 0;
  304. }
  305. static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
  306. struct ena_com_create_io_ctx *ctx,
  307. struct ena_com_io_cq *io_cq)
  308. {
  309. size_t size;
  310. int prev_node = 0;
  311. memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
  312. /* Use the basic completion descriptor for Rx */
  313. io_cq->cdesc_entry_size_in_bytes =
  314. (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  315. sizeof(struct ena_eth_io_tx_cdesc) :
  316. sizeof(struct ena_eth_io_rx_cdesc_base);
  317. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  318. prev_node = dev_to_node(ena_dev->dmadev);
  319. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  320. io_cq->cdesc_addr.virt_addr =
  321. dma_zalloc_coherent(ena_dev->dmadev, size,
  322. &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
  323. set_dev_node(ena_dev->dmadev, prev_node);
  324. if (!io_cq->cdesc_addr.virt_addr) {
  325. io_cq->cdesc_addr.virt_addr =
  326. dma_zalloc_coherent(ena_dev->dmadev, size,
  327. &io_cq->cdesc_addr.phys_addr,
  328. GFP_KERNEL);
  329. }
  330. if (!io_cq->cdesc_addr.virt_addr) {
  331. pr_err("memory allocation failed");
  332. return -ENOMEM;
  333. }
  334. io_cq->phase = 1;
  335. io_cq->head = 0;
  336. return 0;
  337. }
  338. static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
  339. struct ena_admin_acq_entry *cqe)
  340. {
  341. struct ena_comp_ctx *comp_ctx;
  342. u16 cmd_id;
  343. cmd_id = cqe->acq_common_descriptor.command &
  344. ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
  345. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
  346. if (unlikely(!comp_ctx)) {
  347. pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
  348. admin_queue->running_state = false;
  349. return;
  350. }
  351. comp_ctx->status = ENA_CMD_COMPLETED;
  352. comp_ctx->comp_status = cqe->acq_common_descriptor.status;
  353. if (comp_ctx->user_cqe)
  354. memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
  355. if (!admin_queue->polling)
  356. complete(&comp_ctx->wait_event);
  357. }
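/* Walk the admin CQ and handle every completion whose phase bit matches the
* current phase, then advance the CQ/SQ heads and update the statistics.
*/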
  358. static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
  359. {
  360. struct ena_admin_acq_entry *cqe = NULL;
  361. u16 comp_num = 0;
  362. u16 head_masked;
  363. u8 phase;
  364. head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
  365. phase = admin_queue->cq.phase;
  366. cqe = &admin_queue->cq.entries[head_masked];
  367. /* Go over all the completions */
  368. while ((cqe->acq_common_descriptor.flags &
  369. ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
  370. /* Do not read the rest of the completion entry before the
  371. * phase bit was validated
  372. */
  373. rmb();
  374. ena_com_handle_single_admin_completion(admin_queue, cqe);
  375. head_masked++;
  376. comp_num++;
  377. if (unlikely(head_masked == admin_queue->q_depth)) {
  378. head_masked = 0;
  379. phase = !phase;
  380. }
  381. cqe = &admin_queue->cq.entries[head_masked];
  382. }
  383. admin_queue->cq.head += comp_num;
  384. admin_queue->cq.phase = phase;
  385. admin_queue->sq.head += comp_num;
  386. admin_queue->stats.completed_cmd += comp_num;
  387. }
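/* Translate an admin completion status reported by the device into a
* standard errno value.
*/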
  388. static int ena_com_comp_status_to_errno(u8 comp_status)
  389. {
  390. if (unlikely(comp_status != 0))
  391. pr_err("admin command failed[%u]\n", comp_status);
  392. if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
  393. return -EINVAL;
  394. switch (comp_status) {
  395. case ENA_ADMIN_SUCCESS:
  396. return 0;
  397. case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
  398. return -ENOMEM;
  399. case ENA_ADMIN_UNSUPPORTED_OPCODE:
  400. return -EOPNOTSUPP;
  401. case ENA_ADMIN_BAD_OPCODE:
  402. case ENA_ADMIN_MALFORMED_REQUEST:
  403. case ENA_ADMIN_ILLEGAL_PARAMETER:
  404. case ENA_ADMIN_UNKNOWN_ERROR:
  405. return -EINVAL;
  406. }
  407. return 0;
  408. }
  409. static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
  410. struct ena_com_admin_queue *admin_queue)
  411. {
  412. unsigned long flags, timeout;
  413. int ret;
  414. timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
  415. while (1) {
  416. spin_lock_irqsave(&admin_queue->q_lock, flags);
  417. ena_com_handle_admin_completion(admin_queue);
  418. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  419. if (comp_ctx->status != ENA_CMD_SUBMITTED)
  420. break;
  421. if (time_is_before_jiffies(timeout)) {
  422. pr_err("Wait for completion (polling) timeout\n");
  423. /* ENA didn't have any completion */
  424. spin_lock_irqsave(&admin_queue->q_lock, flags);
  425. admin_queue->stats.no_completion++;
  426. admin_queue->running_state = false;
  427. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  428. ret = -ETIME;
  429. goto err;
  430. }
  431. msleep(ENA_POLL_MS);
  432. }
  433. if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
  434. pr_err("Command was aborted\n");
  435. spin_lock_irqsave(&admin_queue->q_lock, flags);
  436. admin_queue->stats.aborted_cmd++;
  437. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  438. ret = -ENODEV;
  439. goto err;
  440. }
  441. WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
  442. comp_ctx->status);
  443. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  444. err:
  445. comp_ctxt_release(admin_queue, comp_ctx);
  446. return ret;
  447. }
  448. static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
  449. struct ena_com_admin_queue *admin_queue)
  450. {
  451. unsigned long flags;
  452. int ret;
  453. wait_for_completion_timeout(&comp_ctx->wait_event,
  454. usecs_to_jiffies(
  455. admin_queue->completion_timeout));
  456. /* In case the command wasn't completed, find out the root cause.
  457. * There might be 2 kinds of errors:
  458. * 1) No completion (timeout reached)
  459. * 2) There is a completion, but the driver didn't receive the MSI-X interrupt.
  460. */
  461. if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
  462. spin_lock_irqsave(&admin_queue->q_lock, flags);
  463. ena_com_handle_admin_completion(admin_queue);
  464. admin_queue->stats.no_completion++;
  465. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  466. if (comp_ctx->status == ENA_CMD_COMPLETED)
  467. pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
  468. comp_ctx->cmd_opcode);
  469. else
  470. pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
  471. comp_ctx->cmd_opcode, comp_ctx->status);
  472. admin_queue->running_state = false;
  473. ret = -ETIME;
  474. goto err;
  475. }
  476. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  477. err:
  478. comp_ctxt_release(admin_queue, comp_ctx);
  479. return ret;
  480. }
  481. /* This method reads a hardware device register by posting a write
  482. * and waiting for the response.
  483. * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
  484. */
  485. static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
  486. {
  487. struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
  488. volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
  489. mmio_read->read_resp;
  490. u32 mmio_read_reg, ret, i;
  491. unsigned long flags;
  492. u32 timeout = mmio_read->reg_read_to;
  493. might_sleep();
  494. if (timeout == 0)
  495. timeout = ENA_REG_READ_TIMEOUT;
  496. /* If readless is disabled, perform regular read */
  497. if (!mmio_read->readless_supported)
  498. return readl(ena_dev->reg_bar + offset);
  499. spin_lock_irqsave(&mmio_read->lock, flags);
  500. mmio_read->seq_num++;
  501. read_resp->req_id = mmio_read->seq_num + 0xDEAD;
  502. mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
  503. ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
  504. mmio_read_reg |= mmio_read->seq_num &
  505. ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
  506. /* make sure read_resp->req_id gets updated before the hw can write
  507. * to it
  508. */
  509. wmb();
  510. writel_relaxed(mmio_read_reg,
  511. ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
  512. mmiowb();
  513. for (i = 0; i < timeout; i++) {
  514. if (read_resp->req_id == mmio_read->seq_num)
  515. break;
  516. udelay(1);
  517. }
  518. if (unlikely(i == timeout)) {
  519. pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
  520. mmio_read->seq_num, offset, read_resp->req_id,
  521. read_resp->reg_off);
  522. ret = ENA_MMIO_READ_TIMEOUT;
  523. goto err;
  524. }
  525. if (read_resp->reg_off != offset) {
  526. pr_err("Read failure: wrong offset provided");
  527. ret = ENA_MMIO_READ_TIMEOUT;
  528. } else {
  529. ret = read_resp->reg_val;
  530. }
  531. err:
  532. spin_unlock_irqrestore(&mmio_read->lock, flags);
  533. return ret;
  534. }
  535. /* There are two ways to wait for a completion.
  536. * Polling mode - poll until the completion is available.
  537. * Async mode - wait on a wait queue until the completion is ready
  538. * (or the timeout expires).
  539. * It is expected that the IRQ handler calls ena_com_handle_admin_completion
  540. * to mark the completions.
  541. */
  542. static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
  543. struct ena_com_admin_queue *admin_queue)
  544. {
  545. if (admin_queue->polling)
  546. return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
  547. admin_queue);
  548. return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
  549. admin_queue);
  550. }
  551. static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
  552. struct ena_com_io_sq *io_sq)
  553. {
  554. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  555. struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
  556. struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
  557. u8 direction;
  558. int ret;
  559. memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
  560. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  561. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  562. else
  563. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  564. destroy_cmd.sq.sq_identity |= (direction <<
  565. ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
  566. ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
  567. destroy_cmd.sq.sq_idx = io_sq->idx;
  568. destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
  569. ret = ena_com_execute_admin_command(admin_queue,
  570. (struct ena_admin_aq_entry *)&destroy_cmd,
  571. sizeof(destroy_cmd),
  572. (struct ena_admin_acq_entry *)&destroy_resp,
  573. sizeof(destroy_resp));
  574. if (unlikely(ret && (ret != -ENODEV)))
  575. pr_err("failed to destroy io sq error: %d\n", ret);
  576. return ret;
  577. }
  578. static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
  579. struct ena_com_io_sq *io_sq,
  580. struct ena_com_io_cq *io_cq)
  581. {
  582. size_t size;
  583. if (io_cq->cdesc_addr.virt_addr) {
  584. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  585. dma_free_coherent(ena_dev->dmadev, size,
  586. io_cq->cdesc_addr.virt_addr,
  587. io_cq->cdesc_addr.phys_addr);
  588. io_cq->cdesc_addr.virt_addr = NULL;
  589. }
  590. if (io_sq->desc_addr.virt_addr) {
  591. size = io_sq->desc_entry_size * io_sq->q_depth;
  592. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
  593. dma_free_coherent(ena_dev->dmadev, size,
  594. io_sq->desc_addr.virt_addr,
  595. io_sq->desc_addr.phys_addr);
  596. else
  597. devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
  598. io_sq->desc_addr.virt_addr = NULL;
  599. }
  600. }
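/* Poll the device status register until the reset-in-progress bit matches
* the expected state or the timeout (given in units of 100ms) expires.
*/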
  601. static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
  602. u16 exp_state)
  603. {
  604. u32 val, i;
  605. /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
  606. timeout = (timeout * 100) / ENA_POLL_MS;
  607. for (i = 0; i < timeout; i++) {
  608. val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
  609. if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
  610. pr_err("Reg read timeout occurred\n");
  611. return -ETIME;
  612. }
  613. if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
  614. exp_state)
  615. return 0;
  616. msleep(ENA_POLL_MS);
  617. }
  618. return -ETIME;
  619. }
  620. static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
  621. enum ena_admin_aq_feature_id feature_id)
  622. {
  623. u32 feature_mask = 1 << feature_id;
  624. /* Device attributes are always supported */
  625. if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
  626. !(ena_dev->supported_features & feature_mask))
  627. return false;
  628. return true;
  629. }
  630. static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
  631. struct ena_admin_get_feat_resp *get_resp,
  632. enum ena_admin_aq_feature_id feature_id,
  633. dma_addr_t control_buf_dma_addr,
  634. u32 control_buff_size)
  635. {
  636. struct ena_com_admin_queue *admin_queue;
  637. struct ena_admin_get_feat_cmd get_cmd;
  638. int ret;
  639. if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
  640. pr_debug("Feature %d isn't supported\n", feature_id);
  641. return -EOPNOTSUPP;
  642. }
  643. memset(&get_cmd, 0x0, sizeof(get_cmd));
  644. admin_queue = &ena_dev->admin_queue;
  645. get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
  646. if (control_buff_size)
  647. get_cmd.aq_common_descriptor.flags =
  648. ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
  649. else
  650. get_cmd.aq_common_descriptor.flags = 0;
  651. ret = ena_com_mem_addr_set(ena_dev,
  652. &get_cmd.control_buffer.address,
  653. control_buf_dma_addr);
  654. if (unlikely(ret)) {
  655. pr_err("memory address set failed\n");
  656. return ret;
  657. }
  658. get_cmd.control_buffer.length = control_buff_size;
  659. get_cmd.feat_common.feature_id = feature_id;
  660. ret = ena_com_execute_admin_command(admin_queue,
  661. (struct ena_admin_aq_entry *)
  662. &get_cmd,
  663. sizeof(get_cmd),
  664. (struct ena_admin_acq_entry *)
  665. get_resp,
  666. sizeof(*get_resp));
  667. if (unlikely(ret))
  668. pr_err("Failed to submit get_feature command %d error: %d\n",
  669. feature_id, ret);
  670. return ret;
  671. }
  672. static int ena_com_get_feature(struct ena_com_dev *ena_dev,
  673. struct ena_admin_get_feat_resp *get_resp,
  674. enum ena_admin_aq_feature_id feature_id)
  675. {
  676. return ena_com_get_feature_ex(ena_dev,
  677. get_resp,
  678. feature_id,
  679. 0,
  680. 0);
  681. }
  682. static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
  683. {
  684. struct ena_rss *rss = &ena_dev->rss;
  685. rss->hash_key =
  686. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  687. &rss->hash_key_dma_addr, GFP_KERNEL);
  688. if (unlikely(!rss->hash_key))
  689. return -ENOMEM;
  690. return 0;
  691. }
  692. static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
  693. {
  694. struct ena_rss *rss = &ena_dev->rss;
  695. if (rss->hash_key)
  696. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  697. rss->hash_key, rss->hash_key_dma_addr);
  698. rss->hash_key = NULL;
  699. }
  700. static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
  701. {
  702. struct ena_rss *rss = &ena_dev->rss;
  703. rss->hash_ctrl =
  704. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  705. &rss->hash_ctrl_dma_addr, GFP_KERNEL);
  706. if (unlikely(!rss->hash_ctrl))
  707. return -ENOMEM;
  708. return 0;
  709. }
  710. static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
  711. {
  712. struct ena_rss *rss = &ena_dev->rss;
  713. if (rss->hash_ctrl)
  714. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  715. rss->hash_ctrl, rss->hash_ctrl_dma_addr);
  716. rss->hash_ctrl = NULL;
  717. }
  718. static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
  719. u16 log_size)
  720. {
  721. struct ena_rss *rss = &ena_dev->rss;
  722. struct ena_admin_get_feat_resp get_resp;
  723. size_t tbl_size;
  724. int ret;
  725. ret = ena_com_get_feature(ena_dev, &get_resp,
  726. ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
  727. if (unlikely(ret))
  728. return ret;
  729. if ((get_resp.u.ind_table.min_size > log_size) ||
  730. (get_resp.u.ind_table.max_size < log_size)) {
  731. pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
  732. 1 << log_size, 1 << get_resp.u.ind_table.min_size,
  733. 1 << get_resp.u.ind_table.max_size);
  734. return -EINVAL;
  735. }
  736. tbl_size = (1ULL << log_size) *
  737. sizeof(struct ena_admin_rss_ind_table_entry);
  738. rss->rss_ind_tbl =
  739. dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
  740. &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
  741. if (unlikely(!rss->rss_ind_tbl))
  742. goto mem_err1;
  743. tbl_size = (1ULL << log_size) * sizeof(u16);
  744. rss->host_rss_ind_tbl =
  745. devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
  746. if (unlikely(!rss->host_rss_ind_tbl))
  747. goto mem_err2;
  748. rss->tbl_log_size = log_size;
  749. return 0;
  750. mem_err2:
  751. tbl_size = (1ULL << log_size) *
  752. sizeof(struct ena_admin_rss_ind_table_entry);
  753. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  754. rss->rss_ind_tbl_dma_addr);
  755. rss->rss_ind_tbl = NULL;
  756. mem_err1:
  757. rss->tbl_log_size = 0;
  758. return -ENOMEM;
  759. }
  760. static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
  761. {
  762. struct ena_rss *rss = &ena_dev->rss;
  763. size_t tbl_size = (1ULL << rss->tbl_log_size) *
  764. sizeof(struct ena_admin_rss_ind_table_entry);
  765. if (rss->rss_ind_tbl)
  766. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  767. rss->rss_ind_tbl_dma_addr);
  768. rss->rss_ind_tbl = NULL;
  769. if (rss->host_rss_ind_tbl)
  770. devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
  771. rss->host_rss_ind_tbl = NULL;
  772. }
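/* Send an admin CREATE_SQ command for the given IO SQ and record the
* doorbell address (and, for LLQ placement, the device-memory header and
* descriptor addresses) returned in the completion.
*/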
  773. static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
  774. struct ena_com_io_sq *io_sq, u16 cq_idx)
  775. {
  776. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  777. struct ena_admin_aq_create_sq_cmd create_cmd;
  778. struct ena_admin_acq_create_sq_resp_desc cmd_completion;
  779. u8 direction;
  780. int ret;
  781. memset(&create_cmd, 0x0, sizeof(create_cmd));
  782. create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
  783. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  784. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  785. else
  786. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  787. create_cmd.sq_identity |= (direction <<
  788. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
  789. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
  790. create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
  791. ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
  792. create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
  793. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
  794. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
  795. create_cmd.sq_caps_3 |=
  796. ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
  797. create_cmd.cq_idx = cq_idx;
  798. create_cmd.sq_depth = io_sq->q_depth;
  799. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  800. ret = ena_com_mem_addr_set(ena_dev,
  801. &create_cmd.sq_ba,
  802. io_sq->desc_addr.phys_addr);
  803. if (unlikely(ret)) {
  804. pr_err("memory address set failed\n");
  805. return ret;
  806. }
  807. }
  808. ret = ena_com_execute_admin_command(admin_queue,
  809. (struct ena_admin_aq_entry *)&create_cmd,
  810. sizeof(create_cmd),
  811. (struct ena_admin_acq_entry *)&cmd_completion,
  812. sizeof(cmd_completion));
  813. if (unlikely(ret)) {
  814. pr_err("Failed to create IO SQ. error: %d\n", ret);
  815. return ret;
  816. }
  817. io_sq->idx = cmd_completion.sq_idx;
  818. io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  819. (uintptr_t)cmd_completion.sq_doorbell_offset);
  820. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
  821. io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
  822. + cmd_completion.llq_headers_offset);
  823. io_sq->desc_addr.pbuf_dev_addr =
  824. (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
  825. cmd_completion.llq_descriptors_offset);
  826. }
  827. pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
  828. return ret;
  829. }
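/* Translate the host RSS indirection table (queue ids) into the device
* representation (CQ indices).
*/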
  830. static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
  831. {
  832. struct ena_rss *rss = &ena_dev->rss;
  833. struct ena_com_io_sq *io_sq;
  834. u16 qid;
  835. int i;
  836. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  837. qid = rss->host_rss_ind_tbl[i];
  838. if (qid >= ENA_TOTAL_NUM_QUEUES)
  839. return -EINVAL;
  840. io_sq = &ena_dev->io_sq_queues[qid];
  841. if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
  842. return -EINVAL;
  843. rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
  844. }
  845. return 0;
  846. }
  847. static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
  848. {
  849. u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
  850. struct ena_rss *rss = &ena_dev->rss;
  851. u8 idx;
  852. u16 i;
  853. for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
  854. dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
  855. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  856. if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
  857. return -EINVAL;
  858. idx = (u8)rss->rss_ind_tbl[i].cq_idx;
  859. if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
  860. return -EINVAL;
  861. rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
  862. }
  863. return 0;
  864. }
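/* Allocate the interrupt moderation table and populate it with the default
* moderation values.
*/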
  865. static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
  866. {
  867. size_t size;
  868. size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
  869. ena_dev->intr_moder_tbl =
  870. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  871. if (!ena_dev->intr_moder_tbl)
  872. return -ENOMEM;
  873. ena_com_config_default_interrupt_moderation_table(ena_dev);
  874. return 0;
  875. }
  876. static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
  877. u16 intr_delay_resolution)
  878. {
  879. struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
  880. unsigned int i;
  881. if (!intr_delay_resolution) {
  882. pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
  883. intr_delay_resolution = 1;
  884. }
  885. ena_dev->intr_delay_resolution = intr_delay_resolution;
  886. /* update Rx */
  887. for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
  888. intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
  889. /* update Tx */
  890. ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
  891. }
  892. /*****************************************************************************/
  893. /******************************* API ******************************/
  894. /*****************************************************************************/
  895. int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
  896. struct ena_admin_aq_entry *cmd,
  897. size_t cmd_size,
  898. struct ena_admin_acq_entry *comp,
  899. size_t comp_size)
  900. {
  901. struct ena_comp_ctx *comp_ctx;
  902. int ret;
  903. comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
  904. comp, comp_size);
  905. if (IS_ERR(comp_ctx)) {
  906. if (comp_ctx == ERR_PTR(-ENODEV))
  907. pr_debug("Failed to submit command [%ld]\n",
  908. PTR_ERR(comp_ctx));
  909. else
  910. pr_err("Failed to submit command [%ld]\n",
  911. PTR_ERR(comp_ctx));
  912. return PTR_ERR(comp_ctx);
  913. }
  914. ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
  915. if (unlikely(ret)) {
  916. if (admin_queue->running_state)
  917. pr_err("Failed to process command. ret = %d\n", ret);
  918. else
  919. pr_debug("Failed to process command. ret = %d\n", ret);
  920. }
  921. return ret;
  922. }
  923. int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
  924. struct ena_com_io_cq *io_cq)
  925. {
  926. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  927. struct ena_admin_aq_create_cq_cmd create_cmd;
  928. struct ena_admin_acq_create_cq_resp_desc cmd_completion;
  929. int ret;
  930. memset(&create_cmd, 0x0, sizeof(create_cmd));
  931. create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
  932. create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
  933. ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
  934. create_cmd.cq_caps_1 |=
  935. ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
  936. create_cmd.msix_vector = io_cq->msix_vector;
  937. create_cmd.cq_depth = io_cq->q_depth;
  938. ret = ena_com_mem_addr_set(ena_dev,
  939. &create_cmd.cq_ba,
  940. io_cq->cdesc_addr.phys_addr);
  941. if (unlikely(ret)) {
  942. pr_err("memory address set failed\n");
  943. return ret;
  944. }
  945. ret = ena_com_execute_admin_command(admin_queue,
  946. (struct ena_admin_aq_entry *)&create_cmd,
  947. sizeof(create_cmd),
  948. (struct ena_admin_acq_entry *)&cmd_completion,
  949. sizeof(cmd_completion));
  950. if (unlikely(ret)) {
  951. pr_err("Failed to create IO CQ. error: %d\n", ret);
  952. return ret;
  953. }
  954. io_cq->idx = cmd_completion.cq_idx;
  955. io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  956. cmd_completion.cq_interrupt_unmask_register_offset);
  957. if (cmd_completion.cq_head_db_register_offset)
  958. io_cq->cq_head_db_reg =
  959. (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  960. cmd_completion.cq_head_db_register_offset);
  961. if (cmd_completion.numa_node_register_offset)
  962. io_cq->numa_node_cfg_reg =
  963. (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  964. cmd_completion.numa_node_register_offset);
  965. pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
  966. return ret;
  967. }
  968. int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
  969. struct ena_com_io_sq **io_sq,
  970. struct ena_com_io_cq **io_cq)
  971. {
  972. if (qid >= ENA_TOTAL_NUM_QUEUES) {
  973. pr_err("Invalid queue number %d but the max is %d\n", qid,
  974. ENA_TOTAL_NUM_QUEUES);
  975. return -EINVAL;
  976. }
  977. *io_sq = &ena_dev->io_sq_queues[qid];
  978. *io_cq = &ena_dev->io_cq_queues[qid];
  979. return 0;
  980. }
  981. void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
  982. {
  983. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  984. struct ena_comp_ctx *comp_ctx;
  985. u16 i;
  986. if (!admin_queue->comp_ctx)
  987. return;
  988. for (i = 0; i < admin_queue->q_depth; i++) {
  989. comp_ctx = get_comp_ctxt(admin_queue, i, false);
  990. if (unlikely(!comp_ctx))
  991. break;
  992. comp_ctx->status = ENA_CMD_ABORTED;
  993. complete(&comp_ctx->wait_event);
  994. }
  995. }
  996. void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
  997. {
  998. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  999. unsigned long flags;
  1000. spin_lock_irqsave(&admin_queue->q_lock, flags);
  1001. while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
  1002. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  1003. msleep(ENA_POLL_MS);
  1004. spin_lock_irqsave(&admin_queue->q_lock, flags);
  1005. }
  1006. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  1007. }
  1008. int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
  1009. struct ena_com_io_cq *io_cq)
  1010. {
  1011. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1012. struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
  1013. struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
  1014. int ret;
  1015. memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
  1016. destroy_cmd.cq_idx = io_cq->idx;
  1017. destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
  1018. ret = ena_com_execute_admin_command(admin_queue,
  1019. (struct ena_admin_aq_entry *)&destroy_cmd,
  1020. sizeof(destroy_cmd),
  1021. (struct ena_admin_acq_entry *)&destroy_resp,
  1022. sizeof(destroy_resp));
  1023. if (unlikely(ret && (ret != -ENODEV)))
  1024. pr_err("Failed to destroy IO CQ. error: %d\n", ret);
  1025. return ret;
  1026. }
  1027. bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
  1028. {
  1029. return ena_dev->admin_queue.running_state;
  1030. }
  1031. void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
  1032. {
  1033. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1034. unsigned long flags;
  1035. spin_lock_irqsave(&admin_queue->q_lock, flags);
  1036. ena_dev->admin_queue.running_state = state;
  1037. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  1038. }
  1039. void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
  1040. {
  1041. u16 depth = ena_dev->aenq.q_depth;
  1042. WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
  1043. /* Init head_db to mark that all entries in the queue
  1044. * are initially available
  1045. */
  1046. writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
  1047. }
  1048. int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
  1049. {
  1050. struct ena_com_admin_queue *admin_queue;
  1051. struct ena_admin_set_feat_cmd cmd;
  1052. struct ena_admin_set_feat_resp resp;
  1053. struct ena_admin_get_feat_resp get_resp;
  1054. int ret;
  1055. ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
  1056. if (ret) {
  1057. pr_info("Can't get aenq configuration\n");
  1058. return ret;
  1059. }
  1060. if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
  1061. pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
  1062. get_resp.u.aenq.supported_groups, groups_flag);
  1063. return -EOPNOTSUPP;
  1064. }
  1065. memset(&cmd, 0x0, sizeof(cmd));
  1066. admin_queue = &ena_dev->admin_queue;
  1067. cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
  1068. cmd.aq_common_descriptor.flags = 0;
  1069. cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
  1070. cmd.u.aenq.enabled_groups = groups_flag;
  1071. ret = ena_com_execute_admin_command(admin_queue,
  1072. (struct ena_admin_aq_entry *)&cmd,
  1073. sizeof(cmd),
  1074. (struct ena_admin_acq_entry *)&resp,
  1075. sizeof(resp));
  1076. if (unlikely(ret))
  1077. pr_err("Failed to config AENQ ret: %d\n", ret);
  1078. return ret;
  1079. }
  1080. int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
  1081. {
  1082. u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
  1083. int width;
  1084. if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
  1085. pr_err("Reg read timeout occurred\n");
  1086. return -ETIME;
  1087. }
  1088. width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
  1089. ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
  1090. pr_debug("ENA dma width: %d\n", width);
  1091. if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
  1092. pr_err("DMA width illegal value: %d\n", width);
  1093. return -EINVAL;
  1094. }
  1095. ena_dev->dma_addr_bits = width;
  1096. return width;
  1097. }
  1098. int ena_com_validate_version(struct ena_com_dev *ena_dev)
  1099. {
  1100. u32 ver;
  1101. u32 ctrl_ver;
  1102. u32 ctrl_ver_masked;
  1103. /* Make sure the ENA version and the controller version are at least
  1104. * the minimum versions the driver expects
  1105. */
  1106. ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
  1107. ctrl_ver = ena_com_reg_bar_read32(ena_dev,
  1108. ENA_REGS_CONTROLLER_VERSION_OFF);
  1109. if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
  1110. (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
  1111. pr_err("Reg read timeout occurred\n");
  1112. return -ETIME;
  1113. }
  1114. pr_info("ena device version: %d.%d\n",
  1115. (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
  1116. ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
  1117. ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
  1118. if (ver < MIN_ENA_VER) {
  1119. pr_err("ENA version is lower than the minimal version the driver supports\n");
  1120. return -1;
  1121. }
  1122. pr_info("ena controller version: %d.%d.%d implementation version %d\n",
  1123. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
  1124. ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
  1125. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
  1126. ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
  1127. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
  1128. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
  1129. ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
  1130. ctrl_ver_masked =
  1131. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
  1132. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
  1133. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
  1134. /* Validate the ctrl version without the implementation ID */
  1135. if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
  1136. pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
  1137. return -1;
  1138. }
  1139. return 0;
  1140. }
  1141. void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
  1142. {
  1143. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1144. struct ena_com_admin_cq *cq = &admin_queue->cq;
  1145. struct ena_com_admin_sq *sq = &admin_queue->sq;
  1146. struct ena_com_aenq *aenq = &ena_dev->aenq;
  1147. u16 size;
  1148. if (admin_queue->comp_ctx)
  1149. devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
  1150. admin_queue->comp_ctx = NULL;
  1151. size = ADMIN_SQ_SIZE(admin_queue->q_depth);
  1152. if (sq->entries)
  1153. dma_free_coherent(ena_dev->dmadev, size, sq->entries,
  1154. sq->dma_addr);
  1155. sq->entries = NULL;
  1156. size = ADMIN_CQ_SIZE(admin_queue->q_depth);
  1157. if (cq->entries)
  1158. dma_free_coherent(ena_dev->dmadev, size, cq->entries,
  1159. cq->dma_addr);
  1160. cq->entries = NULL;
  1161. size = ADMIN_AENQ_SIZE(aenq->q_depth);
  1162. if (ena_dev->aenq.entries)
  1163. dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
  1164. aenq->dma_addr);
  1165. aenq->entries = NULL;
  1166. }
  1167. void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
  1168. {
  1169. u32 mask_value = 0;
  1170. if (polling)
  1171. mask_value = ENA_REGS_ADMIN_INTR_MASK;
  1172. writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
  1173. ena_dev->admin_queue.polling = polling;
  1174. }
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
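/* Bring up the admin queue: verify the device reports itself ready,
 * allocate the completion contexts and the admin SQ/CQ, program their base
 * address and capability registers, and finally set up the AENQ.
 *
 * Illustrative caller-side ordering (sketch only; aenq_handlers is an
 * assumed caller variable, not defined in this file). The MMIO read
 * request must be initialized first because this function reads DEV_STS
 * through ena_com_reg_bar_read32():
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (!rc)
 *		rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 */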
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);

	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
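/* Create one TX or RX IO queue pair. The CQ is initialized and created on
 * the device first so its index can be handed to the SQ creation command;
 * if the SQ creation fails, the just-created CQ is destroyed again.
 *
 * Illustrative sketch of a caller building an RX queue; the field names
 * come from struct ena_com_create_io_ctx as used below, but the values are
 * examples only, not taken from this file:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_RX,
 *		.qid		= qid,
 *		.queue_size	= 1024,
 *		.msix_vector	= msix_vector,
 *		.mem_queue_type	= mem_queue_type,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */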
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set the driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
}
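/* Device reset handshake: write the reset reason together with the reset
 * bit into DEV_CTL, wait for the device to report RESET_IN_PROGRESS, clear
 * DEV_CTL and wait for the bit to drop again. The reset timeout and the
 * admin completion timeout are both taken from the CAPS register; e.g. an
 * ADMIN_CMD_TO field of 5 yields 5 * 100000 us = 500 ms, since the register
 * resolution is 100 ms and the value is stored internally in microseconds.
 */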
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
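/* Helper for the GET_STATS admin command: the caller picks the stats type
 * and reads the result back from ctx->get_resp once the command completes.
 */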
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));
	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
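/* Push a new MTU to the device via SET_FEATURE(ENA_ADMIN_MTU). The feature
 * support check is done here, so the function returns -EOPNOTSUPP when the
 * device does not advertise the MTU feature. Illustrative use (new_mtu and
 * netdev are assumed caller names, not from this file):
 *
 *	rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
 *	if (rc)
 *		netdev_err(netdev, "Failed to set MTU to %d\n", new_mtu);
 */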
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
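/* Program the RSS hash function that was previously selected with
 * ena_com_fill_hash_function(). The device advertises the supported
 * functions as a bitmask, so (1 << rss->hash_func) must be set in
 * supported_func before the SET_FEATURE command is issued; the hash key
 * itself is passed indirectly through the control buffer.
 */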
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	/* Reject the request only when the selected function is *not* in the
	 * supported bitmask (the original check was inverted).
	 */
	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure the key size is a multiple of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}
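/* Fill the hash-input control table with sane defaults: hash on L3
 * source/destination plus L4 ports for TCP/UDP, on L3 only for plain and
 * fragmented IP, and on L2 addresses for non-IP traffic. Every selection
 * is then checked against the fields the device reports as supported
 * before it is committed with ena_com_set_hash_ctrl().
 */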
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	/* Propagate the set result instead of always returning success */
	return rc;
}
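/* The host-side RSS indirection table is filled entry by entry and then
 * pushed to the device in one shot with ena_com_indirect_table_set(). An
 * illustrative fill loop (rx_qid_for_bucket() is a hypothetical caller
 * helper that maps a table bucket to a queue index; it is not part of this
 * file):
 *
 *	for (i = 0; i < (1 << log_size); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i,
 *						  rx_qid_for_bucket(i));
 */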
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
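/* Allocate all RSS state (indirection table, hash key, hash control) in
 * one call; the error labels unwind in reverse allocation order, and
 * ena_com_rss_destroy() below releases the same three resources.
 */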
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
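/* Both setters above store the interval in device units rather than
 * microseconds: the requested usec value is divided by the
 * intr_delay_resolution reported by the device. For example, with a
 * resolution of 2 us per unit, a 64 us coalescing request is stored as
 * 64 / 2 = 32.
 */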
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);

	return rc;
}
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}
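/* The two entry accessors below convert between user-visible microseconds
 * and device units: the setter divides the interval by
 * intr_delay_resolution before storing it in the table, and the getter
 * multiplies it back on the way out.
 */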
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;

	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}