/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
                ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
                | (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR          0
#define ENA_CTRL_MINOR          0
#define ENA_CTRL_SUB_MINOR      1

#define MIN_ENA_CTRL_VER \
        (((ENA_CTRL_MAJOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
        ((ENA_CTRL_MINOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
        (ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
        ENA_CMD_SUBMITTED,
        ENA_CMD_COMPLETED,
        /* Abort - canceled by the driver */
        ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
        struct completion wait_event;
        struct ena_admin_acq_entry *user_cqe;
        u32 comp_size;
        enum ena_cmd_status status;
        /* status from the device */
        u8 comp_status;
        u8 cmd_opcode;
        bool occupied;
};

struct ena_com_stats_ctx {
        struct ena_admin_aq_get_stats_cmd get_cmd;
        struct ena_admin_acq_get_stats_resp get_resp;
};
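
/* Split a DMA address into the low/high words of an ena_common_mem_addr,
 * after verifying that the address fits within the DMA address width
 * reported by the device.
 */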
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                       struct ena_common_mem_addr *ena_addr,
                                       dma_addr_t addr)
{
        if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
                pr_err("dma address has more bits than the device supports\n");
                return -EINVAL;
        }

        ena_addr->mem_addr_low = (u32)addr;
        ena_addr->mem_addr_high = (u64)addr >> 32;

        return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
        struct ena_com_admin_sq *sq = &queue->sq;
        u16 size = ADMIN_SQ_SIZE(queue->q_depth);

        sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
                                          GFP_KERNEL);

        if (!sq->entries) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        sq->head = 0;
        sq->tail = 0;
        sq->phase = 1;

        sq->db_addr = NULL;

        return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
        struct ena_com_admin_cq *cq = &queue->cq;
        u16 size = ADMIN_CQ_SIZE(queue->q_depth);

        cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
                                          GFP_KERNEL);

        if (!cq->entries) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        cq->head = 0;
        cq->phase = 1;

        return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                                   struct ena_aenq_handlers *aenq_handlers)
{
        struct ena_com_aenq *aenq = &dev->aenq;
        u32 addr_low, addr_high, aenq_caps;
        u16 size;

        dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
        size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
        aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
                                            GFP_KERNEL);

        if (!aenq->entries) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        aenq->head = aenq->q_depth;
        aenq->phase = 1;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

        writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
        writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

        aenq_caps = 0;
        aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
        aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
                      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
                     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
        writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

        if (unlikely(!aenq_handlers)) {
                pr_err("aenq handlers pointer is NULL\n");
                return -EINVAL;
        }

        aenq->aenq_handlers = aenq_handlers;

        return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
                                     struct ena_comp_ctx *comp_ctx)
{
        comp_ctx->occupied = false;
        atomic_dec(&queue->outstanding_cmds);
}
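
/* Look up the completion context for @command_id; when @capture is true the
 * context is marked as occupied and counted as an outstanding command.
 */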
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)
{
        if (unlikely(command_id >= queue->q_depth)) {
                pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                       command_id, queue->q_depth);
                return NULL;
        }

        if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
                pr_err("Completion context is occupied\n");
                return NULL;
        }

        if (capture) {
                atomic_inc(&queue->outstanding_cmds);
                queue->comp_ctx[command_id].occupied = true;
        }

        return &queue->comp_ctx[command_id];
}
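
/* Copy the command into the next admin SQ slot, tag it with the current
 * command id and phase bit, capture a completion context for it and ring
 * the SQ doorbell. Returns the completion context, or an ERR_PTR when the
 * queue is full or the context is already in use.
 */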
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                       struct ena_admin_aq_entry *cmd,
                                                       size_t cmd_size_in_bytes,
                                                       struct ena_admin_acq_entry *comp,
                                                       size_t comp_size_in_bytes)
{
        struct ena_comp_ctx *comp_ctx;
        u16 tail_masked, cmd_id;
        u16 queue_size_mask;
        u16 cnt;

        queue_size_mask = admin_queue->q_depth - 1;

        tail_masked = admin_queue->sq.tail & queue_size_mask;

        /* In case of queue FULL */
        cnt = admin_queue->sq.tail - admin_queue->sq.head;
        if (cnt >= admin_queue->q_depth) {
                pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
                         admin_queue->sq.tail, admin_queue->sq.head,
                         admin_queue->q_depth);
                admin_queue->stats.out_of_space++;
                return ERR_PTR(-ENOSPC);
        }

        cmd_id = admin_queue->curr_cmd_id;

        cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
                ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

        cmd->aq_common_descriptor.command_id |= cmd_id &
                ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
        if (unlikely(!comp_ctx))
                return ERR_PTR(-EINVAL);

        comp_ctx->status = ENA_CMD_SUBMITTED;
        comp_ctx->comp_size = (u32)comp_size_in_bytes;
        comp_ctx->user_cqe = comp;
        comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

        reinit_completion(&comp_ctx->wait_event);

        memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

        admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
                queue_size_mask;

        admin_queue->sq.tail++;
        admin_queue->stats.submitted_cmd++;

        if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
                admin_queue->sq.phase = !admin_queue->sq.phase;

        writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

        return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
        size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
        if (unlikely(!queue->comp_ctx)) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        for (i = 0; i < queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(queue, i, false);
                if (comp_ctx)
                        init_completion(&comp_ctx->wait_event);
        }

        return 0;
}
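
/* Locked wrapper around __ena_com_submit_admin_cmd(): refuses new commands
 * when the admin queue is no longer in the running state, and stops the
 * queue if submission fails.
 */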
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                     struct ena_admin_aq_entry *cmd,
                                                     size_t cmd_size_in_bytes,
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
{
        unsigned long flags;
        struct ena_comp_ctx *comp_ctx;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        if (unlikely(!admin_queue->running_state)) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                return ERR_PTR(-ENODEV);
        }
        comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
                                              cmd_size_in_bytes,
                                              comp,
                                              comp_size_in_bytes);
        if (unlikely(IS_ERR(comp_ctx)))
                admin_queue->running_state = false;
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);

        return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)
{
        size_t size;
        int dev_node = 0;

        memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
                sizeof(struct ena_eth_io_rx_desc);

        size = io_sq->desc_entry_size * io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->desc_addr.virt_addr =
                        dma_zalloc_coherent(ena_dev->dmadev, size,
                                            &io_sq->desc_addr.phys_addr,
                                            GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->desc_addr.virt_addr) {
                        io_sq->desc_addr.virt_addr =
                                dma_zalloc_coherent(ena_dev->dmadev, size,
                                                    &io_sq->desc_addr.phys_addr,
                                                    GFP_KERNEL);
                }
        } else {
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->desc_addr.virt_addr =
                        devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->desc_addr.virt_addr) {
                        io_sq->desc_addr.virt_addr =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                }
        }

        if (!io_sq->desc_addr.virt_addr) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        io_sq->tail = 0;
        io_sq->next_to_comp = 0;
        io_sq->phase = 1;

        return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)
{
        size_t size;
        int prev_node = 0;

        memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

        /* Use the basic completion descriptor for Rx */
        io_cq->cdesc_entry_size_in_bytes =
                (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_cdesc) :
                sizeof(struct ena_eth_io_rx_cdesc_base);

        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

        prev_node = dev_to_node(ena_dev->dmadev);
        set_dev_node(ena_dev->dmadev, ctx->numa_node);
        io_cq->cdesc_addr.virt_addr =
                dma_zalloc_coherent(ena_dev->dmadev, size,
                                    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
        set_dev_node(ena_dev->dmadev, prev_node);
        if (!io_cq->cdesc_addr.virt_addr) {
                io_cq->cdesc_addr.virt_addr =
                        dma_zalloc_coherent(ena_dev->dmadev, size,
                                            &io_cq->cdesc_addr.phys_addr,
                                            GFP_KERNEL);
        }

        if (!io_cq->cdesc_addr.virt_addr) {
                pr_err("memory allocation failed");
                return -ENOMEM;
        }

        io_cq->phase = 1;
        io_cq->head = 0;

        return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                                   struct ena_admin_acq_entry *cqe)
{
        struct ena_comp_ctx *comp_ctx;
        u16 cmd_id;

        cmd_id = cqe->acq_common_descriptor.command &
                ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
        if (unlikely(!comp_ctx)) {
                pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
                admin_queue->running_state = false;
                return;
        }

        comp_ctx->status = ENA_CMD_COMPLETED;
        comp_ctx->comp_status = cqe->acq_common_descriptor.status;

        if (comp_ctx->user_cqe)
                memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

        if (!admin_queue->polling)
                complete(&comp_ctx->wait_event);
}
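
/* Walk the admin CQ, consuming every entry whose phase bit matches the
 * expected phase, dispatching each one to
 * ena_com_handle_single_admin_completion(), and then advance the CQ/SQ head
 * counters by the number of consumed completions.
 */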
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
        struct ena_admin_acq_entry *cqe = NULL;
        u16 comp_num = 0;
        u16 head_masked;
        u8 phase;

        head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
        phase = admin_queue->cq.phase;

        cqe = &admin_queue->cq.entries[head_masked];

        /* Go over all the completions */
        while ((cqe->acq_common_descriptor.flags &
                ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
                rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);

                head_masked++;
                comp_num++;
                if (unlikely(head_masked == admin_queue->q_depth)) {
                        head_masked = 0;
                        phase = !phase;
                }

                cqe = &admin_queue->cq.entries[head_masked];
        }

        admin_queue->cq.head += comp_num;
        admin_queue->cq.phase = phase;
        admin_queue->sq.head += comp_num;
        admin_queue->stats.completed_cmd += comp_num;
}
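
/* Translate the completion status reported by the device into a negative
 * errno value (0 on success).
 */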
static int ena_com_comp_status_to_errno(u8 comp_status)
{
        if (unlikely(comp_status != 0))
                pr_err("admin command failed[%u]\n", comp_status);

        if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
                return -EINVAL;

        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
                return 0;
        case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
                return -ENOMEM;
        case ENA_ADMIN_UNSUPPORTED_OPCODE:
                return -EPERM;
        case ENA_ADMIN_BAD_OPCODE:
        case ENA_ADMIN_MALFORMED_REQUEST:
        case ENA_ADMIN_ILLEGAL_PARAMETER:
        case ENA_ADMIN_UNKNOWN_ERROR:
                return -EINVAL;
        }

        return 0;
}
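
/* Poll the admin CQ until the command completes, is aborted or the
 * ADMIN_CMD_TIMEOUT_US timeout expires, then release the completion context.
 */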
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags;
        u32 start_time;
        int ret;

        start_time = ((u32)jiffies_to_usecs(jiffies));

        while (comp_ctx->status == ENA_CMD_SUBMITTED) {
                if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
                    ADMIN_CMD_TIMEOUT_US) {
                        pr_err("Wait for completion (polling) timeout\n");
                        /* ENA didn't have any completion */
                        spin_lock_irqsave(&admin_queue->q_lock, flags);
                        admin_queue->stats.no_completion++;
                        admin_queue->running_state = false;
                        spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                        ret = -ETIME;
                        goto err;
                }

                spin_lock_irqsave(&admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                msleep(100);
        }

        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
                pr_err("Command was aborted\n");
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                admin_queue->stats.aborted_cmd++;
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                ret = -ENODEV;
                goto err;
        }

        WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
             comp_ctx->status);

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags;
        int ret;

        wait_for_completion_timeout(&comp_ctx->wait_event,
                                    usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));

        /* In case the command wasn't completed find out the root cause.
         * There might be 2 kinds of errors
         * 1) No completion (timeout reached)
         * 2) There is a completion but the driver didn't receive the MSI-X
         *    interrupt.
         */
        if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                admin_queue->stats.no_completion++;
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                if (comp_ctx->status == ENA_CMD_COMPLETED)
                        pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
                               comp_ctx->cmd_opcode);
                else
                        pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
                               comp_ctx->cmd_opcode, comp_ctx->status);

                admin_queue->running_state = false;
                ret = -ETIME;
                goto err;
        }

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}

/* This method reads the hardware device register by posting a write to the
 * MMIO_REG_READ register and waiting for the device to write the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret;
        unsigned long flags;
        int i;

        might_sleep();

        /* If readless is disabled, perform regular read */
        if (!mmio_read->readless_supported)
                return readl(ena_dev->reg_bar + offset);

        spin_lock_irqsave(&mmio_read->lock, flags);
        mmio_read->seq_num++;

        read_resp->req_id = mmio_read->seq_num + 0xDEAD;
        mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
                ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
        mmio_read_reg |= mmio_read->seq_num &
                ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

        /* make sure read_resp->req_id gets updated before the hw can write
         * there
         */
        wmb();

        writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

        for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
                if (read_resp->req_id == mmio_read->seq_num)
                        break;

                udelay(1);
        }

        if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
                pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
                       mmio_read->seq_num, offset, read_resp->req_id,
                       read_resp->reg_off);
                ret = ENA_MMIO_READ_TIMEOUT;
                goto err;
        }

        if (read_resp->reg_off != offset) {
                pr_err("Read failure: wrong offset provided");
                ret = ENA_MMIO_READ_TIMEOUT;
        } else {
                ret = read_resp->reg_val;
        }
err:
        spin_unlock_irqrestore(&mmio_read->lock, flags);

        return ret;
}

/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                             struct ena_com_admin_queue *admin_queue)
{
        if (admin_queue->polling)
                return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
                                                                 admin_queue);

        return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
                                                            admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
        u8 direction;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        destroy_cmd.sq.sq_identity |= (direction <<
                ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

        destroy_cmd.sq.sq_idx = io_sq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            sizeof(destroy_cmd),
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));
        if (unlikely(ret && (ret != -ENODEV)))
                pr_err("failed to destroy io sq error: %d\n", ret);

        return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)
{
        size_t size;

        if (io_cq->cdesc_addr.virt_addr) {
                size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

                dma_free_coherent(ena_dev->dmadev, size,
                                  io_cq->cdesc_addr.virt_addr,
                                  io_cq->cdesc_addr.phys_addr);

                io_cq->cdesc_addr.virt_addr = NULL;
        }

        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;

                if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                        dma_free_coherent(ena_dev->dmadev, size,
                                          io_sq->desc_addr.virt_addr,
                                          io_sq->desc_addr.phys_addr);
                else
                        devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

                io_sq->desc_addr.virt_addr = NULL;
        }
}
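
/* Poll the device status register until the RESET_IN_PROGRESS bit matches
 * @exp_state or the timeout (in units of 100 ms) expires.
 */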
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                                u16 exp_state)
{
        u32 val, i;

        for (i = 0; i < timeout; i++) {
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
                        pr_err("Reg read timeout occurred\n");
                        return -ETIME;
                }

                if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
                    exp_state)
                        return 0;

                /* The resolution of the timeout is 100ms */
                msleep(100);
        }

        return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                               enum ena_admin_aq_feature_id feature_id)
{
        u32 feature_mask = 1 << feature_id;

        /* Device attributes are always supported */
        if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
            !(ena_dev->supported_features & feature_mask))
                return false;

        return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_get_feat_cmd get_cmd;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
                pr_debug("Feature %d isn't supported\n", feature_id);
                return -EPERM;
        }

        memset(&get_cmd, 0x0, sizeof(get_cmd));
        admin_queue = &ena_dev->admin_queue;

        get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

        if (control_buff_size)
                get_cmd.aq_common_descriptor.flags =
                        ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        else
                get_cmd.aq_common_descriptor.flags = 0;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &get_cmd.control_buffer.address,
                                   control_buf_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        get_cmd.control_buffer.length = control_buff_size;

        get_cmd.feat_common.feature_id = feature_id;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)
                                            &get_cmd,
                                            sizeof(get_cmd),
                                            (struct ena_admin_acq_entry *)
                                            get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                pr_err("Failed to submit get_feature command %d error: %d\n",
                       feature_id, ret);

        return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id)
{
        return ena_com_get_feature_ex(ena_dev,
                                      get_resp,
                                      feature_id,
                                      0,
                                      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        rss->hash_key =
                dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
                                    &rss->hash_key_dma_addr, GFP_KERNEL);

        if (unlikely(!rss->hash_key))
                return -ENOMEM;

        return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_key)
                dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
                                  rss->hash_key, rss->hash_key_dma_addr);
        rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        rss->hash_ctrl =
                dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
                                    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

        if (unlikely(!rss->hash_ctrl))
                return -ENOMEM;

        return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_ctrl)
                dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
                                  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
        rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
                                           u16 log_size)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        size_t tbl_size;
        int ret;

        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
        if (unlikely(ret))
                return ret;

        if ((get_resp.u.ind_table.min_size > log_size) ||
            (get_resp.u.ind_table.max_size < log_size)) {
                pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
                       1 << log_size, 1 << get_resp.u.ind_table.min_size,
                       1 << get_resp.u.ind_table.max_size);
                return -EINVAL;
        }

        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        rss->rss_ind_tbl =
                dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
                                    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
        if (unlikely(!rss->rss_ind_tbl))
                goto mem_err1;

        tbl_size = (1ULL << log_size) * sizeof(u16);
        rss->host_rss_ind_tbl =
                devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
        if (unlikely(!rss->host_rss_ind_tbl))
                goto mem_err2;

        rss->tbl_log_size = log_size;

        return 0;

mem_err2:
        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
                          rss->rss_ind_tbl_dma_addr);
        rss->rss_ind_tbl = NULL;
mem_err1:
        rss->tbl_log_size = 0;
        return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        size_t tbl_size = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        if (rss->rss_ind_tbl)
                dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
                                  rss->rss_ind_tbl_dma_addr);
        rss->rss_ind_tbl = NULL;

        if (rss->host_rss_ind_tbl)
                devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
        rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_sq_cmd create_cmd;
        struct ena_admin_acq_create_sq_resp_desc cmd_completion;
        u8 direction;
        int ret;

        memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        create_cmd.sq_identity |= (direction <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

        create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

        create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

        create_cmd.sq_caps_3 |=
                ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

        create_cmd.cq_idx = cq_idx;
        create_cmd.sq_depth = io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ret = ena_com_mem_addr_set(ena_dev,
                                           &create_cmd.sq_ba,
                                           io_sq->desc_addr.phys_addr);
                if (unlikely(ret)) {
                        pr_err("memory address set failed\n");
                        return ret;
                }
        }

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            sizeof(create_cmd),
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                pr_err("Failed to create IO SQ. error: %d\n", ret);
                return ret;
        }

        io_sq->idx = cmd_completion.sq_idx;

        io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                (uintptr_t)cmd_completion.sq_doorbell_offset);

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
                                + cmd_completion.llq_headers_offset);

                io_sq->desc_addr.pbuf_dev_addr =
                        (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
                        cmd_completion.llq_descriptors_offset);
        }

        pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

        return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_com_io_sq *io_sq;
        u16 qid;
        int i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                qid = rss->host_rss_ind_tbl[i];
                if (qid >= ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;

                io_sq = &ena_dev->io_sq_queues[qid];

                if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
                        return -EINVAL;

                rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
        }

        return 0;
}
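
/* Translate the device queue indices stored in the RSS indirection table
 * back into host-side queue ids.
 */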
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
        u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
        struct ena_rss *rss = &ena_dev->rss;
        u8 idx;
        u16 i;

        for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
                dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;
                idx = (u8)rss->rss_ind_tbl[i].cq_idx;

                if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;

                rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
        }

        return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
        size_t size;

        size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
        ena_dev->intr_moder_tbl =
                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
        if (!ena_dev->intr_moder_tbl)
                return -ENOMEM;

        ena_com_config_default_interrupt_moderation_table(ena_dev);

        return 0;
}
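
/* Rescale the Rx interrupt moderation table and the Tx interval according
 * to the interrupt delay resolution reported by the device.
 */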
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
        unsigned int i;

        if (!intr_delay_resolution) {
                pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
                intr_delay_resolution = 1;
        }
        ena_dev->intr_delay_resolution = intr_delay_resolution;

        /* update Rx */
        for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
                intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

        /* update Tx */
        ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  size_t cmd_size,
                                  struct ena_admin_acq_entry *comp,
                                  size_t comp_size)
{
        struct ena_comp_ctx *comp_ctx;
        int ret;

        comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                            comp, comp_size);
        if (unlikely(IS_ERR(comp_ctx))) {
                if (comp_ctx == ERR_PTR(-ENODEV))
                        pr_debug("Failed to submit command [%ld]\n",
                                 PTR_ERR(comp_ctx));
                else
                        pr_err("Failed to submit command [%ld]\n",
                               PTR_ERR(comp_ctx));

                return PTR_ERR(comp_ctx);
        }

        ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
        if (unlikely(ret)) {
                if (admin_queue->running_state)
                        pr_err("Failed to process command. ret = %d\n", ret);
                else
                        pr_debug("Failed to process command. ret = %d\n", ret);
        }
        return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_cq_cmd create_cmd;
        struct ena_admin_acq_create_cq_resp_desc cmd_completion;
        int ret;

        memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

        create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
                ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
        create_cmd.cq_caps_1 |=
                ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

        create_cmd.msix_vector = io_cq->msix_vector;
        create_cmd.cq_depth = io_cq->q_depth;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &create_cmd.cq_ba,
                                   io_cq->cdesc_addr.phys_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            sizeof(create_cmd),
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                pr_err("Failed to create IO CQ. error: %d\n", ret);
                return ret;
        }

        io_cq->idx = cmd_completion.cq_idx;

        io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                cmd_completion.cq_interrupt_unmask_register_offset);

        if (cmd_completion.cq_head_db_register_offset)
                io_cq->cq_head_db_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.cq_head_db_register_offset);

        if (cmd_completion.numa_node_register_offset)
                io_cq->numa_node_cfg_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.numa_node_register_offset);

        pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

        return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)
{
        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                pr_err("Invalid queue number %d but the max is %d\n", qid,
                       ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }

        *io_sq = &ena_dev->io_sq_queues[qid];
        *io_cq = &ena_dev->io_cq_queues[qid];

        return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        if (!admin_queue->comp_ctx)
                return;

        for (i = 0; i < admin_queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(admin_queue, i, false);
                if (unlikely(!comp_ctx))
                        break;

                comp_ctx->status = ENA_CMD_ABORTED;

                complete(&comp_ctx->wait_event);
        }
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                msleep(20);
                spin_lock_irqsave(&admin_queue->q_lock, flags);
        }
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

        destroy_cmd.cq_idx = io_cq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            sizeof(destroy_cmd),
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));

        if (unlikely(ret && (ret != -ENODEV)))
                pr_err("Failed to destroy IO CQ. error: %d\n", ret);

        return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
        return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
        u16 depth = ena_dev->aenq.q_depth;

        WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

        /* Init head_db to mark that all entries in the queue
         * are initially available
         */
        writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
        if (ret) {
                pr_info("Can't get aenq configuration\n");
                return ret;
        }

        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
                pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
                        get_resp.u.aenq.supported_groups, groups_flag);
                return -EPERM;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
        cmd.u.aenq.enabled_groups = groups_flag;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to config AENQ ret: %d\n", ret);

        return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
        u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
        int width;

        if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
                ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

        pr_debug("ENA dma width: %d\n", width);

        if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
                pr_err("DMA width illegal value: %d\n", width);
                return -EINVAL;
        }

        ena_dev->dma_addr_bits = width;

        return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
        u32 ver;
        u32 ctrl_ver;
        u32 ctrl_ver_masked;

        /* Make sure the ENA version and the controller version are at least
         * as high as the versions the driver expects
         */
        ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
        ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                          ENA_REGS_CONTROLLER_VERSION_OFF);

        if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
                     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        pr_info("ena device version: %d.%d\n",
                (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
                        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

        if (ver < MIN_ENA_VER) {
                pr_err("ENA version is lower than the minimal version the driver supports\n");
                return -1;
        }

        pr_info("ena controller version: %d.%d.%d implementation version %d\n",
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

        ctrl_ver_masked =
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

        /* Validate the ctrl version without the implementation ID */
        if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
                pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
                return -1;
        }

        return 0;
}
  1135. void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
  1136. {
  1137. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1138. struct ena_com_admin_cq *cq = &admin_queue->cq;
  1139. struct ena_com_admin_sq *sq = &admin_queue->sq;
  1140. struct ena_com_aenq *aenq = &ena_dev->aenq;
  1141. u16 size;
  1142. if (admin_queue->comp_ctx)
  1143. devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
  1144. admin_queue->comp_ctx = NULL;
  1145. size = ADMIN_SQ_SIZE(admin_queue->q_depth);
  1146. if (sq->entries)
  1147. dma_free_coherent(ena_dev->dmadev, size, sq->entries,
  1148. sq->dma_addr);
  1149. sq->entries = NULL;
  1150. size = ADMIN_CQ_SIZE(admin_queue->q_depth);
  1151. if (cq->entries)
  1152. dma_free_coherent(ena_dev->dmadev, size, cq->entries,
  1153. cq->dma_addr);
  1154. cq->entries = NULL;
  1155. size = ADMIN_AENQ_SIZE(aenq->q_depth);
  1156. if (ena_dev->aenq.entries)
  1157. dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
  1158. aenq->dma_addr);
  1159. aenq->entries = NULL;
  1160. }
  1161. void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
  1162. {
  1163. ena_dev->admin_queue.polling = polling;
  1164. }
  1165. int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
  1166. {
  1167. struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
  1168. spin_lock_init(&mmio_read->lock);
  1169. mmio_read->read_resp =
  1170. dma_zalloc_coherent(ena_dev->dmadev,
  1171. sizeof(*mmio_read->read_resp),
  1172. &mmio_read->read_resp_dma_addr, GFP_KERNEL);
  1173. if (unlikely(!mmio_read->read_resp))
  1174. return -ENOMEM;
  1175. ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
  1176. mmio_read->read_resp->req_id = 0x0;
  1177. mmio_read->seq_num = 0x0;
  1178. mmio_read->readless_supported = true;
  1179. return 0;
  1180. }
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

        dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
                          mmio_read->read_resp, mmio_read->read_resp_dma_addr);

        mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        u32 addr_low, addr_high;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

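/* ena_com_admin_init:
 * Verify the device is ready, allocate the admin SQ/CQ and the AENQ, write
 * their base addresses and capability words to the device registers and mark
 * the admin queue as running. On any failure the partially initialized admin
 * resources are destroyed.
 */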
int ena_com_admin_init(struct ena_com_dev *ena_dev,
                       struct ena_aenq_handlers *aenq_handlers,
                       bool init_spinlock)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
        int ret;

        dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
                pr_err("Device isn't ready, abort com init\n");
                return -ENODEV;
        }

        admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

        admin_queue->q_dmadev = ena_dev->dmadev;
        admin_queue->polling = false;
        admin_queue->curr_cmd_id = 0;

        atomic_set(&admin_queue->outstanding_cmds, 0);

        if (init_spinlock)
                spin_lock_init(&admin_queue->q_lock);

        ret = ena_com_init_comp_ctxt(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_sq(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_cq(admin_queue);
        if (ret)
                goto error;

        admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                                                  ENA_REGS_AQ_DB_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

        aq_caps = 0;
        aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
        aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
                    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
                   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

        acq_caps = 0;
        acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
        acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
                     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
                    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

        writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
        writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);

        ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
        if (ret)
                goto error;

        admin_queue->running_state = true;

        return 0;
error:
        ena_com_admin_destroy(ena_dev);

        return ret;
}

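/* ena_com_create_io_queue:
 * Initialize the host-side SQ/CQ context for the requested qid and issue the
 * admin commands that create the CQ and then the SQ on the device. If SQ
 * creation fails the CQ is destroyed; on any failure the queue memory is
 * freed.
 */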
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
                            struct ena_com_create_io_ctx *ctx)
{
        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;
        int ret;

        if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
                pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
                       ctx->qid, ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }

        io_sq = &ena_dev->io_sq_queues[ctx->qid];
        io_cq = &ena_dev->io_cq_queues[ctx->qid];

        memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
        memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

        /* Init CQ */
        io_cq->q_depth = ctx->queue_size;
        io_cq->direction = ctx->direction;
        io_cq->qid = ctx->qid;

        io_cq->msix_vector = ctx->msix_vector;

        io_sq->q_depth = ctx->queue_size;
        io_sq->direction = ctx->direction;
        io_sq->qid = ctx->qid;

        io_sq->mem_queue_type = ctx->mem_queue_type;

        if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                /* header length is limited to 8 bits */
                io_sq->tx_max_header_size =
                        min_t(u32, ena_dev->tx_max_header_size, SZ_256);

        ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
        if (ret)
                goto error;
        ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_cq(ena_dev, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
        if (ret)
                goto destroy_io_cq;

        return 0;

destroy_io_cq:
        ena_com_destroy_io_cq(ena_dev, io_cq);
error:
        ena_com_io_queue_free(ena_dev, io_sq, io_cq);
        return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;

        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
                       ENA_TOTAL_NUM_QUEUES);
                return;
        }

        io_sq = &ena_dev->io_sq_queues[qid];
        io_cq = &ena_dev->io_cq_queues[qid];

        ena_com_destroy_io_sq(ena_dev, io_sq);
        ena_com_destroy_io_cq(ena_dev, io_cq);

        ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
                            struct ena_admin_get_feat_resp *resp)
{
        return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

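/* ena_com_get_dev_attr_feat:
 * Query the device attributes, max queues, AENQ config and stateless offload
 * features and cache the results in the supplied get_feat_ctx.
 */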
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_DEVICE_ATTRIBUTES);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
               sizeof(get_resp.u.dev_attr));
        ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_MAX_QUEUES_NUM);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
               sizeof(get_resp.u.max_queue));
        ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_AENQ_CONFIG);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
               sizeof(get_resp.u.aenq));

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
               sizeof(get_resp.u.offload));

        return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
        ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
                                                     u16 group)
{
        struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

        if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
                return aenq_handlers->handlers[group];

        return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq = &dev->aenq;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;

        masked_head = aenq->head & (aenq->q_depth - 1);
        phase = aenq->phase;
        aenq_e = &aenq->entries[masked_head]; /* Get first entry */
        aenq_common = &aenq_e->aenq_common_desc;

        /* Go over all the events */
        while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
               phase) {
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom,
                         (u64)aenq_common->timestamp_low +
                                 ((u64)aenq_common->timestamp_high << 32));

                /* Handle specific event */
                handler_cb = ena_com_get_specific_aenq_cb(dev,
                                                          aenq_common->group);
                handler_cb(data, aenq_e); /* call the actual event handler */

                /* Get next event entry */
                masked_head++;
                processed++;

                if (unlikely(masked_head == aenq->q_depth)) {
                        masked_head = 0;
                        phase = !phase;
                }
                aenq_e = &aenq->entries[masked_head];
                aenq_common = &aenq_e->aenq_common_desc;
        }

        aenq->head += processed;
        aenq->phase = phase;

        /* Don't update aenq doorbell if there weren't any processed events */
        if (!processed)
                return;

        /* write the aenq doorbell after all AENQ descriptors were read */
        mb();
        writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

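/* ena_com_dev_reset:
 * Trigger a device reset through the DEV_CTL register and poll the DEV_STS
 * register until the reset-in-progress bit turns on and then off again,
 * using the timeout advertised in the capabilities register.
 */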
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
        u32 stat, timeout, cap, reset_val;
        int rc;

        stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

        if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
                     (cap == ENA_MMIO_READ_TIMEOUT))) {
                pr_err("Reg read32 timeout occurred\n");
                return -ETIME;
        }

        if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
                pr_err("Device isn't ready, can't reset device\n");
                return -EINVAL;
        }

        timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
                  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
        if (timeout == 0) {
                pr_err("Invalid timeout value\n");
                return -EINVAL;
        }

        /* start reset */
        reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
        writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

        /* Write again the MMIO read request address */
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        rc = wait_for_reset_state(ena_dev, timeout,
                                  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
        if (rc != 0) {
                pr_err("Reset indication didn't turn on\n");
                return rc;
        }

        /* reset done */
        writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
        rc = wait_for_reset_state(ena_dev, timeout, 0);
        if (rc != 0) {
                pr_err("Reset indication didn't turn off\n");
                return rc;
        }

        return 0;
}

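/* ena_get_dev_stats:
 * Helper that builds an ENA_ADMIN_GET_STATS admin command of the requested
 * type and executes it, leaving the response in the caller's stats context.
 */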
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
                             struct ena_com_stats_ctx *ctx,
                             enum ena_admin_get_stats_type type)
{
        struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
        struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
        struct ena_com_admin_queue *admin_queue;
        int ret;

        admin_queue = &ena_dev->admin_queue;

        get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
        get_cmd->aq_common_descriptor.flags = 0;
        get_cmd->type = type;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)get_cmd,
                                            sizeof(*get_cmd),
                                            (struct ena_admin_acq_entry *)get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                pr_err("Failed to get stats. error: %d\n", ret);

        return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
                                struct ena_admin_basic_stats *stats)
{
        struct ena_com_stats_ctx ctx;
        int ret;

        memset(&ctx, 0x0, sizeof(ctx));
        ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
        if (likely(ret == 0))
                memcpy(stats, &ctx.get_resp.basic_stats,
                       sizeof(ctx.get_resp.basic_stats));

        return ret;
}

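/* ena_com_set_dev_mtu:
 * Push the requested MTU to the device via the SET_FEATURE(MTU) admin
 * command, after verifying the feature is supported.
 */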
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
                pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return -EPERM;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_MTU;
        cmd.u.mtu.mtu = mtu;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

        return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
                                 struct ena_admin_feature_offload_desc *offload)
{
        int ret;
        struct ena_admin_get_feat_resp resp;

        ret = ena_com_get_feature(ena_dev, &resp,
                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
        if (unlikely(ret)) {
                pr_err("Failed to get offload capabilities %d\n", ret);
                return ret;
        }

        memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

        return 0;
}

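/* ena_com_set_hash_function:
 * Program the previously filled RSS hash function, init value and key into
 * the device via SET_FEATURE(RSS_HASH_FUNCTION), validating first that the
 * device advertises support for the selected function.
 */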
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_FUNCTION)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_HASH_FUNCTION);
                return -EPERM;
        }

        /* Validate hash function is supported */
        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_HASH_FUNCTION);
        if (unlikely(ret))
                return ret;

        /* The selected function must be present in the supported bitmap */
        if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
                pr_err("Func hash %d isn't supported by device, abort\n",
                       rss->hash_func);
                return -EPERM;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
        cmd.u.flow_hash_func.init_val = rss->hash_init_val;
        cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_key_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = sizeof(*rss->hash_key);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret)) {
                pr_err("Failed to set hash function %d. error: %d\n",
                       rss->hash_func, ret);
                return -EINVAL;
        }

        return 0;
}

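/* ena_com_fill_hash_function:
 * Store the requested hash function, key and initial value in the host RSS
 * context and push them to the device; on failure the previous function is
 * read back from the device to restore the cached value.
 */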
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
                               enum ena_admin_hash_functions func,
                               const u8 *key, u16 key_len, u32 init_val)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                rss->hash_key;
        int rc;

        /* Make sure size is a mult of DWs */
        if (unlikely(key_len & 0x3))
                return -EINVAL;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key));
        if (unlikely(rc))
                return rc;

        if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
                pr_err("Flow hash function %d isn't supported\n", func);
                return -EPERM;
        }

        switch (func) {
        case ENA_ADMIN_TOEPLITZ:
                if (key_len > sizeof(hash_key->key)) {
                        pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
                               key_len, sizeof(hash_key->key));
                        return -EINVAL;
                }

                memcpy(hash_key->key, key, key_len);
                rss->hash_init_val = init_val;
                hash_key->keys_num = key_len >> 2;
                break;
        case ENA_ADMIN_CRC32:
                rss->hash_init_val = init_val;
                break;
        default:
                pr_err("Invalid hash function (%d)\n", func);
                return -EINVAL;
        }

        rc = ena_com_set_hash_function(ena_dev);

        /* Restore the old function */
        if (unlikely(rc))
                ena_com_get_hash_function(ena_dev, NULL, NULL);

        return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
                              enum ena_admin_hash_functions *func,
                              u8 *key)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                rss->hash_key;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key));
        if (unlikely(rc))
                return rc;

        rss->hash_func = get_resp.u.flow_hash_func.selected_func;
        if (func)
                *func = rss->hash_func;

        if (key)
                memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

        return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
                          enum ena_admin_flow_hash_proto proto,
                          u16 *fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_INPUT,
                                    rss->hash_ctrl_dma_addr,
                                    sizeof(*rss->hash_ctrl));
        if (unlikely(rc))
                return rc;

        if (fields)
                *fields = rss->hash_ctrl->selected_fields[proto].fields;

        return 0;
}

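/* ena_com_set_hash_ctrl:
 * Write the host hash-input (flow hash fields) table to the device via
 * SET_FEATURE(RSS_HASH_INPUT), with L3/L4 input sorting enabled.
 */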
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_HASH_INPUT);
                return -EPERM;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
        cmd.u.flow_hash_input.enabled_input_sort =
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_ctrl_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }
        cmd.control_buffer.length = sizeof(*hash_ctrl);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret))
                pr_err("Failed to set hash input. error: %d\n", ret);

        return ret;
}

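/* ena_com_set_default_hash_ctrl:
 * Fill the host hash-input table with the default L2/L3/L4 field selection
 * per protocol, verify each selection against the fields the device reports
 * as supported, and push the table to the device.
 */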
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl =
                rss->hash_ctrl;
        u16 available_fields = 0;
        int rc, i;

        /* Get the supported hash input */
        rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
        if (unlikely(rc))
                return rc;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
                ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

        for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
                available_fields = hash_ctrl->selected_fields[i].fields &
                                   hash_ctrl->supported_fields[i].fields;
                if (available_fields != hash_ctrl->selected_fields[i].fields) {
                        pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
                               i, hash_ctrl->supported_fields[i].fields,
                               hash_ctrl->selected_fields[i].fields);
                        return -EPERM;
                }
        }

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
                           enum ena_admin_flow_hash_proto proto,
                           u16 hash_fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        u16 supported_fields;
        int rc;

        if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
                pr_err("Invalid proto num (%u)\n", proto);
                return -EINVAL;
        }

        /* Get the ctrl table */
        rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
        if (unlikely(rc))
                return rc;

        /* Make sure all the fields are supported */
        supported_fields = hash_ctrl->supported_fields[proto].fields;
        if ((hash_fields & supported_fields) != hash_fields) {
                pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
                       proto, hash_fields, supported_fields);
        }

        hash_ctrl->selected_fields[proto].fields = hash_fields;

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
                                      u16 entry_idx, u16 entry_value)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
                return -EINVAL;

        if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
                return -EINVAL;

        rss->host_rss_ind_tbl[entry_idx] = entry_value;

        return 0;
}

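/* ena_com_indirect_table_set:
 * Convert the host RSS indirection table to the device representation and
 * program it via SET_FEATURE(RSS_REDIRECTION_TABLE_CONFIG).
 */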
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(
                    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
                return -EPERM;
        }

        ret = ena_com_ind_tbl_convert_to_device(ena_dev);
        if (ret) {
                pr_err("Failed to convert host indirection table to device table\n");
                return ret;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
        cmd.u.ind_table.size = rss->tbl_log_size;
        cmd.u.ind_table.inline_index = 0xFFFFFFFF;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->rss_ind_tbl_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set indirect table. error: %d\n", ret);

        return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        u32 tbl_size;
        int i, rc;

        tbl_size = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
                                    rss->rss_ind_tbl_dma_addr,
                                    tbl_size);
        if (unlikely(rc))
                return rc;

        if (!ind_tbl)
                return 0;

        rc = ena_com_ind_tbl_convert_from_device(ena_dev);
        if (unlikely(rc))
                return rc;

        for (i = 0; i < (1 << rss->tbl_log_size); i++)
                ind_tbl[i] = rss->host_rss_ind_tbl[i];

        return 0;
}

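/* ena_com_rss_init:
 * Allocate all RSS resources (indirection table, hash key and hash control)
 * and unwind the allocations in reverse order if any step fails.
 */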
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
        int rc;

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

        rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
        if (unlikely(rc))
                goto err_indr_tbl;

        rc = ena_com_hash_key_allocate(ena_dev);
        if (unlikely(rc))
                goto err_hash_key;

        rc = ena_com_hash_ctrl_init(ena_dev);
        if (unlikely(rc))
                goto err_hash_ctrl;

        return 0;

err_hash_ctrl:
        ena_com_hash_key_destroy(ena_dev);
err_hash_key:
        ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
        return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
        ena_com_indirect_table_destroy(ena_dev);
        ena_com_hash_key_destroy(ena_dev);
        ena_com_hash_ctrl_destroy(ena_dev);

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        host_attr->host_info =
                dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
                                    &host_attr->host_info_dma_addr, GFP_KERNEL);
        if (unlikely(!host_attr->host_info))
                return -ENOMEM;

        return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
                                u32 debug_area_size)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        host_attr->debug_area_virt_addr =
                dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
                                    &host_attr->debug_area_dma_addr, GFP_KERNEL);
        if (unlikely(!host_attr->debug_area_virt_addr)) {
                host_attr->debug_area_size = 0;
                return -ENOMEM;
        }

        host_attr->debug_area_size = debug_area_size;

        return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->host_info) {
                dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
                                  host_attr->host_info_dma_addr);
                host_attr->host_info = NULL;
        }
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->debug_area_virt_addr) {
                dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
                                  host_attr->debug_area_virt_addr,
                                  host_attr->debug_area_dma_addr);
                host_attr->debug_area_virt_addr = NULL;
        }
}

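/* ena_com_set_host_attributes:
 * Pass the host info and debug area base addresses (and the debug area size)
 * to the device via SET_FEATURE(HOST_ATTR_CONFIG).
 */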
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        /* Host attribute config is called before ena_com_get_dev_attr_feat
         * so ena_com can't check if the feature is supported.
         */

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.debug_ba,
                                   host_attr->debug_area_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.os_info_ba,
                                   host_attr->host_info_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set host attributes: %d\n", ret);

        return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
        return ena_com_check_supported_feature_id(ena_dev,
                                                  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
                                                      u32 tx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                pr_err("Illegal interrupt delay granularity value\n");
                return -EFAULT;
        }

        ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
                ena_dev->intr_delay_resolution;

        return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
                                                      u32 rx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                pr_err("Illegal interrupt delay granularity value\n");
                return -EFAULT;
        }

        /* We use LOWEST entry of moderation table for storing
         * nonadaptive interrupt coalescing values
         */
        ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                rx_coalesce_usecs / ena_dev->intr_delay_resolution;

        return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        if (ena_dev->intr_moder_tbl)
                devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
        ena_dev->intr_moder_tbl = NULL;
}

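/* ena_com_init_interrupt_moderation:
 * Query the interrupt moderation feature; if the device supports it,
 * allocate the moderation table, record the delay resolution and enable
 * adaptive moderation, otherwise disable adaptive moderation.
 */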
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        struct ena_admin_get_feat_resp get_resp;
        u16 delay_resolution;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_INTERRUPT_MODERATION);

        if (rc) {
                if (rc == -EPERM) {
                        pr_debug("Feature %d isn't supported\n",
                                 ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
                               rc);
                }

                /* no moderation supported, disable adaptive support */
                ena_com_disable_adaptive_moderation(ena_dev);
                return rc;
        }

        rc = ena_com_init_interrupt_moderation_table(ena_dev);
        if (rc)
                goto err;

        /* if moderation is supported by device we set adaptive moderation */
        delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
        ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
        ena_com_enable_adaptive_moderation(ena_dev);

        return 0;
err:
        ena_com_destroy_interrupt_moderation(ena_dev);

        return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (!intr_moder_tbl)
                return;

        intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                ENA_INTR_LOWEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
                ENA_INTR_LOWEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
                ENA_INTR_LOWEST_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
                ENA_INTR_LOW_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
                ENA_INTR_LOW_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
                ENA_INTR_LOW_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
                ENA_INTR_MID_USECS;
        intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
                ENA_INTR_MID_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
                ENA_INTR_MID_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
                ENA_INTR_HIGH_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
                ENA_INTR_HIGH_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
                ENA_INTR_HIGH_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
                ENA_INTR_HIGHEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
                ENA_INTR_HIGHEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
                ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (intr_moder_tbl)
                return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

        return 0;
}

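/* ena_com_init_intr_moderation_entry:
 * Store a caller-supplied moderation entry in the table for the given level,
 * converting the interval from microseconds to device units when a delay
 * resolution is set.
 */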
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                        enum ena_intr_moder_level level,
                                        struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                intr_moder_tbl[level].intr_moder_interval /=
                        ena_dev->intr_delay_resolution;

        intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

        /* use hardcoded value until ethtool supports bytecount parameter */
        if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
                intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                       enum ena_intr_moder_level level,
                                       struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                entry->intr_moder_interval *= ena_dev->intr_delay_resolution;

        entry->pkts_per_interval =
                intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}