ena_com.c

  1. /*
  2. * Copyright 2015 Amazon.com, Inc. or its affiliates.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include "ena_com.h"
  33. /*****************************************************************************/
  34. /*****************************************************************************/
  35. /* Timeout in micro-sec */
  36. #define ADMIN_CMD_TIMEOUT_US (1000000)
  37. #define ENA_ASYNC_QUEUE_DEPTH 4
  38. #define ENA_ADMIN_QUEUE_DEPTH 32
  39. #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
  40. ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
  41. | (ENA_COMMON_SPEC_VERSION_MINOR))
  42. #define ENA_CTRL_MAJOR 0
  43. #define ENA_CTRL_MINOR 0
  44. #define ENA_CTRL_SUB_MINOR 1
  45. #define MIN_ENA_CTRL_VER \
  46. (((ENA_CTRL_MAJOR) << \
  47. (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
  48. ((ENA_CTRL_MINOR) << \
  49. (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
  50. (ENA_CTRL_SUB_MINOR))
  51. #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
  52. #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
  53. #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
  54. /*****************************************************************************/
  55. /*****************************************************************************/
  56. /*****************************************************************************/
  57. enum ena_cmd_status {
  58. ENA_CMD_SUBMITTED,
  59. ENA_CMD_COMPLETED,
  60. /* Abort - canceled by the driver */
  61. ENA_CMD_ABORTED,
  62. };
  63. struct ena_comp_ctx {
  64. struct completion wait_event;
  65. struct ena_admin_acq_entry *user_cqe;
  66. u32 comp_size;
  67. enum ena_cmd_status status;
  68. /* status from the device */
  69. u8 comp_status;
  70. u8 cmd_opcode;
  71. bool occupied;
  72. };
  73. struct ena_com_stats_ctx {
  74. struct ena_admin_aq_get_stats_cmd get_cmd;
  75. struct ena_admin_acq_get_stats_resp get_resp;
  76. };
  77. static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
  78. struct ena_common_mem_addr *ena_addr,
  79. dma_addr_t addr)
  80. {
  81. if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
  82. pr_err("dma address has more bits that the device supports\n");
  83. return -EINVAL;
  84. }
  85. ena_addr->mem_addr_low = (u32)addr;
  86. ena_addr->mem_addr_high = (u64)addr >> 32;
  87. return 0;
  88. }
  89. static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
  90. {
  91. struct ena_com_admin_sq *sq = &queue->sq;
  92. u16 size = ADMIN_SQ_SIZE(queue->q_depth);
  93. sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
  94. GFP_KERNEL);
  95. if (!sq->entries) {
  96. pr_err("memory allocation failed");
  97. return -ENOMEM;
  98. }
  99. sq->head = 0;
  100. sq->tail = 0;
  101. sq->phase = 1;
  102. sq->db_addr = NULL;
  103. return 0;
  104. }
  105. static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
  106. {
  107. struct ena_com_admin_cq *cq = &queue->cq;
  108. u16 size = ADMIN_CQ_SIZE(queue->q_depth);
  109. cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
  110. GFP_KERNEL);
  111. if (!cq->entries) {
  112. pr_err("memory allocation failed");
  113. return -ENOMEM;
  114. }
  115. cq->head = 0;
  116. cq->phase = 1;
  117. return 0;
  118. }
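/* Allocate the asynchronous event notification queue (AENQ), program its
 * base address and capabilities registers, and install the caller-supplied
 * handlers table that is used when AENQ events are processed.
 */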
  119. static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
  120. struct ena_aenq_handlers *aenq_handlers)
  121. {
  122. struct ena_com_aenq *aenq = &dev->aenq;
  123. u32 addr_low, addr_high, aenq_caps;
  124. u16 size;
  125. dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
  126. size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
  127. aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
  128. GFP_KERNEL);
  129. if (!aenq->entries) {
  130. pr_err("memory allocation failed");
  131. return -ENOMEM;
  132. }
  133. aenq->head = aenq->q_depth;
  134. aenq->phase = 1;
  135. addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
  136. addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
  137. writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
  138. writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
  139. aenq_caps = 0;
  140. aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
  141. aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
  142. << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
  143. ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
  144. writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
  145. if (unlikely(!aenq_handlers)) {
  146. pr_err("aenq handlers pointer is NULL\n");
  147. return -EINVAL;
  148. }
  149. aenq->aenq_handlers = aenq_handlers;
  150. return 0;
  151. }
  152. static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
  153. struct ena_comp_ctx *comp_ctx)
  154. {
  155. comp_ctx->occupied = false;
  156. atomic_dec(&queue->outstanding_cmds);
  157. }
  158. static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
  159. u16 command_id, bool capture)
  160. {
  161. if (unlikely(command_id >= queue->q_depth)) {
  162. pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
  163. command_id, queue->q_depth);
  164. return NULL;
  165. }
  166. if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
  167. pr_err("Completion context is occupied\n");
  168. return NULL;
  169. }
  170. if (capture) {
  171. atomic_inc(&queue->outstanding_cmds);
  172. queue->comp_ctx[command_id].occupied = true;
  173. }
  174. return &queue->comp_ctx[command_id];
  175. }
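/* Submit a single admin command to the admin submission queue.
 * Called with the admin queue lock held. The command is tagged with the
 * current phase bit and command id, copied into the SQ ring, and the
 * doorbell is rung. Returns the completion context used to track the
 * command, or an ERR_PTR on failure (e.g. -ENOSPC when the queue is full).
 */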
  176. static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  177. struct ena_admin_aq_entry *cmd,
  178. size_t cmd_size_in_bytes,
  179. struct ena_admin_acq_entry *comp,
  180. size_t comp_size_in_bytes)
  181. {
  182. struct ena_comp_ctx *comp_ctx;
  183. u16 tail_masked, cmd_id;
  184. u16 queue_size_mask;
  185. u16 cnt;
  186. queue_size_mask = admin_queue->q_depth - 1;
  187. tail_masked = admin_queue->sq.tail & queue_size_mask;
  188. /* In case of queue FULL */
  189. cnt = admin_queue->sq.tail - admin_queue->sq.head;
  190. if (cnt >= admin_queue->q_depth) {
  191. pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
  192. admin_queue->sq.tail, admin_queue->sq.head,
  193. admin_queue->q_depth);
  194. admin_queue->stats.out_of_space++;
  195. return ERR_PTR(-ENOSPC);
  196. }
  197. cmd_id = admin_queue->curr_cmd_id;
  198. cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
  199. ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
  200. cmd->aq_common_descriptor.command_id |= cmd_id &
  201. ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
  202. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
  203. if (unlikely(!comp_ctx))
  204. return ERR_PTR(-EINVAL);
  205. comp_ctx->status = ENA_CMD_SUBMITTED;
  206. comp_ctx->comp_size = (u32)comp_size_in_bytes;
  207. comp_ctx->user_cqe = comp;
  208. comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
  209. reinit_completion(&comp_ctx->wait_event);
  210. memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
  211. admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
  212. queue_size_mask;
  213. admin_queue->sq.tail++;
  214. admin_queue->stats.submitted_cmd++;
  215. if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
  216. admin_queue->sq.phase = !admin_queue->sq.phase;
  217. writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
  218. return comp_ctx;
  219. }
  220. static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
  221. {
  222. size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
  223. struct ena_comp_ctx *comp_ctx;
  224. u16 i;
  225. queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
  226. if (unlikely(!queue->comp_ctx)) {
  227. pr_err("memory allocation failed");
  228. return -ENOMEM;
  229. }
  230. for (i = 0; i < queue->q_depth; i++) {
  231. comp_ctx = get_comp_ctxt(queue, i, false);
  232. if (comp_ctx)
  233. init_completion(&comp_ctx->wait_event);
  234. }
  235. return 0;
  236. }
  237. static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
  238. struct ena_admin_aq_entry *cmd,
  239. size_t cmd_size_in_bytes,
  240. struct ena_admin_acq_entry *comp,
  241. size_t comp_size_in_bytes)
  242. {
  243. unsigned long flags;
  244. struct ena_comp_ctx *comp_ctx;
  245. spin_lock_irqsave(&admin_queue->q_lock, flags);
  246. if (unlikely(!admin_queue->running_state)) {
  247. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  248. return ERR_PTR(-ENODEV);
  249. }
  250. comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
  251. cmd_size_in_bytes,
  252. comp,
  253. comp_size_in_bytes);
  254. if (unlikely(IS_ERR(comp_ctx)))
  255. admin_queue->running_state = false;
  256. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  257. return comp_ctx;
  258. }
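/* Allocate the descriptor ring for an IO submission queue. Host-memory
 * queues get a DMA-coherent ring; device-memory (LLQ) queues only get a
 * host-side buffer here. Allocation is tried first on the requested NUMA
 * node and falls back to the default device node if that fails.
 */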
  259. static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
  260. struct ena_com_create_io_ctx *ctx,
  261. struct ena_com_io_sq *io_sq)
  262. {
  263. size_t size;
  264. int dev_node = 0;
  265. memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
  266. io_sq->desc_entry_size =
  267. (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  268. sizeof(struct ena_eth_io_tx_desc) :
  269. sizeof(struct ena_eth_io_rx_desc);
  270. size = io_sq->desc_entry_size * io_sq->q_depth;
  271. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  272. dev_node = dev_to_node(ena_dev->dmadev);
  273. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  274. io_sq->desc_addr.virt_addr =
  275. dma_zalloc_coherent(ena_dev->dmadev, size,
  276. &io_sq->desc_addr.phys_addr,
  277. GFP_KERNEL);
  278. set_dev_node(ena_dev->dmadev, dev_node);
  279. if (!io_sq->desc_addr.virt_addr) {
  280. io_sq->desc_addr.virt_addr =
  281. dma_zalloc_coherent(ena_dev->dmadev, size,
  282. &io_sq->desc_addr.phys_addr,
  283. GFP_KERNEL);
  284. }
  285. } else {
  286. dev_node = dev_to_node(ena_dev->dmadev);
  287. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  288. io_sq->desc_addr.virt_addr =
  289. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  290. set_dev_node(ena_dev->dmadev, dev_node);
  291. if (!io_sq->desc_addr.virt_addr) {
  292. io_sq->desc_addr.virt_addr =
  293. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  294. }
  295. }
  296. if (!io_sq->desc_addr.virt_addr) {
  297. pr_err("memory allocation failed");
  298. return -ENOMEM;
  299. }
  300. io_sq->tail = 0;
  301. io_sq->next_to_comp = 0;
  302. io_sq->phase = 1;
  303. return 0;
  304. }
  305. static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
  306. struct ena_com_create_io_ctx *ctx,
  307. struct ena_com_io_cq *io_cq)
  308. {
  309. size_t size;
  310. int prev_node = 0;
  311. memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
  312. /* Use the basic completion descriptor for Rx */
  313. io_cq->cdesc_entry_size_in_bytes =
  314. (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
  315. sizeof(struct ena_eth_io_tx_cdesc) :
  316. sizeof(struct ena_eth_io_rx_cdesc_base);
  317. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  318. prev_node = dev_to_node(ena_dev->dmadev);
  319. set_dev_node(ena_dev->dmadev, ctx->numa_node);
  320. io_cq->cdesc_addr.virt_addr =
  321. dma_zalloc_coherent(ena_dev->dmadev, size,
  322. &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
  323. set_dev_node(ena_dev->dmadev, prev_node);
  324. if (!io_cq->cdesc_addr.virt_addr) {
  325. io_cq->cdesc_addr.virt_addr =
  326. dma_zalloc_coherent(ena_dev->dmadev, size,
  327. &io_cq->cdesc_addr.phys_addr,
  328. GFP_KERNEL);
  329. }
  330. if (!io_cq->cdesc_addr.virt_addr) {
  331. pr_err("memory allocation failed");
  332. return -ENOMEM;
  333. }
  334. io_cq->phase = 1;
  335. io_cq->head = 0;
  336. return 0;
  337. }
  338. static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
  339. struct ena_admin_acq_entry *cqe)
  340. {
  341. struct ena_comp_ctx *comp_ctx;
  342. u16 cmd_id;
  343. cmd_id = cqe->acq_common_descriptor.command &
  344. ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
  345. comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
  346. if (unlikely(!comp_ctx)) {
  347. pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
  348. admin_queue->running_state = false;
  349. return;
  350. }
  351. comp_ctx->status = ENA_CMD_COMPLETED;
  352. comp_ctx->comp_status = cqe->acq_common_descriptor.status;
  353. if (comp_ctx->user_cqe)
  354. memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
  355. if (!admin_queue->polling)
  356. complete(&comp_ctx->wait_event);
  357. }
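/* Walk the admin completion queue and process every new completion entry.
 * New entries are detected by comparing each entry's phase bit against the
 * queue's current phase; the phase flips whenever the head wraps around the
 * ring. Both the CQ and SQ head counters are advanced by the number of
 * completions processed.
 */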
  358. static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
  359. {
  360. struct ena_admin_acq_entry *cqe = NULL;
  361. u16 comp_num = 0;
  362. u16 head_masked;
  363. u8 phase;
  364. head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
  365. phase = admin_queue->cq.phase;
  366. cqe = &admin_queue->cq.entries[head_masked];
  367. /* Go over all the completions */
  368. while ((cqe->acq_common_descriptor.flags &
  369. ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit has been validated.
		 */
  373. rmb();
  374. ena_com_handle_single_admin_completion(admin_queue, cqe);
  375. head_masked++;
  376. comp_num++;
  377. if (unlikely(head_masked == admin_queue->q_depth)) {
  378. head_masked = 0;
  379. phase = !phase;
  380. }
  381. cqe = &admin_queue->cq.entries[head_masked];
  382. }
  383. admin_queue->cq.head += comp_num;
  384. admin_queue->cq.phase = phase;
  385. admin_queue->sq.head += comp_num;
  386. admin_queue->stats.completed_cmd += comp_num;
  387. }
  388. static int ena_com_comp_status_to_errno(u8 comp_status)
  389. {
  390. if (unlikely(comp_status != 0))
  391. pr_err("admin command failed[%u]\n", comp_status);
  392. if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
  393. return -EINVAL;
  394. switch (comp_status) {
  395. case ENA_ADMIN_SUCCESS:
  396. return 0;
  397. case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
  398. return -ENOMEM;
  399. case ENA_ADMIN_UNSUPPORTED_OPCODE:
  400. return -EPERM;
  401. case ENA_ADMIN_BAD_OPCODE:
  402. case ENA_ADMIN_MALFORMED_REQUEST:
  403. case ENA_ADMIN_ILLEGAL_PARAMETER:
  404. case ENA_ADMIN_UNKNOWN_ERROR:
  405. return -EINVAL;
  406. }
  407. return 0;
  408. }
  409. static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
  410. struct ena_com_admin_queue *admin_queue)
  411. {
  412. unsigned long flags;
  413. u32 start_time;
  414. int ret;
  415. start_time = ((u32)jiffies_to_usecs(jiffies));
  416. while (comp_ctx->status == ENA_CMD_SUBMITTED) {
  417. if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
  418. ADMIN_CMD_TIMEOUT_US) {
  419. pr_err("Wait for completion (polling) timeout\n");
  420. /* ENA didn't have any completion */
  421. spin_lock_irqsave(&admin_queue->q_lock, flags);
  422. admin_queue->stats.no_completion++;
  423. admin_queue->running_state = false;
  424. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  425. ret = -ETIME;
  426. goto err;
  427. }
  428. spin_lock_irqsave(&admin_queue->q_lock, flags);
  429. ena_com_handle_admin_completion(admin_queue);
  430. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  431. msleep(100);
  432. }
  433. if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
  434. pr_err("Command was aborted\n");
  435. spin_lock_irqsave(&admin_queue->q_lock, flags);
  436. admin_queue->stats.aborted_cmd++;
  437. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  438. ret = -ENODEV;
  439. goto err;
  440. }
  441. WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
  442. comp_ctx->status);
  443. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  444. err:
  445. comp_ctxt_release(admin_queue, comp_ctx);
  446. return ret;
  447. }
  448. static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
  449. struct ena_com_admin_queue *admin_queue)
  450. {
  451. unsigned long flags;
  452. int ret;
  453. wait_for_completion_timeout(&comp_ctx->wait_event,
  454. usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
	 */
  460. if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
  461. spin_lock_irqsave(&admin_queue->q_lock, flags);
  462. ena_com_handle_admin_completion(admin_queue);
  463. admin_queue->stats.no_completion++;
  464. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive the MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
  471. admin_queue->running_state = false;
  472. ret = -ETIME;
  473. goto err;
  474. }
  475. ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
  476. err:
  477. comp_ctxt_release(admin_queue, comp_ctx);
  478. return ret;
  479. }
/* This method reads the hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
  484. static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
  485. {
  486. struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
  487. volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
  488. mmio_read->read_resp;
  489. u32 mmio_read_reg, ret;
  490. unsigned long flags;
  491. int i;
  492. might_sleep();
  493. /* If readless is disabled, perform regular read */
  494. if (!mmio_read->readless_supported)
  495. return readl(ena_dev->reg_bar + offset);
  496. spin_lock_irqsave(&mmio_read->lock, flags);
  497. mmio_read->seq_num++;
  498. read_resp->req_id = mmio_read->seq_num + 0xDEAD;
  499. mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
  500. ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
  501. mmio_read_reg |= mmio_read->seq_num &
  502. ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
	/* make sure read_resp->req_id gets updated before the hw can
	 * write to it
	 */
  506. wmb();
  507. writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
  508. for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
  509. if (read_resp->req_id == mmio_read->seq_num)
  510. break;
  511. udelay(1);
  512. }
  513. if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
  514. pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
  515. mmio_read->seq_num, offset, read_resp->req_id,
  516. read_resp->reg_off);
  517. ret = ENA_MMIO_READ_TIMEOUT;
  518. goto err;
  519. }
  520. if (read_resp->reg_off != offset) {
  521. pr_err("Read failure: wrong offset provided");
  522. ret = ENA_MMIO_READ_TIMEOUT;
  523. } else {
  524. ret = read_resp->reg_val;
  525. }
  526. err:
  527. spin_unlock_irqrestore(&mmio_read->lock, flags);
  528. return ret;
  529. }
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
  537. static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
  538. struct ena_com_admin_queue *admin_queue)
  539. {
  540. if (admin_queue->polling)
  541. return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
  542. admin_queue);
  543. return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
  544. admin_queue);
  545. }
  546. static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
  547. struct ena_com_io_sq *io_sq)
  548. {
  549. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  550. struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
  551. struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
  552. u8 direction;
  553. int ret;
  554. memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
  555. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  556. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  557. else
  558. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  559. destroy_cmd.sq.sq_identity |= (direction <<
  560. ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
  561. ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
  562. destroy_cmd.sq.sq_idx = io_sq->idx;
  563. destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
  564. ret = ena_com_execute_admin_command(admin_queue,
  565. (struct ena_admin_aq_entry *)&destroy_cmd,
  566. sizeof(destroy_cmd),
  567. (struct ena_admin_acq_entry *)&destroy_resp,
  568. sizeof(destroy_resp));
  569. if (unlikely(ret && (ret != -ENODEV)))
  570. pr_err("failed to destroy io sq error: %d\n", ret);
  571. return ret;
  572. }
  573. static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
  574. struct ena_com_io_sq *io_sq,
  575. struct ena_com_io_cq *io_cq)
  576. {
  577. size_t size;
  578. if (io_cq->cdesc_addr.virt_addr) {
  579. size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
  580. dma_free_coherent(ena_dev->dmadev, size,
  581. io_cq->cdesc_addr.virt_addr,
  582. io_cq->cdesc_addr.phys_addr);
  583. io_cq->cdesc_addr.virt_addr = NULL;
  584. }
  585. if (io_sq->desc_addr.virt_addr) {
  586. size = io_sq->desc_entry_size * io_sq->q_depth;
  587. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
  588. dma_free_coherent(ena_dev->dmadev, size,
  589. io_sq->desc_addr.virt_addr,
  590. io_sq->desc_addr.phys_addr);
  591. else
  592. devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
  593. io_sq->desc_addr.virt_addr = NULL;
  594. }
  595. }
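/* Poll the device status register until the reset-in-progress bit matches
 * the expected state, or until the timeout (in units of 100 ms) expires.
 */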
  596. static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
  597. u16 exp_state)
  598. {
  599. u32 val, i;
  600. for (i = 0; i < timeout; i++) {
  601. val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
  602. if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
  603. pr_err("Reg read timeout occurred\n");
  604. return -ETIME;
  605. }
  606. if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
  607. exp_state)
  608. return 0;
  609. /* The resolution of the timeout is 100ms */
  610. msleep(100);
  611. }
  612. return -ETIME;
  613. }
  614. static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
  615. enum ena_admin_aq_feature_id feature_id)
  616. {
  617. u32 feature_mask = 1 << feature_id;
	/* Device attributes are always supported */
  619. if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
  620. !(ena_dev->supported_features & feature_mask))
  621. return false;
  622. return true;
  623. }
  624. static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
  625. struct ena_admin_get_feat_resp *get_resp,
  626. enum ena_admin_aq_feature_id feature_id,
  627. dma_addr_t control_buf_dma_addr,
  628. u32 control_buff_size)
  629. {
  630. struct ena_com_admin_queue *admin_queue;
  631. struct ena_admin_get_feat_cmd get_cmd;
  632. int ret;
  633. if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
  634. pr_info("Feature %d isn't supported\n", feature_id);
  635. return -EPERM;
  636. }
  637. memset(&get_cmd, 0x0, sizeof(get_cmd));
  638. admin_queue = &ena_dev->admin_queue;
  639. get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
  640. if (control_buff_size)
  641. get_cmd.aq_common_descriptor.flags =
  642. ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
  643. else
  644. get_cmd.aq_common_descriptor.flags = 0;
  645. ret = ena_com_mem_addr_set(ena_dev,
  646. &get_cmd.control_buffer.address,
  647. control_buf_dma_addr);
  648. if (unlikely(ret)) {
  649. pr_err("memory address set failed\n");
  650. return ret;
  651. }
  652. get_cmd.control_buffer.length = control_buff_size;
  653. get_cmd.feat_common.feature_id = feature_id;
  654. ret = ena_com_execute_admin_command(admin_queue,
  655. (struct ena_admin_aq_entry *)
  656. &get_cmd,
  657. sizeof(get_cmd),
  658. (struct ena_admin_acq_entry *)
  659. get_resp,
  660. sizeof(*get_resp));
  661. if (unlikely(ret))
  662. pr_err("Failed to submit get_feature command %d error: %d\n",
  663. feature_id, ret);
  664. return ret;
  665. }
  666. static int ena_com_get_feature(struct ena_com_dev *ena_dev,
  667. struct ena_admin_get_feat_resp *get_resp,
  668. enum ena_admin_aq_feature_id feature_id)
  669. {
  670. return ena_com_get_feature_ex(ena_dev,
  671. get_resp,
  672. feature_id,
  673. 0,
  674. 0);
  675. }
  676. static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
  677. {
  678. struct ena_rss *rss = &ena_dev->rss;
  679. rss->hash_key =
  680. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  681. &rss->hash_key_dma_addr, GFP_KERNEL);
  682. if (unlikely(!rss->hash_key))
  683. return -ENOMEM;
  684. return 0;
  685. }
  686. static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
  687. {
  688. struct ena_rss *rss = &ena_dev->rss;
  689. if (rss->hash_key)
  690. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
  691. rss->hash_key, rss->hash_key_dma_addr);
  692. rss->hash_key = NULL;
  693. }
  694. static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
  695. {
  696. struct ena_rss *rss = &ena_dev->rss;
  697. rss->hash_ctrl =
  698. dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  699. &rss->hash_ctrl_dma_addr, GFP_KERNEL);
  700. if (unlikely(!rss->hash_ctrl))
  701. return -ENOMEM;
  702. return 0;
  703. }
  704. static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
  705. {
  706. struct ena_rss *rss = &ena_dev->rss;
  707. if (rss->hash_ctrl)
  708. dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
  709. rss->hash_ctrl, rss->hash_ctrl_dma_addr);
  710. rss->hash_ctrl = NULL;
  711. }
  712. static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
  713. u16 log_size)
  714. {
  715. struct ena_rss *rss = &ena_dev->rss;
  716. struct ena_admin_get_feat_resp get_resp;
  717. size_t tbl_size;
  718. int ret;
  719. ret = ena_com_get_feature(ena_dev, &get_resp,
  720. ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
  721. if (unlikely(ret))
  722. return ret;
  723. if ((get_resp.u.ind_table.min_size > log_size) ||
  724. (get_resp.u.ind_table.max_size < log_size)) {
  725. pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
  726. 1 << log_size, 1 << get_resp.u.ind_table.min_size,
  727. 1 << get_resp.u.ind_table.max_size);
  728. return -EINVAL;
  729. }
  730. tbl_size = (1ULL << log_size) *
  731. sizeof(struct ena_admin_rss_ind_table_entry);
  732. rss->rss_ind_tbl =
  733. dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
  734. &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
  735. if (unlikely(!rss->rss_ind_tbl))
  736. goto mem_err1;
  737. tbl_size = (1ULL << log_size) * sizeof(u16);
  738. rss->host_rss_ind_tbl =
  739. devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
  740. if (unlikely(!rss->host_rss_ind_tbl))
  741. goto mem_err2;
  742. rss->tbl_log_size = log_size;
  743. return 0;
  744. mem_err2:
  745. tbl_size = (1ULL << log_size) *
  746. sizeof(struct ena_admin_rss_ind_table_entry);
  747. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  748. rss->rss_ind_tbl_dma_addr);
  749. rss->rss_ind_tbl = NULL;
  750. mem_err1:
  751. rss->tbl_log_size = 0;
  752. return -ENOMEM;
  753. }
  754. static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
  755. {
  756. struct ena_rss *rss = &ena_dev->rss;
  757. size_t tbl_size = (1ULL << rss->tbl_log_size) *
  758. sizeof(struct ena_admin_rss_ind_table_entry);
  759. if (rss->rss_ind_tbl)
  760. dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
  761. rss->rss_ind_tbl_dma_addr);
  762. rss->rss_ind_tbl = NULL;
  763. if (rss->host_rss_ind_tbl)
  764. devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
  765. rss->host_rss_ind_tbl = NULL;
  766. }
  767. static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
  768. struct ena_com_io_sq *io_sq, u16 cq_idx)
  769. {
  770. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  771. struct ena_admin_aq_create_sq_cmd create_cmd;
  772. struct ena_admin_acq_create_sq_resp_desc cmd_completion;
  773. u8 direction;
  774. int ret;
  775. memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
  776. create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
  777. if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
  778. direction = ENA_ADMIN_SQ_DIRECTION_TX;
  779. else
  780. direction = ENA_ADMIN_SQ_DIRECTION_RX;
  781. create_cmd.sq_identity |= (direction <<
  782. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
  783. ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
  784. create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
  785. ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
  786. create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
  787. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
  788. ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
  789. create_cmd.sq_caps_3 |=
  790. ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
  791. create_cmd.cq_idx = cq_idx;
  792. create_cmd.sq_depth = io_sq->q_depth;
  793. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
  794. ret = ena_com_mem_addr_set(ena_dev,
  795. &create_cmd.sq_ba,
  796. io_sq->desc_addr.phys_addr);
  797. if (unlikely(ret)) {
  798. pr_err("memory address set failed\n");
  799. return ret;
  800. }
  801. }
  802. ret = ena_com_execute_admin_command(admin_queue,
  803. (struct ena_admin_aq_entry *)&create_cmd,
  804. sizeof(create_cmd),
  805. (struct ena_admin_acq_entry *)&cmd_completion,
  806. sizeof(cmd_completion));
  807. if (unlikely(ret)) {
  808. pr_err("Failed to create IO SQ. error: %d\n", ret);
  809. return ret;
  810. }
  811. io_sq->idx = cmd_completion.sq_idx;
  812. io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  813. (uintptr_t)cmd_completion.sq_doorbell_offset);
  814. if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
  815. io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
  816. + cmd_completion.llq_headers_offset);
  817. io_sq->desc_addr.pbuf_dev_addr =
  818. (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
  819. cmd_completion.llq_descriptors_offset);
  820. }
  821. pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
  822. return ret;
  823. }
  824. static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
  825. {
  826. struct ena_rss *rss = &ena_dev->rss;
  827. struct ena_com_io_sq *io_sq;
  828. u16 qid;
  829. int i;
  830. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  831. qid = rss->host_rss_ind_tbl[i];
  832. if (qid >= ENA_TOTAL_NUM_QUEUES)
  833. return -EINVAL;
  834. io_sq = &ena_dev->io_sq_queues[qid];
  835. if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
  836. return -EINVAL;
  837. rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
  838. }
  839. return 0;
  840. }
  841. static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
  842. {
  843. u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
  844. struct ena_rss *rss = &ena_dev->rss;
  845. u8 idx;
  846. u16 i;
  847. for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
  848. dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
  849. for (i = 0; i < 1 << rss->tbl_log_size; i++) {
  850. if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
  851. return -EINVAL;
  852. idx = (u8)rss->rss_ind_tbl[i].cq_idx;
  853. if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
  854. return -EINVAL;
  855. rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
  856. }
  857. return 0;
  858. }
  859. static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
  860. {
  861. size_t size;
  862. size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
  863. ena_dev->intr_moder_tbl =
  864. devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
  865. if (!ena_dev->intr_moder_tbl)
  866. return -ENOMEM;
  867. ena_com_config_default_interrupt_moderation_table(ena_dev);
  868. return 0;
  869. }
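/* Scale the Rx interrupt moderation table and the Tx interval by the
 * device's interrupt delay resolution once it is known. A resolution of 0
 * is rejected and treated as the default of 1.
 */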
  870. static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
  871. u16 intr_delay_resolution)
  872. {
  873. struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
  874. unsigned int i;
  875. if (!intr_delay_resolution) {
  876. pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
  877. intr_delay_resolution = 1;
  878. }
  879. ena_dev->intr_delay_resolution = intr_delay_resolution;
  880. /* update Rx */
  881. for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
  882. intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
  883. /* update Tx */
  884. ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
  885. }
  886. /*****************************************************************************/
  887. /******************************* API ******************************/
  888. /*****************************************************************************/
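/* Execute an admin command synchronously: submit it to the admin queue and
 * wait (by polling or by interrupt, depending on the queue mode) until the
 * device posts the matching completion. Returns 0 on success or a negative
 * error code derived from the device completion status.
 */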
  889. int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
  890. struct ena_admin_aq_entry *cmd,
  891. size_t cmd_size,
  892. struct ena_admin_acq_entry *comp,
  893. size_t comp_size)
  894. {
  895. struct ena_comp_ctx *comp_ctx;
  896. int ret;
  897. comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
  898. comp, comp_size);
  899. if (unlikely(IS_ERR(comp_ctx))) {
  900. pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
  901. return PTR_ERR(comp_ctx);
  902. }
  903. ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
  904. if (unlikely(ret)) {
  905. if (admin_queue->running_state)
  906. pr_err("Failed to process command. ret = %d\n", ret);
  907. else
  908. pr_debug("Failed to process command. ret = %d\n", ret);
  909. }
  910. return ret;
  911. }
  912. int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
  913. struct ena_com_io_cq *io_cq)
  914. {
  915. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  916. struct ena_admin_aq_create_cq_cmd create_cmd;
  917. struct ena_admin_acq_create_cq_resp_desc cmd_completion;
  918. int ret;
  919. memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
  920. create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
  921. create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
  922. ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
  923. create_cmd.cq_caps_1 |=
  924. ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
  925. create_cmd.msix_vector = io_cq->msix_vector;
  926. create_cmd.cq_depth = io_cq->q_depth;
  927. ret = ena_com_mem_addr_set(ena_dev,
  928. &create_cmd.cq_ba,
  929. io_cq->cdesc_addr.phys_addr);
  930. if (unlikely(ret)) {
  931. pr_err("memory address set failed\n");
  932. return ret;
  933. }
  934. ret = ena_com_execute_admin_command(admin_queue,
  935. (struct ena_admin_aq_entry *)&create_cmd,
  936. sizeof(create_cmd),
  937. (struct ena_admin_acq_entry *)&cmd_completion,
  938. sizeof(cmd_completion));
  939. if (unlikely(ret)) {
  940. pr_err("Failed to create IO CQ. error: %d\n", ret);
  941. return ret;
  942. }
  943. io_cq->idx = cmd_completion.cq_idx;
  944. io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  945. cmd_completion.cq_interrupt_unmask_register_offset);
  946. if (cmd_completion.cq_head_db_register_offset)
  947. io_cq->cq_head_db_reg =
  948. (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  949. cmd_completion.cq_head_db_register_offset);
  950. if (cmd_completion.numa_node_register_offset)
  951. io_cq->numa_node_cfg_reg =
  952. (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
  953. cmd_completion.numa_node_register_offset);
  954. pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
  955. return ret;
  956. }
  957. int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
  958. struct ena_com_io_sq **io_sq,
  959. struct ena_com_io_cq **io_cq)
  960. {
  961. if (qid >= ENA_TOTAL_NUM_QUEUES) {
  962. pr_err("Invalid queue number %d but the max is %d\n", qid,
  963. ENA_TOTAL_NUM_QUEUES);
  964. return -EINVAL;
  965. }
  966. *io_sq = &ena_dev->io_sq_queues[qid];
  967. *io_cq = &ena_dev->io_cq_queues[qid];
  968. return 0;
  969. }
  970. void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
  971. {
  972. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  973. struct ena_comp_ctx *comp_ctx;
  974. u16 i;
  975. if (!admin_queue->comp_ctx)
  976. return;
  977. for (i = 0; i < admin_queue->q_depth; i++) {
  978. comp_ctx = get_comp_ctxt(admin_queue, i, false);
  979. if (unlikely(!comp_ctx))
  980. break;
  981. comp_ctx->status = ENA_CMD_ABORTED;
  982. complete(&comp_ctx->wait_event);
  983. }
  984. }
  985. void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
  986. {
  987. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  988. unsigned long flags;
  989. spin_lock_irqsave(&admin_queue->q_lock, flags);
  990. while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
  991. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  992. msleep(20);
  993. spin_lock_irqsave(&admin_queue->q_lock, flags);
  994. }
  995. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  996. }
  997. int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
  998. struct ena_com_io_cq *io_cq)
  999. {
  1000. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1001. struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
  1002. struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
  1003. int ret;
  1004. memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
  1005. destroy_cmd.cq_idx = io_cq->idx;
  1006. destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
  1007. ret = ena_com_execute_admin_command(admin_queue,
  1008. (struct ena_admin_aq_entry *)&destroy_cmd,
  1009. sizeof(destroy_cmd),
  1010. (struct ena_admin_acq_entry *)&destroy_resp,
  1011. sizeof(destroy_resp));
  1012. if (unlikely(ret && (ret != -ENODEV)))
  1013. pr_err("Failed to destroy IO CQ. error: %d\n", ret);
  1014. return ret;
  1015. }
  1016. bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
  1017. {
  1018. return ena_dev->admin_queue.running_state;
  1019. }
  1020. void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
  1021. {
  1022. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1023. unsigned long flags;
  1024. spin_lock_irqsave(&admin_queue->q_lock, flags);
  1025. ena_dev->admin_queue.running_state = state;
  1026. spin_unlock_irqrestore(&admin_queue->q_lock, flags);
  1027. }
  1028. void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
  1029. {
  1030. u16 depth = ena_dev->aenq.q_depth;
  1031. WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
  1032. /* Init head_db to mark that all entries in the queue
  1033. * are initially available
  1034. */
  1035. writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
  1036. }
  1037. int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
  1038. {
  1039. struct ena_com_admin_queue *admin_queue;
  1040. struct ena_admin_set_feat_cmd cmd;
  1041. struct ena_admin_set_feat_resp resp;
  1042. struct ena_admin_get_feat_resp get_resp;
  1043. int ret;
  1044. ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
  1045. if (ret) {
  1046. pr_info("Can't get aenq configuration\n");
  1047. return ret;
  1048. }
  1049. if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
  1050. pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
  1051. get_resp.u.aenq.supported_groups, groups_flag);
  1052. return -EPERM;
  1053. }
  1054. memset(&cmd, 0x0, sizeof(cmd));
  1055. admin_queue = &ena_dev->admin_queue;
  1056. cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
  1057. cmd.aq_common_descriptor.flags = 0;
  1058. cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
  1059. cmd.u.aenq.enabled_groups = groups_flag;
  1060. ret = ena_com_execute_admin_command(admin_queue,
  1061. (struct ena_admin_aq_entry *)&cmd,
  1062. sizeof(cmd),
  1063. (struct ena_admin_acq_entry *)&resp,
  1064. sizeof(resp));
  1065. if (unlikely(ret))
  1066. pr_err("Failed to config AENQ ret: %d\n", ret);
  1067. return ret;
  1068. }
  1069. int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
  1070. {
  1071. u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
  1072. int width;
  1073. if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
  1074. pr_err("Reg read timeout occurred\n");
  1075. return -ETIME;
  1076. }
  1077. width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
  1078. ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
  1079. pr_debug("ENA dma width: %d\n", width);
  1080. if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
  1081. pr_err("DMA width illegal value: %d\n", width);
  1082. return -EINVAL;
  1083. }
  1084. ena_dev->dma_addr_bits = width;
  1085. return width;
  1086. }
  1087. int ena_com_validate_version(struct ena_com_dev *ena_dev)
  1088. {
  1089. u32 ver;
  1090. u32 ctrl_ver;
  1091. u32 ctrl_ver_masked;
	/* Make sure the ENA version and the controller version are at least
	 * as high as the minimum version the driver expects.
	 */
  1095. ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
  1096. ctrl_ver = ena_com_reg_bar_read32(ena_dev,
  1097. ENA_REGS_CONTROLLER_VERSION_OFF);
  1098. if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
  1099. (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
  1100. pr_err("Reg read timeout occurred\n");
  1101. return -ETIME;
  1102. }
  1103. pr_info("ena device version: %d.%d\n",
  1104. (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
  1105. ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
  1106. ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
  1107. if (ver < MIN_ENA_VER) {
  1108. pr_err("ENA version is lower than the minimal version the driver supports\n");
  1109. return -1;
  1110. }
  1111. pr_info("ena controller version: %d.%d.%d implementation version %d\n",
  1112. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
  1113. ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
  1114. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
  1115. ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
  1116. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
  1117. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
  1118. ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
  1119. ctrl_ver_masked =
  1120. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
  1121. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
  1122. (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
  1123. /* Validate the ctrl version without the implementation ID */
  1124. if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
  1125. pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
  1126. return -1;
  1127. }
  1128. return 0;
  1129. }
  1130. void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
  1131. {
  1132. struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
  1133. struct ena_com_admin_cq *cq = &admin_queue->cq;
  1134. struct ena_com_admin_sq *sq = &admin_queue->sq;
  1135. struct ena_com_aenq *aenq = &ena_dev->aenq;
  1136. u16 size;
  1137. if (admin_queue->comp_ctx)
  1138. devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
  1139. admin_queue->comp_ctx = NULL;
  1140. size = ADMIN_SQ_SIZE(admin_queue->q_depth);
  1141. if (sq->entries)
  1142. dma_free_coherent(ena_dev->dmadev, size, sq->entries,
  1143. sq->dma_addr);
  1144. sq->entries = NULL;
  1145. size = ADMIN_CQ_SIZE(admin_queue->q_depth);
  1146. if (cq->entries)
  1147. dma_free_coherent(ena_dev->dmadev, size, cq->entries,
  1148. cq->dma_addr);
  1149. cq->entries = NULL;
  1150. size = ADMIN_AENQ_SIZE(aenq->q_depth);
  1151. if (ena_dev->aenq.entries)
  1152. dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
  1153. aenq->dma_addr);
  1154. aenq->entries = NULL;
  1155. }
  1156. void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
  1157. {
  1158. ena_dev->admin_queue.polling = polling;
  1159. }
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

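/* Bring up the admin queue: verify the device is ready, allocate the admin
 * SQ/CQ rings and completion contexts, program their base addresses and
 * entry sizes into the device registers, and initialize the AENQ with the
 * supplied handlers. On any failure the partially created resources are
 * torn down through ena_com_admin_destroy().
 *
 * Typical bring-up order (a sketch based on how these helpers are used in
 * this file, not a normative sequence):
 *
 *	ena_com_mmio_reg_read_request_init(ena_dev);
 *	ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 */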
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);

	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

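/* Create one I/O SQ/CQ pair described by @ctx. The CQ is created first so
 * its index can be passed to the SQ creation command; if the SQ creation
 * fails, the CQ is destroyed again before the host-side resources are freed.
 * For TX queues the inline header size is capped at 256 bytes.
 */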
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

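/* Tear down a single I/O queue pair: destroy the SQ and CQ on the device
 * side and then release the host-side descriptor memory.
 */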
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * Return the handler that is relevant to the specific event group.
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * Handles the incoming AENQ events.
 * Pops events from the queue and applies the relevant handler to each one.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

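/* Issue a device reset: verify the device is ready, extract the reset
 * timeout from the capabilities register, trigger the reset and poll the
 * status register until the reset-in-progress bit turns on and then off
 * again. The MMIO read request address is re-programmed because the reset
 * clears it on the device side.
 */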
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));
	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

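/* Program the RSS hash function that was previously configured in
 * ena_dev->rss (function, init value and key) into the device, after
 * verifying that the device actually advertises support for it.
 */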
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_info("Feature %d isn't supported\n",
			ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EPERM;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EPERM;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

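/* Fill the hash control table with a default field selection for every
 * supported protocol (L3 + L4 fields for TCP/UDP, L3 only for plain IP,
 * L2 for non-IP traffic), verify the device supports the selection, and
 * push it to the device via ena_com_set_hash_ctrl().
 */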
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EPERM;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

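/* Push the host RSS indirection table to the device. The host table (queue
 * numbers) is first converted to the device representation, then handed to
 * the device through an indirect control buffer.
 */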
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_info("Feature %d isn't supported\n",
			ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EPERM;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

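/* Allocate all host-side RSS state: the indirection table
 * (2^indr_tbl_log_size entries), the hash key buffer and the hash control
 * buffer. On failure the already allocated pieces are released in reverse
 * order.
 *
 * A minimal usage sketch (ENA_DEFAULT_RSS_LOG_SIZE is an assumed caller-side
 * constant, not defined in this file):
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_DEFAULT_RSS_LOG_SIZE);
 *	if (!rc)
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 */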
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_HOST_ATTR_CONFIG)) {
		pr_warn("Set host attribute isn't supported\n");
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

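/* Query the device for interrupt moderation support. If the feature isn't
 * supported, adaptive moderation is disabled and 0 is returned; otherwise
 * the moderation table is allocated, the delay resolution reported by the
 * device is applied and adaptive moderation is enabled.
 */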
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EPERM) {
			pr_info("Feature %d isn't supported\n",
				ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);

	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

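/* Update one level of the moderation table from @entry, converting the
 * interval from microseconds to device units when a delay resolution is
 * known. ena_com_get_intr_moderation_entry() performs the reverse
 * conversion when reading an entry back.
 */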
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;

	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;

	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}