/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ENA_COM
#define ENA_COM

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */

#define ENA_INTR_LOWEST_USECS (0)
#define ENA_INTR_LOWEST_PKTS (3)
#define ENA_INTR_LOWEST_BYTES (2 * 1524)

#define ENA_INTR_LOW_USECS (32)
#define ENA_INTR_LOW_PKTS (12)
#define ENA_INTR_LOW_BYTES (16 * 1024)

#define ENA_INTR_MID_USECS (80)
#define ENA_INTR_MID_PKTS (48)
#define ENA_INTR_MID_BYTES (64 * 1024)

#define ENA_INTR_HIGH_USECS (128)
#define ENA_INTR_HIGH_PKTS (96)
#define ENA_INTR_HIGH_BYTES (128 * 1024)

#define ENA_INTR_HIGHEST_USECS (192)
#define ENA_INTR_HIGHEST_PKTS (128)
#define ENA_INTR_HIGHEST_BYTES (192 * 1024)

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
#define ENA_INTR_MODER_LEVEL_STRIDE 2
#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF

#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF

enum ena_intr_moder_level {
	ENA_INTR_MODER_LOWEST = 0,
	ENA_INTR_MODER_LOW,
	ENA_INTR_MODER_MID,
	ENA_INTR_MODER_HIGH,
	ENA_INTR_MODER_HIGHEST,
	ENA_INTR_MAX_NUM_OF_LEVELS,
};

struct ena_intr_moder_entry {
	unsigned int intr_moder_interval;
	unsigned int pkts_per_interval;
	unsigned int bytes_per_interval;
};

enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};

struct ena_com_buf {
	dma_addr_t paddr; /**< Buffer physical address */
	u16 len; /**< Buffer length in bytes */
};

struct ena_com_rx_buf_info {
	u16 len;
	u16 req_id;
};

struct ena_com_io_desc_addr {
	u8 __iomem *pbuf_dev_addr; /* LLQ address */
	u8 *virt_addr;
	dma_addr_t phys_addr;
};

struct ena_com_tx_meta {
	u16 mss;
	u16 l3_hdr_len;
	u16 l3_hdr_offset;
	u16 l4_hdr_len; /* In words */
};

struct ena_com_io_cq {
	struct ena_com_io_desc_addr cdesc_addr;
	/* Interrupt unmask register */
	u32 __iomem *unmask_reg;
	/* The completion queue head doorbell register */
	u32 __iomem *cq_head_db_reg;
	/* numa configuration register (for TPH) */
	u32 __iomem *numa_node_cfg_reg;
	/* The value to write to the above register to unmask
	 * the interrupt of this queue
	 */
	u32 msix_vector;
	enum queue_direction direction;
	/* holds the number of cdesc of the current packet */
	u16 cur_rx_pkt_cdesc_count;
	/* save the first cdesc idx of the current packet */
	u16 cur_rx_pkt_cdesc_start_idx;
	u16 q_depth;
	/* Caller qid */
	u16 qid;
	/* Device queue index */
	u16 idx;
	u16 head;
	u16 last_head_update;
	u8 phase;
	u8 cdesc_entry_size_in_bytes;
} ____cacheline_aligned;

struct ena_com_io_sq {
	struct ena_com_io_desc_addr desc_addr;
	u32 __iomem *db_addr;
	u8 __iomem *header_addr;
	enum queue_direction direction;
	enum ena_admin_placement_policy_type mem_queue_type;
	u32 msix_vector;
	struct ena_com_tx_meta cached_tx_meta;
	u16 q_depth;
	u16 qid;
	u16 idx;
	u16 tail;
	u16 next_to_comp;
	u32 tx_max_header_size;
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
} ____cacheline_aligned;

struct ena_com_admin_cq {
	struct ena_admin_acq_entry *entries;
	dma_addr_t dma_addr;
	u16 head;
	u8 phase;
};

struct ena_com_admin_sq {
	struct ena_admin_aq_entry *entries;
	dma_addr_t dma_addr;
	u32 __iomem *db_addr;
	u16 head;
	u16 tail;
	u8 phase;
};

struct ena_com_stats_admin {
	u32 aborted_cmd;
	u32 submitted_cmd;
	u32 completed_cmd;
	u32 out_of_space;
	u32 no_completion;
};

struct ena_com_admin_queue {
	void *q_dmadev;
	spinlock_t q_lock; /* spinlock for the admin queue */
	struct ena_comp_ctx *comp_ctx;
	u32 completion_timeout;
	u16 q_depth;
	struct ena_com_admin_cq cq;
	struct ena_com_admin_sq sq;
	/* Indicate if the admin queue should poll for completion */
	bool polling;
	u16 curr_cmd_id;
	/* Indicate that the ena was initialized and can
	 * process new admin commands
	 */
	bool running_state;
	/* Count the number of outstanding admin commands */
	atomic_t outstanding_cmds;
	struct ena_com_stats_admin stats;
};

struct ena_aenq_handlers;

struct ena_com_aenq {
	u16 head;
	u8 phase;
	struct ena_admin_aenq_entry *entries;
	dma_addr_t dma_addr;
	u16 q_depth;
	struct ena_aenq_handlers *aenq_handlers;
};

struct ena_com_mmio_read {
	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
	dma_addr_t read_resp_dma_addr;
	u32 reg_read_to; /* in us */
	u16 seq_num;
	bool readless_supported;
	/* spin lock to ensure a single outstanding read */
	spinlock_t lock;
};

struct ena_rss {
	/* Indirect table */
	u16 *host_rss_ind_tbl;
	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_dma_addr;
	u16 tbl_log_size;
	/* Hash key */
	enum ena_admin_hash_functions hash_func;
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	dma_addr_t hash_key_dma_addr;
	u32 hash_init_val;
	/* Flow Control */
	struct ena_admin_feature_rss_hash_control *hash_ctrl;
	dma_addr_t hash_ctrl_dma_addr;
};

struct ena_host_attribute {
	/* Debug area */
	u8 *debug_area_virt_addr;
	dma_addr_t debug_area_dma_addr;
	u32 debug_area_size;
	/* Host information */
	struct ena_admin_host_info *host_info;
	dma_addr_t host_info_dma_addr;
};

/* Each ena_dev is a PCI function. */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
	struct ena_com_aenq aenq;
	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
	u8 __iomem *reg_bar;
	void __iomem *mem_bar;
	void *dmadev;
	enum ena_admin_placement_policy_type tx_mem_queue_type;
	u32 tx_max_header_size;
	u16 stats_func; /* Selected function for extended statistic dump */
	u16 stats_queue; /* Selected queue for extended statistic dump */
	struct ena_com_mmio_read mmio_read;
	struct ena_rss rss;
	u32 supported_features;
	u32 dma_addr_bits;
	struct ena_host_attribute host_attr;
	bool adaptive_coalescing;
	u16 intr_delay_resolution;
	u32 intr_moder_tx_interval;
	struct ena_intr_moder_entry *intr_moder_tbl;
};

struct ena_com_dev_get_features_ctx {
	struct ena_admin_queue_feature_desc max_queues;
	struct ena_admin_device_attr_feature_desc dev_attr;
	struct ena_admin_feature_aenq_desc aenq;
	struct ena_admin_feature_offload_desc offload;
	struct ena_admin_ena_hw_hints hw_hints;
};

struct ena_com_create_io_ctx {
	enum ena_admin_placement_policy_type mem_queue_type;
	enum queue_direction direction;
	int numa_node;
	u32 msix_vector;
	u16 queue_size;
	u16 qid;
};

typedef void (*ena_aenq_handler)(void *data,
	struct ena_admin_aenq_entry *aenq_e);

/* Holds aenq handlers. Indexed by AENQ event group */
struct ena_aenq_handlers {
	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
	ena_aenq_handler unimplemented_handler;
};

/*****************************************************************************/
/*****************************************************************************/

/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 *
 * Initialize the register read mechanism.
 *
 * @note: This method must be the first stage in the initialization sequence.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);

/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 * @readless_supported: readless mode (enable/disable)
 */
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
				bool readless_supported);

/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
 * value physical address.
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);

/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);

/* ena_com_admin_init - Init the admin and the async queues
 * @ena_dev: ENA communication layer struct
 * @aenq_handlers: The handlers to be called upon AENQ events.
 * @init_spinlock: Indicate if this method should init the admin spinlock or
 *	if the spinlock was already initialized (for example, in the case of FLR).
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock);
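
/* Illustrative bring-up sketch (not part of this header's API): a driver probe
 * path would typically initialize the register read mechanism first, then the
 * admin queue, and start in polling mode until MSI-X is configured. The
 * my_aenq_handlers table and the error label are hypothetical driver state.
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_admin_init(ena_dev, &my_aenq_handlers, true);
 *	if (rc)
 *		goto err_mmio_read;
 *	ena_com_set_admin_polling_mode(ena_dev, true);
 */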

/* ena_com_admin_destroy - Destroy the admin and the async events queues.
 * @ena_dev: ENA communication layer struct
 *
 * @note: Before calling this method, the caller must validate that the device
 * won't send any additional admin completions/aenq.
 * To achieve that, a FLR is recommended.
 */
void ena_com_admin_destroy(struct ena_com_dev *ena_dev);

/* ena_com_dev_reset - Perform an FLR on the device.
 * @ena_dev: ENA communication layer struct
 * @reset_reason: Specify the trigger for the reset in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason);

/* ena_com_create_io_queue - Create io queue.
 * @ena_dev: ENA communication layer struct
 * @ctx - create context structure
 *
 * Create the submission and the completion queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx);

/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
 * @ena_dev: ENA communication layer struct
 * @qid - the caller virtual queue id.
 */
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);

/* ena_com_get_io_handlers - Return the io queue handlers
 * @ena_dev: ENA communication layer struct
 * @qid - the caller virtual queue id.
 * @io_sq - IO submission queue handler
 * @io_cq - IO completion queue handler.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq);
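
/* Illustrative sketch: creating an Rx queue and fetching its handlers. The
 * placement policy value is assumed from ena_admin_defs.h, and the qid,
 * msix_vector, queue_size and numa_node values are placeholders supplied by
 * the calling driver.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_RX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= msix_vector,
 *		.queue_size	= queue_size,
 *		.qid		= qid,
 *		.numa_node	= numa_node,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
 *	if (rc)
 *		ena_com_destroy_io_queue(ena_dev, qid);
 */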

/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications
 * @ena_dev: ENA communication layer struct
 *
 * After this method, AENQ events can be received.
 */
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);

/* ena_com_set_admin_running_state - Set the state of the admin queue
 * @ena_dev: ENA communication layer struct
 *
 * Change the state of the admin queue (enable/disable)
 */
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);

/* ena_com_get_admin_running_state - Get the admin queue state
 * @ena_dev: ENA communication layer struct
 *
 * Retrieve the state of the admin queue (enable/disable)
 *
 * @return - current polling mode (enable/disable)
 */
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);

/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @ena_dev: ENA communication layer struct
 * @polling: ENAble/Disable polling mode
 *
 * Set the admin completion mode.
 */
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);

/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
 * @ena_dev: ENA communication layer struct
 *
 * Get the admin completion mode.
 * If polling mode is on, ena_com_execute_admin_command will perform a
 * polling on the admin completion queue for the commands completion,
 * otherwise it will wait on a wait event.
 *
 * @return state
 */
bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);

/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up all the pending
 * threads that wait on the commands wait event.
 *
 * @note: Should be called after MSI-X interrupt.
 */
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);

/* ena_com_aenq_intr_handler - AENQ interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the async event notification queue and calls the proper
 * aenq handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);

/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
 * @ena_dev: ENA communication layer struct
 *
 * This method aborts all the outstanding admin commands.
 * The caller should then call ena_com_wait_for_abort_completion to make sure
 * all the commands were completed.
 */
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);

/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
 * @ena_dev: ENA communication layer struct
 *
 * This method waits until all the outstanding admin commands are completed.
 */
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);

/* ena_com_validate_version - Validate the device parameters
 * @ena_dev: ENA communication layer struct
 *
 * This method validates that the device parameters are the same as the saved
 * parameters in ena_dev.
 * This method is useful after device reset, to validate the device mac address
 * and the device offloads are the same as before the reset.
 *
 * @return - 0 on success, negative value otherwise.
 */
int ena_com_validate_version(struct ena_com_dev *ena_dev);

/* ena_com_get_link_params - Retrieve physical link parameters.
 * @ena_dev: ENA communication layer struct
 * @resp: Link parameters
 *
 * Retrieve the physical link parameters,
 * like speed, auto-negotiation and full duplex support.
 *
 * @return - 0 on Success, negative value otherwise.
 */
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp);

/* ena_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @ena_dev: ENA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on Success and negative value otherwise.
 */
int ena_com_get_dma_width(struct ena_com_dev *ena_dev);

/* ena_com_set_aenq_config - Set aenq groups configurations
 * @ena_dev: ENA communication layer struct
 * @groups_flag: bit field of enum ena_admin_aenq_group flags.
 *
 * Configure which aenq event group the driver would like to receive.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);

/* ena_com_get_dev_attr_feat - Get device features
 * @ena_dev: ENA communication layer struct
 * @get_feat_ctx: returned context that contains the get features.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx);

/* ena_com_get_dev_basic_stats - Get device basic statistics
 * @ena_dev: ENA communication layer struct
 * @stats: stats return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats);

/* ena_com_set_dev_mtu - Configure the device mtu.
 * @ena_dev: ENA communication layer struct
 * @mtu: mtu value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);

/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
 * @ena_dev: ENA communication layer struct
 * @offload: offload return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload);

/* ena_com_rss_init - Init RSS
 * @ena_dev: ENA communication layer struct
 * @log_size: indirection log size
 *
 * Allocate RSS/RFS resources.
 * The caller then can configure rss using ena_com_set_hash_function,
 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);

/* ena_com_rss_destroy - Destroy rss
 * @ena_dev: ENA communication layer struct
 *
 * Free all the RSS/RFS resources.
 */
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);

/* ena_com_fill_hash_function - Fill RSS hash function
 * @ena_dev: ENA communication layer struct
 * @func: The hash function (Toeplitz or crc)
 * @key: Hash key (for toeplitz hash)
 * @key_len: key length (max length 10 DW)
 * @init_val: initial value for the hash function
 *
 * Fill the ena_dev resources with the desired hash function, hash key, key_len
 * and key initial value (if needed by the hash function).
 * To flush the key into the device the caller should call
 * ena_com_set_hash_function.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val);

/* ena_com_set_hash_function - Flush the hash function and its dependencies to
 * the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash function and its dependencies (key, key length and
 * initial value) if needed.
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_function
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
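
/* Illustrative sketch: programming a Toeplitz key after ena_com_rss_init().
 * The 40-byte key follows the "max length 10 DW" note above; ENA_ADMIN_TOEPLITZ
 * is assumed from ena_admin_defs.h, and netdev_rss_key_fill() (from
 * <linux/netdevice.h>) is just one way to obtain a key.
 *
 *	u8 rss_key[40];
 *	int rc;
 *
 *	netdev_rss_key_fill(rss_key, sizeof(rss_key));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					rss_key, sizeof(rss_key), 0);
 *	if (!rc)
 *		rc = ena_com_set_hash_function(ena_dev);
 */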

/* ena_com_get_hash_function - Retrieve the hash function and the hash key
 * from the device.
 * @ena_dev: ENA communication layer struct
 * @func: hash function
 * @key: hash key
 *
 * Retrieve the hash function and the hash key from the device.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key);

/* ena_com_fill_hash_ctrl - Fill RSS hash control
 * @ena_dev: ENA communication layer struct.
 * @proto: The protocol to configure.
 * @hash_fields: bit mask of ena_admin_flow_hash_fields
 *
 * Fill the ena_dev resources with the desired hash control (the ethernet
 * fields that take part of the hash) for a specific protocol.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields);

/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash control (the ethernet fields that take part of the hash)
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
 * @ena_dev: ENA communication layer struct
 * @proto: The protocol to retrieve.
 * @fields: bit mask of ena_admin_flow_hash_fields.
 *
 * Retrieve the hash control from the device.
 *
 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields);

/* ena_com_set_default_hash_ctrl - Set the hash control to a default
 * configuration.
 * @ena_dev: ENA communication layer struct
 *
 * Fill the ena_dev resources with the default hash control configuration.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
 * indirection table
 * @ena_dev: ENA communication layer struct.
 * @entry_idx - indirection table entry.
 * @entry_value - redirection value
 *
 * Fill a single entry of the RSS indirection table in the ena_dev resources.
 * To flush the indirection table to the device, the caller should call
 * ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value);

/* ena_com_indirect_table_set - Flush the indirection table to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the indirection hash control to the device.
 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
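
/* Illustrative sketch: populating the whole indirection table and flushing it.
 * The table has (1 << log_size) entries, matching the log_size passed to
 * ena_com_rss_init(); the round-robin queue-id expression is a placeholder for
 * whatever spreading policy and queue-id mapping the driver actually uses.
 *
 *	u32 tbl_size = 1U << log_size;
 *	u32 i;
 *	int rc = 0;
 *
 *	for (i = 0; i < tbl_size; i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_rx_queues);
 *		if (rc)
 *			break;
 *	}
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 */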

/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
 * @ena_dev: ENA communication layer struct
 * @ind_tbl: indirection table
 *
 * Retrieve the RSS indirection table from the device.
 *
 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);

/* ena_com_allocate_host_info - Allocate host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);

/* ena_com_allocate_debug_area - Allocate debug area.
 * @ena_dev: ENA communication layer struct
 * @debug_area_size - debug area size.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size);

/* ena_com_delete_debug_area - Free the debug area resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated debug area.
 */
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);

/* ena_com_delete_host_info - Free the host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated host info.
 */
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);

/* ena_com_set_host_attributes - Update the device with the host
 * attributes (debug area and host info) base address.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
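
/* Illustrative sketch: publishing host attributes to the device. The driver
 * allocates both buffers, fills ena_dev->host_attr.host_info (fields defined
 * in ena_admin_defs.h), and then flushes the base addresses with a single
 * admin command. debug_area_size and the error label are placeholders.
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *	if (rc)
 *		goto err_free_host_info;
 *	... fill ena_dev->host_attr.host_info ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 */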

/* ena_com_create_io_cq - Create io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Create IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq);

/* ena_com_destroy_io_cq - Destroy io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Destroy IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq);

/* ena_com_execute_admin_command - Execute admin command
 * @admin_queue: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @cmd_comp: command completion return value.
 * @cmd_comp_size: command completion size.
 *
 * Submit an admin command and then wait until the device returns a
 * completion.
 * The completion will be copied into cmd_comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *cmd_comp,
				  size_t cmd_comp_size);

/* ena_com_init_interrupt_moderation - Init interrupt moderation
 * @ena_dev: ENA communication layer struct
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);

/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
 * @ena_dev: ENA communication layer struct
 */
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);

/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
 * capability is supported by the device.
 *
 * @return - supported or not.
 */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);

/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
 * moderation table back to the default parameters.
 * @ena_dev: ENA communication layer struct
 */
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);

/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 * @tx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs);

/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 * @rx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs);

/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);

/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
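
/* Illustrative sketch: applying user-requested coalescing values (for example
 * from an ethtool set_coalesce callback) when adaptive moderation is disabled.
 * tx_usecs and rx_usecs are placeholders for the requested intervals.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, tx_usecs);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, rx_usecs);
 */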

/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
 * moderation table.
 * @ena_dev: ENA communication layer struct
 * @level: Interrupt moderation table level
 * @entry: Entry value
 *
 * Update a single entry in the interrupt moderation table.
 */
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry);

/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
 * @ena_dev: ENA communication layer struct
 * @level: Interrupt moderation table level
 * @entry: Entry to fill.
 *
 * Initialize the entry according to the adaptive interrupt moderation table.
 */
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry);

static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}

static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}

static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}

/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
 * @ena_dev: ENA communication layer struct
 * @pkts: Number of packets since the last update
 * @bytes: Number of bytes received since the last update.
 * @smoothed_interval: On input, the previous interval; on return, the new
 *	smoothed interval.
 * @moder_tbl_idx: Current table level as input; updated to the new level on
 *	return.
 */
static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
						     unsigned int pkts,
						     unsigned int bytes,
						     unsigned int *smoothed_interval,
						     unsigned int *moder_tbl_idx)
{
	enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
	struct ena_intr_moder_entry *curr_moder_entry;
	struct ena_intr_moder_entry *pred_moder_entry;
	struct ena_intr_moder_entry *new_moder_entry;
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int interval;

	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	if (!pkts || !bytes)
		/* Tx interrupt, or spurious interrupt,
		 * in both cases we just use same delay values
		 */
		return;

	curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
	if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
		pr_err("Wrong moderation index %u\n", curr_moder_idx);
		return;
	}

	curr_moder_entry = &intr_moder_tbl[curr_moder_idx];

	new_moder_idx = curr_moder_idx;

	if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
		if ((pkts > curr_moder_entry->pkts_per_interval) ||
		    (bytes > curr_moder_entry->bytes_per_interval))
			new_moder_idx =
				(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
	} else {
		pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];

		if ((pkts <= pred_moder_entry->pkts_per_interval) ||
		    (bytes <= pred_moder_entry->bytes_per_interval))
			new_moder_idx =
				(enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
		else if ((pkts > curr_moder_entry->pkts_per_interval) ||
			 (bytes > curr_moder_entry->bytes_per_interval)) {
			if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
				new_moder_idx =
					(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
		}
	}
	new_moder_entry = &intr_moder_tbl[new_moder_idx];

	interval = new_moder_entry->intr_moder_interval;
	*smoothed_interval = (
		(interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
		ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
		10;

	*moder_tbl_idx = new_moder_idx;
}

/* ena_com_update_intr_reg - Prepare interrupt register
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
 *
 * Prepare interrupt update register with the supplied parameters.
 */
static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
					   u32 rx_delay_interval,
					   u32 tx_delay_interval,
					   bool unmask)
{
	intr_reg->intr_control = 0;
	intr_reg->intr_control |= rx_delay_interval &
		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;

	intr_reg->intr_control |=
		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;

	if (unmask)
		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
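
/* Illustrative sketch: re-arming a queue's interrupt at the end of Rx NAPI
 * polling. The per-queue rx_pkts/rx_bytes counters and the smoothed_interval/
 * moder_tbl_idx state kept by the driver are placeholders; the prepared
 * intr_reg.intr_control value is then written to the queue's unmask register
 * (the doorbell write helper lives outside this header).
 *
 *	struct ena_eth_io_intr_reg intr_reg;
 *
 *	if (ena_com_get_adaptive_moderation_enabled(ena_dev))
 *		ena_com_calculate_interrupt_delay(ena_dev, rx_pkts, rx_bytes,
 *						  &smoothed_interval,
 *						  &moder_tbl_idx);
 *	ena_com_update_intr_reg(&intr_reg, smoothed_interval,
 *				tx_interval_usecs, true);
 */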

#endif /* !(ENA_COM) */