kfd_packet_manager.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_opcodes.h"
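
/*
 * Advance *wptr by increment_bytes, converted to dwords, asserting that the
 * new write position still fits inside the buffer.
 */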
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
                                unsigned int buffer_size_bytes)
{
        unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

        BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
        *wptr = temp;
}
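
/*
 * Build a PM4 type-3 packet header for the given opcode. Per the PM4
 * encoding, the count field holds the packet size in dwords minus two.
 */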
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
        union PM4_MES_TYPE_3_HEADER header;

        header.u32all = 0;
        header.opcode = opcode;
        header.count = packet_size / sizeof(uint32_t) - 2;
        header.type = PM4_TYPE_3;

        return header.u32all;
}
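
/*
 * Compute the runlist IB allocation size from the current process and queue
 * counts, and report whether the available hardware queue slots are
 * over-subscribed (more than one process, or more queues than slots).
 */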
static void pm_calc_rlib_size(struct packet_manager *pm,
                                unsigned int *rlib_size,
                                bool *over_subscription)
{
        unsigned int process_count, queue_count;

        BUG_ON(!pm || !rlib_size || !over_subscription);

        process_count = pm->dqm->processes_count;
        queue_count = pm->dqm->queue_count;

        /* check if there is over subscription */
        *over_subscription = false;
        if ((process_count > 1) ||
                queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
                *over_subscription = true;
                pr_debug("kfd: over subscribed runlist\n");
        }

        /* calculate run list ib allocation size */
        *rlib_size = process_count * sizeof(struct pm4_map_process) +
                        queue_count * sizeof(struct pm4_map_queues);

        /*
         * Increase the allocation size in case we need a chained run list
         * when over subscription
         */
        if (*over_subscription)
                *rlib_size += sizeof(struct pm4_runlist);

        pr_debug("kfd: runlist ib size %d\n", *rlib_size);
}
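
/*
 * Allocate and zero a runlist IB from the GTT sub-allocator, returning its
 * CPU pointer, GPU address and allocated size to the caller.
 */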
static int pm_allocate_runlist_ib(struct packet_manager *pm,
                                unsigned int **rl_buffer,
                                uint64_t *rl_gpu_buffer,
                                unsigned int *rl_buffer_size,
                                bool *is_over_subscription)
{
        int retval;

        BUG_ON(!pm);
        BUG_ON(pm->allocated);
        BUG_ON(is_over_subscription == NULL);

        pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

        retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
                                        &pm->ib_buffer_obj);
        if (retval != 0) {
                pr_err("kfd: failed to allocate runlist IB\n");
                return retval;
        }

        *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
        *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

        memset(*rl_buffer, 0, *rl_buffer_size);
        pm->allocated = true;
        return retval;
}
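
/*
 * Write a RUN_LIST packet into buffer, pointing the scheduler at an indirect
 * buffer of ib_size_in_dwords at GPU address ib, optionally chained.
 */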
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
        struct pm4_runlist *packet;

        BUG_ON(!pm || !buffer || !ib);

        packet = (struct pm4_runlist *)buffer;

        memset(buffer, 0, sizeof(struct pm4_runlist));
        packet->header.u32all = build_pm4_header(IT_RUN_LIST,
                                                sizeof(struct pm4_runlist));

        packet->bitfields4.ib_size = ib_size_in_dwords;
        packet->bitfields4.chain = chain ? 1 : 0;
        packet->bitfields4.offload_polling = 0;
        packet->bitfields4.valid = 1;
        packet->ordinal2 = lower_32_bits(ib);
        packet->bitfields3.ib_base_hi = upper_32_bits(ib);

        return 0;
}
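
/*
 * Write a MAP_PROCESS packet describing a process to the scheduler: its
 * PASID, page table base, shared memory apertures and GDS/GWS/OAC usage,
 * plus the number of queues it owns.
 */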
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
                                struct qcm_process_device *qpd)
{
        struct pm4_map_process *packet;
        struct queue *cur;
        uint32_t num_queues;

        BUG_ON(!pm || !buffer || !qpd);

        packet = (struct pm4_map_process *)buffer;

        pr_debug("kfd: In func %s\n", __func__);

        memset(buffer, 0, sizeof(struct pm4_map_process));

        packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
                                        sizeof(struct pm4_map_process));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 1;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields3.page_table_base = qpd->page_table_base;
        packet->bitfields10.gds_size = qpd->gds_size;
        packet->bitfields10.num_gws = qpd->num_gws;
        packet->bitfields10.num_oac = qpd->num_oac;

        num_queues = 0;
        list_for_each_entry(cur, &qpd->queues_list, list)
                num_queues++;
        packet->bitfields10.num_queues = num_queues;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
        packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        return 0;
}
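
/*
 * Write a MAP_QUEUES packet that hands a single queue (compute, DIQ or SDMA)
 * to the hardware scheduler, identified by its doorbell offset, MQD address
 * and write pointer address.
 */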
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
                                struct queue *q)
{
        struct pm4_map_queues *packet;

        BUG_ON(!pm || !buffer || !q);

        pr_debug("kfd: In func %s\n", __func__);

        packet = (struct pm4_map_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_map_queues));

        packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
                                        sizeof(struct pm4_map_queues));
        packet->bitfields2.alloc_format =
                        alloc_format__mes_map_queues__one_per_pipe;
        packet->bitfields2.num_queues = 1;
        packet->bitfields2.queue_sel =
                queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;

        packet->bitfields2.vidmem = (q->properties.is_interop) ?
                        vidmem__mes_map_queues__uses_video_memory :
                        vidmem__mes_map_queues__uses_no_video_memory;

        switch (q->properties.type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.engine_sel =
                                engine_sel__mes_map_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel =
                                engine_sel__mes_map_queues__sdma0;
                break;
        default:
                BUG();
                break;
        }

        packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
                        q->properties.doorbell_off;

        packet->mes_map_queues_ordinals[0].mqd_addr_lo =
                        lower_32_bits(q->gart_mqd_addr);

        packet->mes_map_queues_ordinals[0].mqd_addr_hi =
                        upper_32_bits(q->gart_mqd_addr);

        packet->mes_map_queues_ordinals[0].wptr_addr_lo =
                        lower_32_bits((uint64_t)q->properties.write_ptr);

        packet->mes_map_queues_ordinals[0].wptr_addr_hi =
                        upper_32_bits((uint64_t)q->properties.write_ptr);

        return 0;
}
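
/*
 * Fill the runlist IB: one MAP_PROCESS packet per process, each followed by
 * MAP_QUEUES packets for that process's active kernel and user queues. When
 * the runlist is over-subscribed, append a chained RUN_LIST packet pointing
 * back at the IB itself so the scheduler loops over it.
 */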
static int pm_create_runlist_ib(struct packet_manager *pm,
                                struct list_head *queues,
                                uint64_t *rl_gpu_addr,
                                size_t *rl_size_bytes)
{
        unsigned int alloc_size_bytes;
        unsigned int *rl_buffer, rl_wptr, i;
        int retval, processes_mapped;
        struct device_process_node *cur;
        struct qcm_process_device *qpd;
        struct queue *q;
        struct kernel_queue *kq;
        bool is_over_subscription;

        BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);

        rl_wptr = retval = processes_mapped = 0;

        retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
                                &alloc_size_bytes, &is_over_subscription);
        if (retval != 0)
                return retval;

        *rl_size_bytes = alloc_size_bytes;

        pr_debug("kfd: In func %s\n", __func__);
        pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
                pm->dqm->processes_count, pm->dqm->queue_count);

        /* build the run list ib packet */
        list_for_each_entry(cur, queues, list) {
                qpd = cur->qpd;

                /* build map process packet */
                if (processes_mapped >= pm->dqm->processes_count) {
                        pr_debug("kfd: not enough space left in runlist IB\n");
                        pm_release_ib(pm);
                        return -ENOMEM;
                }
                retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
                if (retval != 0)
                        return retval;
                processes_mapped++;
                inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
                                alloc_size_bytes);

                list_for_each_entry(kq, &qpd->priv_queue_list, list) {
                        if (!kq->queue->properties.is_active)
                                continue;
                        retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
                                                        kq->queue);
                        if (retval != 0)
                                return retval;
                        inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
                                        alloc_size_bytes);
                }

                list_for_each_entry(q, &qpd->queues_list, list) {
                        if (!q->properties.is_active)
                                continue;
                        retval = pm_create_map_queue(pm,
                                                &rl_buffer[rl_wptr], q);
                        if (retval != 0)
                                return retval;
                        inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
                                        alloc_size_bytes);
                }
        }

        pr_debug("kfd: finished map process and queues to runlist\n");

        if (is_over_subscription)
                pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
                                alloc_size_bytes / sizeof(uint32_t), true);

        for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
                pr_debug("0x%2X ", rl_buffer[i]);
        pr_debug("\n");

        return 0;
}
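
/* Initialize the packet manager and create its HIQ kernel queue. */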
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
        BUG_ON(!dqm);

        pm->dqm = dqm;
        mutex_init(&pm->lock);
        pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
        if (pm->priv_queue == NULL) {
                mutex_destroy(&pm->lock);
                return -ENOMEM;
        }
        pm->allocated = false;

        return 0;
}
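
/* Tear down the packet manager and its HIQ kernel queue. */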
void pm_uninit(struct packet_manager *pm)
{
        BUG_ON(!pm);

        mutex_destroy(&pm->lock);
        kernel_queue_uninit(pm->priv_queue);
}
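
/*
 * Send a SET_RESOURCES packet on the HIQ, telling the hardware scheduler
 * which VMIDs, queue slots, GDS heap and OAC/GWS resources it may use.
 */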
int pm_send_set_resources(struct packet_manager *pm,
                        struct scheduling_resources *res)
{
        struct pm4_set_resources *packet;

        BUG_ON(!pm || !res);

        pr_debug("kfd: In func %s\n", __func__);

        mutex_lock(&pm->lock);
        pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        sizeof(*packet) / sizeof(uint32_t),
                                        (unsigned int **)&packet);
        if (packet == NULL) {
                mutex_unlock(&pm->lock);
                pr_err("kfd: failed to allocate buffer on kernel queue\n");
                return -ENOMEM;
        }

        memset(packet, 0, sizeof(struct pm4_set_resources));
        packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
                                        sizeof(struct pm4_set_resources));

        packet->bitfields2.queue_type =
                        queue_type__mes_set_resources__hsa_interface_queue_hiq;
        packet->bitfields2.vmid_mask = res->vmid_mask;
        packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
        packet->bitfields7.oac_mask = res->oac_mask;
        packet->bitfields8.gds_heap_base = res->gds_heap_base;
        packet->bitfields8.gds_heap_size = res->gds_heap_size;

        packet->gws_mask_lo = lower_32_bits(res->gws_mask);
        packet->gws_mask_hi = upper_32_bits(res->gws_mask);

        packet->queue_mask_lo = lower_32_bits(res->queue_mask);
        packet->queue_mask_hi = upper_32_bits(res->queue_mask);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);

        return 0;
}
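
/*
 * Build a runlist IB for the given queues and submit a RUN_LIST packet on
 * the HIQ that points the hardware scheduler at it.
 */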
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
        uint64_t rl_gpu_ib_addr;
        uint32_t *rl_buffer;
        size_t rl_ib_size, packet_size_dwords;
        int retval;

        BUG_ON(!pm || !dqm_queues);

        retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
                                        &rl_ib_size);
        if (retval != 0)
                goto fail_create_runlist_ib;

        pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

        packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
        mutex_lock(&pm->lock);

        retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        packet_size_dwords, &rl_buffer);
        if (retval != 0)
                goto fail_acquire_packet_buffer;

        retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
                                        rl_ib_size / sizeof(uint32_t), false);
        if (retval != 0)
                goto fail_create_runlist;

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);

        return retval;

fail_create_runlist:
        pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
fail_create_runlist_ib:
        if (pm->allocated)
                pm_release_ib(pm);
        return retval;
}
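
/*
 * Send a QUERY_STATUS packet that makes the scheduler write fence_value to
 * fence_address, so the caller can poll the fence to detect completion.
 */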
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
                        uint32_t fence_value)
{
        int retval;
        struct pm4_query_status *packet;

        BUG_ON(!pm || !fence_address);

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_query_status) / sizeof(uint32_t),
                        (unsigned int **)&packet);
        if (retval != 0)
                goto fail_acquire_packet_buffer;

        packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
                                        sizeof(struct pm4_query_status));

        packet->bitfields2.context_id = 0;
        packet->bitfields2.interrupt_sel =
                        interrupt_sel__mes_query_status__completion_status;
        packet->bitfields2.command =
                        command__mes_query_status__fence_only_after_write_ack;

        packet->addr_hi = upper_32_bits((uint64_t)fence_address);
        packet->addr_lo = lower_32_bits((uint64_t)fence_address);
        packet->data_hi = upper_32_bits((uint64_t)fence_value);
        packet->data_lo = lower_32_bits((uint64_t)fence_value);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);
        mutex_unlock(&pm->lock);

        return 0;

fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}
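
/*
 * Send an UNMAP_QUEUES packet that preempts (or, with reset, resets) queues
 * selected by the given filter: a single queue by doorbell offset, all
 * queues belonging to a PASID, or all active queues.
 */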
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_preempt_type_filter mode,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine)
{
        int retval;
        uint32_t *buffer;
        struct pm4_unmap_queues *packet;

        BUG_ON(!pm);

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
                        &buffer);
        if (retval != 0)
                goto err_acquire_packet_buffer;

        packet = (struct pm4_unmap_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_unmap_queues));

        packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
                                        sizeof(struct pm4_unmap_queues));
        switch (type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
                break;
        default:
                BUG();
                break;
        }

        if (reset)
                packet->bitfields2.action =
                                action__mes_unmap_queues__reset_queues;
        else
                packet->bitfields2.action =
                                action__mes_unmap_queues__preempt_queues;

        switch (mode) {
        case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
                packet->bitfields2.num_queues = 1;
                packet->bitfields3b.doorbell_offset0 = filter_param;
                break;
        case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
                packet->bitfields3a.pasid = filter_param;
                break;
        case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
                break;
        default:
                BUG();
                break;
        }

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);
        return 0;

err_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}
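
/* Free the runlist IB, if one is currently allocated. */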
void pm_release_ib(struct packet_manager *pm)
{
        BUG_ON(!pm);

        mutex_lock(&pm->lock);
        if (pm->allocated) {
                kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
                pm->allocated = false;
        }
        mutex_unlock(&pm->lock);
}