kfd_packet_manager.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

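/*
 * Advance the runlist IB write pointer (counted in dwords) by a packet size
 * given in bytes, warning if the write would run past the end of the IB.
 */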
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

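/*
 * Compute the runlist IB allocation size from the current process and queue
 * counts, and report whether the runlist is over-subscribed (more processes
 * than fit in one quantum, or more active compute queues than
 * get_queues_num() reports).
 */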
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

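/*
 * Allocate the runlist IB from the device's GTT sub-allocator and return its
 * CPU and GPU addresses. The buffer is zeroed and pm->allocated is set so a
 * second allocation without a release is refused.
 */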
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);
	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

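/*
 * Build the runlist IB: for each process in the DQM list, write a map-process
 * packet followed by map-queues packets for every active kernel and user
 * queue. When over-subscribed, a chained runlist packet pointing back at the
 * start of this IB is appended at the end.
 */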
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, proccesses_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = proccesses_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (proccesses_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		proccesses_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

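/*
 * Select the ASIC-specific packet writer table and create the HIQ kernel
 * queue used to submit packets to the hardware scheduler.
 */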
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

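/*
 * Submit a set-resources packet on the HIQ, passing the scheduling resources
 * (struct scheduling_resources) reserved for the hardware scheduler.
 */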
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

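/*
 * Build a runlist IB for all queues under the DQM and submit a runlist packet
 * on the HIQ that points the hardware scheduler at it.
 */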
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

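/*
 * Submit a query-status packet on the HIQ that makes the scheduler write
 * fence_value to fence_address; used as a completion fence for preceding
 * packets.
 */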
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

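/*
 * Submit an unmap-queues packet on the HIQ to preempt (or, with reset, reset)
 * the queues selected by the filter and filter_param.
 */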
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

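/*
 * Free the runlist IB, if one is currently allocated, and clear
 * pm->allocated so a new runlist can be built.
 */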
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, " No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

#endif