kfd_device_queue_manager.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);

	return dqm->dev->shared_resources.first_compute_pipe;
}

unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);

	return dqm->dev->shared_resources.compute_pipe_count;
}

static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
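
/*
 * VMID allocation (no-HWS path): dqm->vmid_bitmap tracks the VMIDs reserved
 * for KFD. allocate_vmid() claims the first free bit, offsets it by
 * KFD_VMID_START_OFFSET to obtain the hardware VMID, records it in the qpd
 * and the queue, and programs the PASID<->VMID mapping and SH_MEM registers.
 * A process keeps its VMID only while it has at least one queue.
 */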
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* Kaveri KFD VMIDs start from VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
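
/*
 * Queue creation for the no-HWS (KFD_SCHED_POLICY_NO_HWS) path: the first
 * queue of a process also allocates the process VMID, the queue is then
 * created and loaded directly into an HQD (compute) or SDMA slot, and the
 * per-device queue counters are updated under dqm->lock.
 */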
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);
	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}
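
/*
 * HQD slot allocation: starting from dqm->next_pipe_to_allocate, scan the
 * compute pipes round-robin and claim the first free queue bit in a pipe's
 * allocated_queues bitmap. Advancing next_pipe_to_allocate afterwards spreads
 * queues horizontally across pipes.
 */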
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
			pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (set == false)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe,
			q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);

	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active == true)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active == true) && (prev_active == false))
		dqm->queue_count++;
	else if ((q->properties.is_active == false) && (prev_active == true))
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	pr_debug("In func %s\n", __func__);

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}
int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */

	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
					&dqm->pipeline_mem);

	if (err) {
		pr_err("kfd: error allocate vidmem num pipes: %d\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL) {
		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		/*
		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
		 * space in GTT for pipelines we don't initialize
		 */
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	BUG_ON(dqm == NULL);

	for (i = 0 ; i < get_pipes_num(dqm) ; i++)
		dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
				i + get_first_pipe(dqm));
}

static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));

	return retval;
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);
	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
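
/*
 * SDMA queue bookkeeping: dqm->sdma_bitmap tracks the free SDMA queue slots.
 * The allocated slot id is split into a per-engine queue id and an engine id
 * before the SDMA MQD is initialized and loaded.
 */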
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
	pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	retval = mqd->load_mqd(mqd, q->mqd, 0,
				0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
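
/*
 * set_sched_resources() tells the hardware scheduler, through
 * pm_send_set_resources(), which VMIDs and HQD slots it may use on this
 * device; GDS, GWS and OAC resources are not handed over here.
 */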
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			" vmid mask: 0x%8X\n"
			" queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	/* here we actually preempt the DIQ */
	destroy_queues_cpsch(dqm, true, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
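
/*
 * amdkfd_fence_wait_timeout() busy-waits (yielding with schedule()) until the
 * fence location written by the CP reaches fence_value, or returns -ETIME
 * once the jiffies-based timeout expires.
 */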
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}
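
/*
 * destroy_queues_cpsch() preempts the queues in the active runlist: SDMA
 * queues on both engines first, then the compute queues (all of them, or only
 * the dynamic ones, depending on preempt_static_queues). It then asks the CP
 * to write a completion fence and waits for it; on timeout the process is
 * flagged for a wavefront reset. Finally the runlist IB is released.
 */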
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock)
{
	int retval;
	enum kfd_preempt_type_filter preempt_type;
	struct kfd_process *p;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (dqm->active_runlist == false)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	preempt_type = preempt_static_queues ?
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			preempt_type, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* wait for the fence, with a timeout */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval != 0) {
		p = kfd_get_process(current);
		p->reset_wavefronts = true;
		goto out;
	}
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
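
/*
 * execute_queues_cpsch() rebuilds the runlist: it first preempts the dynamic
 * queues of the current runlist and then, if there are active queues and
 * registered processes and no runlist is already active, submits a new
 * runlist through the packet manager.
 */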
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	BUG_ON(!dqm || !qpd || !q);

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
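
/*
 * set_cache_memory_policy() validates the requested alternate aperture (APE1)
 * range against the fixed-bit layout above and converts the base and limit
 * into the 16-bit-shifted SH_MEM_APE1_* register format (a size of 0 disables
 * APE1 by making base > limit). The ASIC-specific hook then builds
 * sh_mem_config, and the registers are reprogrammed immediately when running
 * without HWS and a VMID is already assigned.
 */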
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}
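
/*
 * device_queue_manager_init() allocates the DQM and wires up its ops table
 * according to the sched_policy module parameter: the HWS policies use the
 * CP-scheduler (cpsch) entry points, while KFD_SCHED_POLICY_NO_HWS programs
 * HQDs directly (nocpsch). ASIC-specific ops are selected for Kaveri (CIK) or
 * Carrizo (VI) before the policy-specific initialize() runs.
 */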
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}