amdgpu_vce.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the BO holding firmware, stack and heap
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	/* the fields are unsigned and up to 12 bits wide, so %hhd would truncate */
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		 version_major, version_minor, binary_id);

	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}
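
/*
 * Editor's illustration, not part of the original file: how a per-ASIC IP
 * block might size the BO passed to amdgpu_vce_sw_init(). The firmware image
 * is copied into this BO at resume time (see amdgpu_vce_resume() below), so
 * the BO must hold the image plus the stack and heap the firmware expects.
 * The sizes used here are hypothetical placeholders, not values taken from
 * the real vce_v2_0.c/vce_v3_0.c files.
 */
static int __maybe_unused vce_example_sw_init(struct amdgpu_device *adev)
{
	/* hypothetical layout: 256KB firmware + 64KB stack + 4MB heap */
	unsigned long size = (256 + 64) * 1024 + 4 * 1024 * 1024;

	return amdgpu_vce_sw_init(adev, size);
}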
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}
/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}
/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);

	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}
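
/*
 * Editor's illustration, not part of the original file: the usage pattern
 * around amdgpu_vce_note_usage(). Callers in this file invoke it right
 * before touching the engine so the clocks come up, and the delayed idle
 * work it (re)schedules powers VCE back down VCE_IDLE_TIMEOUT_MS after the
 * last submission. A sketch of such a caller, with a create msg standing in
 * for real encode work:
 */
static int __maybe_unused vce_example_submit(struct amdgpu_device *adev)
{
	int r;

	amdgpu_vce_note_usage(adev);	/* power up + arm the idle timer */

	/* hypothetical stand-in for building and submitting real work */
	r = amdgpu_vce_get_create_msg(&adev->vce.ring[0], 1, NULL);

	/* no explicit power-down: the idle work handles it after 1 second */
	return r;
}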
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}
static int amdgpu_vce_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		kfree(ib);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		kfree(ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}
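
/*
 * Editor's illustration, not part of the original file: what the reloc above
 * actually patches. A buffer address is split across two command stream
 * dwords, the high half at ib->ptr[hi] and the low half at ib->ptr[lo];
 * userspace fills them with an offset inside its VM mapping and
 * amdgpu_vce_cs_reloc() rewrites them with the real GPU address. A minimal
 * sketch of the same split, assuming nothing beyond the function above:
 */
static void __maybe_unused vce_example_split_addr(uint32_t *lo, uint32_t *hi,
						  uint64_t addr)
{
	*lo = addr & 0xFFFFFFFF;	/* low dword, as written to ib->ptr[lo] */
	*hi = addr >> 32;		/* high dword, as written to ib->ptr[hi] */
}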
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}
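
/*
 * Editor's note, not part of the original file: the smallest stream the
 * parser above accepts must start with a session command and, for a newly
 * allocated handle, contain a create command before it ends. Derived purely
 * from the checks in amdgpu_vce_ring_parse_cs() and the messages built in
 * amdgpu_vce_get_create_msg(), a minimal well-formed IB looks like:
 *
 *	0x0000000c, 0x00000001, handle,          // session cmd, len 0xc
 *	0x00000030, 0x01000001, <11 dwords>,     // create cmd, len 0x30
 *	0x00000014, 0x05000005, hi, lo, 0x1      // feedback buffer, len 0x14
 *
 * Every command is "len, cmd, payload..."; len counts bytes, must be at
 * least 8 and a multiple of 4, and the parser advances by len / 4 dwords.
 */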
/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));

	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}
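
/*
 * Editor's illustration, not part of the original file: these helpers are
 * meant to be wired into a ring's function table by the per-ASIC code
 * (vce_v2_0.c / vce_v3_0.c in this era of the driver). A hedged sketch of
 * such a table follows; the exact field set of struct amdgpu_ring_funcs and
 * any extra callbacks (rptr/wptr accessors etc.) are assumptions based on
 * the function signatures in this file, not copied from those files.
 */
static const struct amdgpu_ring_funcs vce_example_ring_funcs __maybe_unused = {
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
};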