/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"
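
/*
 * Register offset of the WAIT_SYNCPT method within the host1x class;
 * the firewall uses it to recognize wait checks in the command stream
 * (see check_register() below).
 */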
#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

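/*
 * Allocate a job together with all of its fixed-size arrays (relocs,
 * unpins, waitchks, gathers and physical addresses) in one contiguous
 * block, so that a single kfree() in job_free() releases everything.
 */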
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    u32 num_waitchks)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocarray = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->waitchk = num_waitchks ? mem : NULL;
	mem += num_waitchks * sizeof(struct host1x_waitchk);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   u32 words, u32 offset)
{
	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];

	cur_gather->words = words;
	cur_gather->bo = bo;
	cur_gather->offset = offset;
	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
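
/*
 * Typical life cycle of a job as seen from a client driver (a sketch;
 * error handling and the actual submit/wait calls are omitted, and the
 * identifiers "bo" and "num_words" are placeholders):
 *
 *	job = host1x_job_alloc(channel, 1, 0, 0);
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *	err = host1x_job_pin(job, dev);
 *	... submit the job and wait for completion ...
 *	host1x_job_unpin(job);
 *	host1x_job_put(job);
 */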

/*
 * NULL an already satisfied WAIT_SYNCPT host method, by patching its
 * args in the command stream. The method data is changed to reference
 * a reserved (never assigned or incremented) HOST1X_SYNCPT_RESERVED
 * syncpt with a matching threshold value of 0, so it is guaranteed to
 * be popped by the host HW.
 */
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
				       struct host1x_bo *h, u32 offset)
{
	void *patch_addr = NULL;

	/* patch the wait */
	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
	if (patch_addr) {
		host1x_syncpt_patch_wait(sp,
					 patch_addr + (offset & ~PAGE_MASK));
		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
	} else {
		pr_err("Could not map cmdbuf for wait check\n");
	}
}

/*
 * Check driver-supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct host1x_job *job, struct host1x *host,
		       struct host1x_job_gather *g)
{
	struct host1x_bo *patch = g->bo;
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct host1x_waitchk *wait = &job->waitchk[i];
		struct host1x_syncpt *sp;

		/* validate the syncpt id before looking it up */
		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
			continue;

		/* skip all other gathers */
		if (patch != wait->bo)
			continue;

		sp = host1x_syncpt_get(host, wait->syncpt_id);

		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
					       wait->syncpt_id, wait->thresh,
					       host1x_syncpt_read_min(sp));

		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
			dev_dbg(host->dev,
				"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id, sp->name, wait->thresh,
				host1x_syncpt_read_min(sp));

			host1x_syncpt_patch_offset(sp, patch,
						   g->offset + wait->offset);
		}

		wait->bo = NULL;
	}

	return 0;
}
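
/*
 * Pin every reloc target and gather buffer into device memory and
 * record each pin in job->unpins so host1x_job_unpin() can undo it.
 * When an IOMMU domain is available (and the firewall is disabled),
 * gathers are additionally mapped into IOVA space. Returns 0 on
 * success or a negative error code on failure.
 */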
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(g->bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}
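
/*
 * Patch the relocations for one gather: for every reloc that targets
 * this command buffer, write the pinned device address (shifted as
 * requested) over the placeholder word. With the firewall enabled, the
 * patching happens in the gather copy rather than the original buffer.
 */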
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	int i = 0;
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}
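
/*
 * Firewall helper: the firewall consumes the reloc table in submission
 * order, so the entry currently at the head must match the command
 * buffer and word offset of the register write being inspected.
 */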
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}
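
/* Same idea as check_reloc(), but for the head of the waitchk table. */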
static bool check_wait(struct host1x_waitchk *wait, struct host1x_bo *cmdbuf,
		       unsigned int offset)
{
	offset *= sizeof(u32);

	if (wait->bo != cmdbuf || wait->offset != offset)
		return false;

	return true;
}
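
/*
 * Walker state for the command stream firewall: the current class, the
 * register/mask/count of the opcode being decoded, the number of words
 * left in the gather, and the reloc/waitchk tables that are consumed
 * as matching register writes are encountered.
 */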
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	unsigned int num_waitchks;
	struct host1x_waitchk *waitchk;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};
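
/*
 * Validate a single register write: a write to an address register
 * must be backed by a reloc, and a write to the WAIT_SYNCPT method
 * must be backed by a waitchk; each match consumes one table entry.
 */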
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	if (offset == HOST1X_WAIT_SYNCPT_OFFSET) {
		if (fw->class != HOST1X_CLASS_HOST1X)
			return -EINVAL;

		if (!fw->num_waitchks)
			return -EINVAL;

		if (!check_wait(fw->waitchk, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_waitchks--;
		fw->waitchk++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}
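
/*
 * The three helpers below walk the payload of a mask, incrementing or
 * non-incrementing write opcode respectively, feeding every written
 * register through check_register() while consuming gather words.
 */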
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}
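
/*
 * Decode one gather from the CPU-visible gather copy. The top four
 * bits of each word select the opcode: 0 sets the class, 1 and 2 are
 * incrementing/non-incrementing register writes, 3 is a masked write;
 * opcodes 4 and 14 are accepted without further register checks, and
 * everything else is rejected.
 */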
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;
		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;
		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}
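
/*
 * Copy all gathers into one contiguous, CPU-mapped DMA buffer so the
 * firewall can inspect (and do_relocs() can patch) a snapshot that
 * userspace can no longer modify after validation.
 */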
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.waitchk = job->waitchk;
	fw.num_waitchks = job->num_waitchk;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from a higher-priority pool first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(dev, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs and waitchks should remain at this point */
	if (fw.num_relocs || fw.num_waitchks)
		return -EINVAL;

	return 0;
}
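
/*
 * Prepare a job for submission: snapshot the syncpt values referenced
 * by wait checks, pin all buffers, optionally copy and validate the
 * gathers, then patch relocations and already-satisfied wait checks.
 */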
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);
	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));

	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
	for (i = 0; i < job->num_waitchk; i++) {
		u32 syncpt_id = job->waitchk[i].syncpt_id;

		if (syncpt_id < host1x_syncpt_nb_pts(host))
			set_bit(syncpt_id, waitchk_mask);
	}

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
		host1x_syncpt_load(host->syncpt + i);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;

		err = do_waitchks(job, host, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);
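
/*
 * Undo pin_job(): unmap any IOVA mappings, release every pinned BO,
 * and free the gather copy if one was made.
 */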
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(job->channel->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}