/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

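/*
 * Allocate a job along with all of its per-entry arrays (relocations,
 * unpin records, wait checks, gathers and the physical-address table).
 * Everything is carved out of one kzalloc() so a single kfree() in
 * job_free() releases it all.
 */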
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    u32 num_waitchks)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocarray = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->waitchk = num_waitchks ? mem : NULL;
	mem += num_waitchks * sizeof(struct host1x_waitchk);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

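/* Take a reference on a job; dropped again with host1x_job_put(). */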
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

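/*
 * Append one command buffer (gather) to a job. The gather is only
 * recorded here; the buffer is pinned and its base address resolved
 * later, in host1x_job_pin().
 */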
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   u32 words, u32 offset)
{
	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];

	cur_gather->words = words;
	cur_gather->bo = bo;
	cur_gather->offset = offset;
	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

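/*
 * A minimal sketch of the submission flow as driven by a client driver
 * (the Tegra DRM driver is the in-tree user). Error handling is trimmed,
 * "channel", "bo", "num_words" and "client->dev" are illustrative
 * placeholders, and setting up job->syncpt_id and the other submit
 * parameters is elided:
 *
 *	job = host1x_job_alloc(channel, 1, 0, 0);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, client->dev);
 *	if (err)
 *		goto put;
 *
 *	err = host1x_job_submit(job);
 *	if (err)
 *		host1x_job_unpin(job);
 * put:
 *	host1x_job_put(job);
 *
 * On success, the job is unpinned once the hardware has completed it.
 */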
/*
 * NULL an already satisfied WAIT_SYNCPT host method, by patching its
 * args in the command stream. The method data is changed to reference
 * a reserved (never assigned or incremented) HOST1X_SYNCPT_RESERVED
 * syncpt with a matching threshold value of 0, so it is guaranteed to
 * be popped by the host HW.
 */
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
				       struct host1x_bo *h, u32 offset)
{
	void *patch_addr = NULL;

	/* patch the wait */
	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
	if (patch_addr) {
		host1x_syncpt_patch_wait(sp,
					 patch_addr + (offset & ~PAGE_MASK));
		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
	} else {
		pr_err("Could not map cmdbuf for wait check\n");
	}
}

/*
 * Check driver supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct host1x_job *job, struct host1x *host,
		       struct host1x_bo *patch)
{
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct host1x_waitchk *wait = &job->waitchk[i];
		struct host1x_syncpt *sp;

		/* validate syncpt id (IDs are 0 .. nb_pts - 1) */
		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
			continue;

		/* skip all other gathers */
		if (patch != wait->bo)
			continue;

		sp = host1x_syncpt_get(host, wait->syncpt_id);

		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
					       wait->syncpt_id, wait->thresh,
					       host1x_syncpt_read_min(sp));

		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
			dev_dbg(host->dev,
				"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id, sp->name, wait->thresh,
				host1x_syncpt_read_min(sp));

			host1x_syncpt_patch_offset(sp, patch, wait->offset);
		}

		wait->bo = NULL;
	}

	return 0;
}

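/*
 * Resolve and pin every buffer the job references: relocation targets
 * first, then the gathers themselves. Each pin is recorded in
 * job->unpins so host1x_job_unpin() can undo the work on error or
 * completion. When an IOMMU domain is attached (and the firewall is
 * disabled), gathers are additionally mapped into host1x IOVA space.
 */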
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(g->bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;

			gather_size = iova_align(&host->iova, gather_size);
			shift = iova_shift(&host->iova);

			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			/*
			 * iommu_map_sg() returns the number of bytes it
			 * mapped, so zero means failure.
			 */
			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}

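/*
 * Patch one gather's command stream with the device addresses of the
 * pinned relocation targets. Pages of the command buffer are mapped one
 * at a time and kept mapped while consecutive relocations hit the same
 * page.
 */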
static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
{
	int i = 0;
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	return true;
}

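/*
 * Parser state for the software firewall. The firewall walks each
 * gather word by word, tracking the current class, register, mask and
 * word count, and checks that every write to an address register is
 * covered by a relocation supplied by userspace.
 */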
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}

		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

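/*
 * Walk one gather (from the firewall's copy) and validate it word by
 * word. The top four bits of each word select the opcode: 0 sets the
 * class and writes a mask of registers, 1 and 2 are incrementing and
 * non-incrementing register writes, 3 is a masked write; opcodes 4, 5
 * and 14 carry no register writes that need checking and are skipped.
 * Anything else is rejected.
 */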
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	int err = 0;

	if (!fw->job->is_addr_reg)
		return 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;
		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;
		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 5:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

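/*
 * Copy all gathers into one contiguous write-combined buffer and run
 * the firewall over the copy. Since the gathers' base addresses are
 * rewritten to point at the copy, the hardware executes exactly what
 * was validated, and userspace cannot rewrite a gather between
 * validation and execution.
 */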
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.class = 0;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
					       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

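/*
 * Prepare a job for submission: snapshot the syncpoint values needed by
 * the wait checks, pin all referenced buffers, patch relocations and
 * already-expired waits into the gathers and, with the firewall
 * enabled, validate a private copy of the command streams.
 */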
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);
	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));

	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
	for (i = 0; i < job->num_waitchk; i++) {
		u32 syncpt_id = job->waitchk[i].syncpt_id;

		if (syncpt_id < host1x_syncpt_nb_pts(host))
			set_bit(syncpt_id, waitchk_mask);
	}

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
		host1x_syncpt_load(host->syncpt + i);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g->bo);
		if (err)
			break;

		err = do_waitchks(job, host, g->bo);
		if (err)
			break;
	}

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
		err = copy_gathers(job, dev);
		if (err) {
			host1x_job_unpin(job);
			return err;
		}
	}

out:
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

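/*
 * Release everything taken by host1x_job_pin(): the IOVA mappings,
 * buffer pins and references recorded in job->unpins, and the
 * firewall's gather copy if one was made.
 */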
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		/* only gathers mapped through the IOMMU have a size set */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(job->channel->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}