/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
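
/*
 * Map the region list of @node into @vma, starting @delta bytes into the
 * virtual allocation.  The walk is split at page-table boundaries so each
 * ->map() call stays inside a single PGT; the TLB is flushed once at the
 * end.
 */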
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	mmu->func->flush(vm);
}
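
/*
 * As above, but for scatter-gather backed memory: one small-page PTE is
 * written per page.  When a page-table boundary falls inside an sg entry,
 * the PDE/PTE cursor is advanced and the remainder of the entry goes into
 * the next page table.
 */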
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;
			if (num == 0)
				goto finish;
		}

		if (unlikely(end >= max)) {
			/* crossed a page-table boundary: advance the cursor
			 * into the next PGT before mapping the remainder of
			 * this sg entry
			 */
			pde++;
			pte = 0;
			pgt = vm->pgt[pde].mem[big];
		}

		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) +
						  (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}
	}
finish:
	mmu->func->flush(vm);
}
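
/*
 * As above, but for memory described by a flat array of DMA addresses
 * (mem->pages); PTEs are written in page-table-sized batches.
 */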
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}
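
/*
 * Map an entire nvkm_mem at the start of @vma, choosing the backend that
 * matches how the memory is described: sg table, page array, or region
 * list.
 */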
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}
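
/*
 * Clear @length bytes worth of PTEs, starting @delta bytes into @vma,
 * then flush the TLB.
 */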
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
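
/*
 * Drop one reference on each page table backing PDEs [fpde, lpde].  A PGT
 * whose refcount reaches zero is unplugged from every page directory
 * attached to the VM and freed.
 */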
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

		nvkm_memory_del(&pgt);
	}
}
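
/*
 * Allocate the page table backing @pde for page size 1 << @type, plug it
 * into every page directory attached to the VM, and take the initial
 * reference.  The PGT holds one 8-byte PTE per page it covers.
 */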
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
	}

	vpgt->refcount[big]++;
	return 0;
}
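
/*
 * Allocate a @size-byte range of virtual address space from @vm, aligned
 * for 1 << @page_shift pages, taking references on (and allocating where
 * needed) the page tables backing it.  On failure, any page tables
 * allocated here are released again.
 *
 * Typical lifecycle, as a sketch (error handling omitted; the access
 * flags come from the NV_MEM_ACCESS_* set):
 *
 *	struct nvkm_vma vma = {};
 *
 *	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	nvkm_vm_map(&vma, node);
 *	...
 *	nvkm_vm_unmap(&vma);
 *	nvkm_vm_put(&vma);
 */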
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}
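
/*
 * Return the address range held by @vma to the VM and drop the page-table
 * references taken by nvkm_vm_get().  A no-op if the vma was never
 * allocated.
 */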
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}
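
/*
 * Bootstrap a small-page PGT covering the first @size bytes of the VM and
 * boot the backing memory object into it via nvkm_memory_boot(), for VMs
 * that must be usable before the normal allocation paths are up.
 */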
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000,
			      true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
	}

	return ret;
}
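
/*
 * Allocate and initialise a VM spanning [offset, offset + length), with
 * address-space allocations drawn from [mm_offset, offset + length) in
 * units of @block bytes.  @key, when given, supplies the lockdep class
 * for the VM's mutex.
 */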
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
	static struct lock_class_key _key;
	struct nvkm_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	INIT_LIST_HEAD(&vm->pgd_list);
	vm->mmu = mmu;
	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;

	if (!mmu->func->create)
		return -EINVAL;

	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	vpgd->obj = pgd;

	mutex_lock(&vm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mutex);
	return 0;
}

static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
	struct nvkm_vm_pgd *vpgd, *tmp;

	if (!mpgd)
		return;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mutex);
}

static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nvkm_vm_unlink(vm, vpgd->obj);
	}

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	kfree(vm);
}
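
/*
 * Update a VM reference: @ref, when non-NULL, gains a reference and is
 * linked to @pgd; the VM previously at *ptr is unlinked from @pgd and
 * released.  Dropping the last reference tears the VM down.
 */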
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
	if (ref) {
		int ret = nvkm_vm_link(ref, pgd);
		if (ret)
			return ret;

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}
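
/* Subdev glue: forward the generic subdev hooks to the MMU backend. */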
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);
	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->dtor)
		return mmu->func->dtor(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}