/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = nouveau_fence_ref(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
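
/*
 * nouveau_bo_new() - allocate and initialise a nouveau buffer object.
 *
 * Validates the requested size against the VM's large-page granularity,
 * applies chipset-specific size/alignment fixups and hands the object to
 * TTM via ttm_bo_init().  On failure TTM calls nouveau_bo_del_ttm() itself,
 * so the error path must not free nvbo again.
 */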
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.base.vm)
		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		nv_warn(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	if ((nv_device(drm->device)->card_type == NV_10 ||
	     nv_device(drm->device)->card_type == NV_11) &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
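
/*
 * nouveau_bo_pin()/nouveau_bo_unpin() - reference-counted pinning.
 *
 * The first pin validates the bo into the requested memory type and marks
 * it TTM_PL_FLAG_NO_EVICT; further pins only bump pin_refcnt.  The VRAM/GART
 * accounting in drm->gem is adjusted on the first pin and the last unpin.
 */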
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
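
/*
 * Kernel mapping and CPU access helpers.  nouveau_bo_map() kmaps the whole
 * object; the rd/wr accessors below go through that kmap and switch to the
 * io* variants when the mapping points at I/O memory (e.g. a VRAM aperture).
 */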
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
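
/*
 * TTM backend hooks: pick the AGP or SGDMA backend for a ttm_tt and
 * describe the SYSTEM/TT/VRAM memory types to TTM, choosing the memory
 * region manager appropriate for the chipset generation.
 */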
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
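
/*
 * When TTM needs to evict a buffer, prefer pushing VRAM contents out to
 * GART (falling back to system memory) and everything else straight to
 * system memory.
 */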
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
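
/*
 * nouveau_bo_move_prep() - set up a GPU copy on NV50 and newer.
 *
 * Allocates two temporary VMAs in the client VM, maps the source and
 * destination memory nodes into them, and stashes both in the old node so
 * they are torn down once TTM destroys the old ttm_mem_reg.
 */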
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(bo->sync_obj, chan);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo, fence,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&chan->cli->mutex);
	return ret;
}
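
/*
 * nouveau_bo_move_init() - pick the copy engine used for buffer moves.
 *
 * Walks the method table from newest to oldest class, instantiates the
 * first object the hardware accepts on the copy (or main) channel, and
 * records its exec callback in drm->ttm.move.  If nothing matches, moves
 * fall back to the CPU memcpy path in nouveau_bo_move().
 */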
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
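
/*
 * VRAM<->SYSTEM moves cannot be done directly by the copy engines, so they
 * are bounced through a temporary GART placement: nouveau_bo_move_flipd()
 * copies into GART with the GPU and then moves the result to the system
 * placement, while nouveau_bo_move_flips() binds into GART first and lets
 * the GPU copy into VRAM.
 */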
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}
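
/*
 * nouveau_bo_move() - top-level TTM move callback.
 *
 * Pre-NV50 chips first update the tiling region for the new placement.
 * The move then tries, in order: a "fake" copy for unpopulated system bos,
 * the hardware copy paths set up by nouveau_bo_move_init(), and finally a
 * CPU memcpy after waiting for the GPU.
 */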
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	spin_unlock(&bo->bdev->fence_lock);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}
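
/*
 * io_mem_reserve/io_mem_free describe the bus address of a placement for
 * CPU mappings: the AGP aperture for TT memory and BAR1 for VRAM, where
 * NV50+ additionally maps the node through the BAR VM.
 */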
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
	int ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (nv_device(drm->device)->card_type >= NV_50 ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}
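
/*
 * Page population for a ttm_tt: SG-backed (prime) objects reuse the
 * importer's pages, AGP and swiotlb configurations use the corresponding
 * TTM pools, and the default path maps each page for device DMA.
 */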
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nv_device(drm->device);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = nv_device_map_page(device,
							     ttm->pages[i]);
		if (!ttm_dma->dma_address[i]) {
			/* unwind every page mapped so far, including page 0 */
			while (i--) {
				nv_device_unmap_page(device,
						     ttm_dma->dma_address[i]);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nv_device(drm->device);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			nv_device_unmap_page(device, ttm_dma->dma_address[i]);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
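
/*
 * Per-VM VMA bookkeeping: each bo keeps a list of the virtual mappings it
 * holds in the various channel VMs; lookup, creation and teardown follow.
 */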
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}