/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid. Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is. Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations. The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects. This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space. Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated. A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE, sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];	/* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
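
	/*
	 * Worked example (illustrative numbers only, not taken from the
	 * code above): a 32bit-tiled buffer 1920 pixels wide has rows of
	 * 1920 * 4 = 7680 bytes, so with 4kb pages the virtual stride is
	 * m = 1 + (7680 / 4096) = 2 pages; only the first 7680 bytes of
	 * each 8192-byte virtual row contain valid pixels.
	 */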

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
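
/* Example (illustrative sketch only; "fb_obj" and "pages_per_row" are
 * hypothetical caller state, error handling elided): a console scrolled
 * down by one text row could do
 *
 *	ret = omap_gem_roll(fb_obj, row * pages_per_row);
 *
 * which repins the backing pages at the rolled offset if the buffer is
 * currently mapped in DMM/TILER.
 */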

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
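
/* Example usage of the two functions above (illustrative sketch only,
 * error handling abbreviated; "obj" is a hypothetical GEM object):
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program DSS/DMA with paddr and wait for completion ...
 *	omap_gem_put_paddr(obj);
 *
 * The buffer stays pinned (and, if not already contiguous, mapped in
 * TILER) until the matching omap_gem_put_paddr().
 */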

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
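
/* Example usage (illustrative sketch only, error handling elided):
 *
 *	struct page **pages;
 *
 *	ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... access pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *	omap_gem_put_pages(obj);
 *
 * With remap=false nothing is pinned, so the omap_gem_put_pages() call
 * must be omitted.
 */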

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);	/* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient. So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
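
/* Example of the expected sequence for CPU read access to a buffer the
 * hw may still be writing (illustrative sketch only):
 *
 *	omap_gem_op_start(obj, OMAP_GEM_READ);    (mark read pending)
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);     (block until hw writes done)
 *	... CPU reads from the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ);   (mark read complete)
 */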

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);
		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap. Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);	/* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
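
/* Example (illustrative sketch only): allocating a write-combined
 * scanout buffer of "len" bytes plus a userspace handle, essentially
 * what omap_gem_dumb_create() above does:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	uint32_t handle;
 *
 *	ret = omap_gem_new_handle(dev, file, gsize,
 *			OMAP_BO_SCANOUT | OMAP_BO_WC, &handle);
 */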

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);
	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}