/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	u32 dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static u64 mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	u64 offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}
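
/*
 * Worked example (editorial note, not part of the original driver), assuming
 * PAGE_SIZE = 4096 and a usergart slot height of n = 64 (n_shift = 6) for the
 * faulting format. For a tiled buffer whose rows occupy 8192 bytes
 * (omap_obj->width << fmt == 8192):
 *
 *	m          = DIV_ROUND_UP(8192, 4096) = 2   (virtual stride in pages)
 *	pgoff      = 300  (say)
 *	base_pgoff = round_down(300, m << n_shift) = round_down(300, 128) = 256
 *
 * so the usergart entry is (re)pinned to the 4kb wide, 64 row column of the
 * buffer that contains page 300, and the m > 1 branch then selects which half
 * of each 8192 byte row (pgoff % m) that column corresponds to.
 */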
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
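
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a userspace client typically reaches the two helpers above, using the
 * standard dumb-buffer ioctls from the DRM UAPI headers. Error handling is
 * omitted and drm_fd is assumed to be an open DRM device node.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create); // omap_gem_dumb_create()
 *	map.handle = create.handle;
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);       // omap_gem_dumb_map_offset()
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, drm_fd, map.offset);         // faults land in omap_gem_fault()
 */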
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
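
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the expected calling sequence for a CACHED shmem object, following the
 * dirty-tracking scheme described above. A hypothetical in-kernel caller
 * would do:
 *
 *	struct page **pages;
 *
 *	omap_gem_get_pages(obj, &pages, true);        // attach backing pages
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE); // map dirty pages for the
 *						      // device, zap CPU mappings
 *	...device accesses the buffer...
 *
 * A later CPU write faults back in through omap_gem_fault(), where
 * omap_gem_cpu_sync_page() unmaps the touched page from the device, so the
 * next omap_gem_dma_sync_buffer() call knows that page is dirty again.
 */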
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
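
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the intended pin/unpin pattern for a caller that needs a device-visible,
 * contiguous view of the buffer for the duration of a scanout. Everything
 * except omap_gem_pin()/omap_gem_unpin() is hypothetical here.
 *
 *	dma_addr_t dma_addr;
 *	int ret;
 *
 *	ret = omap_gem_pin(obj, &dma_addr);	// refcounted; remaps through
 *	if (ret)				// TILER if not contiguous
 *		return ret;
 *
 *	program_scanout_hw(dma_addr);		// hypothetical helper
 *	...
 *	omap_gem_unpin(obj);			// balanced with the pin above
 */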
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
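
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the two calling modes of omap_gem_get_pages() described in the comment
 * above. Error handling is elided.
 *
 *	struct page **pages;
 *
 *	// process context: may allocate and attach shmem pages
 *	omap_gem_get_pages(obj, &pages, true);
 *	...
 *	omap_gem_put_pages(obj);
 *
 *	// atomic context: only succeeds if pages are already attached,
 *	// and no matching omap_gem_put_pages() call is made
 *	if (omap_gem_get_pages(obj, &pages, false) == 0)
 *		...
 */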
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem page backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
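
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the two ways a caller fills union omap_gem_size for omap_gem_new(), based
 * on how gsize is consumed above. The tiled flag name is assumed to come
 * from the omapdrm UAPI header (OMAP_BO_TILED_16BIT).
 *
 *	union omap_gem_size gsize;
 *	struct drm_gem_object *obj;
 *
 *	// untiled: plain byte size, page aligned by omap_gem_new()
 *	gsize.bytes = len;
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *
 *	// tiled: width/height in pixels, rounded up to TILER slot boundaries
 *	gsize.tiled.width = 1920;
 *	gsize.tiled.height = 1080;
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16BIT);
 */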
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}