/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	uint32_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};
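
/*
 * Worked example of the geometry above (illustrative numbers only,
 * assuming 4kb pages and 64x64-byte 8bit slots as on OMAP4's DMM): in
 * omap_gem_init() below, tiler_align(TILFMT_8BIT, w = 4096, h = 1)
 * would round the reserved region up to h = 64 rows, giving
 * height = 64, height_shift = 6 and
 * slot_shift = ilog2((4096 / 64) >> 0) = 6, i.e. 64 byte wide slots.
 */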
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			if (omap_obj->addrs[i])
				dma_unmap_page(obj->dev->dev,
					       omap_obj->addrs[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate the mmap
		 * offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
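
	/*
	 * Worked example (illustrative numbers): an 8bit tiled buffer
	 * 5000 bytes wide (format shift 0) gives m = 1 + 5000/4096 = 2,
	 * i.e. each row of slots spans two CPU pages of virtual stride.
	 */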
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
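
/*
 * Userspace usage sketch (illustrative, libdrm-style, not part of this
 * driver): dumb buffers are created through the generic DRM ioctl, so
 * no driver-specific userspace code is needed:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	// creq.pitch, creq.size and creq.handle are now filled in
 */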
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
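
/*
 * Userspace usage sketch (illustrative, continuing the create example
 * above): look up the fake offset and mmap() the buffer through the
 * DRM fd:
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.offset);
 */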
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif
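
/*
 * Usage sketch (illustrative, fbdev-style panning; `bo` and
 * `npages_per_row` are assumed names, the latter being the framebuffer
 * pitch in pages): rolling by whole rows scrolls without copying:
 *
 *	omap_gem_roll(bo, var->yoffset * npages_per_row);
 */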
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (!is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				omap_gem_mmap_size(obj), 1);
	}
}
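
/*
 * Usage sketch (illustrative): for a cached shmem buffer that the CPU
 * has written through its mapping, flush the dirty pages back before
 * handing the buffer to the DSS or another DMA master:
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	// ... CPU writes through a cached mapping ...
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);
 */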
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
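
/*
 * Usage sketch (illustrative): pin around scanout or DMA programming
 * and balance each pin with an unpin once the hardware is done with
 * the buffer:
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	// ... program the DSS/DMA engine with dma_addr ...
 *	omap_gem_unpin(obj);
 */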
/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
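
/*
 * Usage sketch (illustrative): from atomic context pass remap = false
 * and accept failure when the pages aren't attached yet; from process
 * context pass remap = true and balance with omap_gem_put_pages():
 *
 *	struct page **pages;
 *	if (omap_gem_get_pages(obj, &pages, false))	// atomic: no alloc
 *		return -ENOMEM;
 */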
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif
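
/*
 * Usage sketch (illustrative): map the buffer for CPU access from
 * fbdev code, holding struct_mutex as required and checking for an
 * ERR_PTR return:
 *
 *	mutex_lock(&obj->dev->struct_mutex);
 *	void *vaddr = omap_gem_vaddr(obj);
 *	if (!IS_ERR(vaddr))
 *		memset(vaddr, 0, obj->size);
 *	mutex_unlock(&obj->dev->struct_mutex);
 */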
/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem page backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
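
/*
 * Usage sketch (illustrative; assumes the uapi OMAP_BO_TILED_16 flag):
 * allocate a tiled buffer by dimensions, or a linear buffer by size,
 * via the omap_gem_size union:
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1280, .height = 720 },
 *	};
 *	struct drm_gem_object *bo =
 *		omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 */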
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
				&entry->dma_addr,
				usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}