/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2

struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
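
/*
 * Illustrative geometry (assumed numbers, not taken from the code): each
 * usergart region is one 4kb page wide, so if tiler_align() yields a slot
 * height of 64 rows for a given container, one entry window covers 64 pages
 * of the buffer (4kb x 64 rows) and height_shift is 6.  With
 * NUM_USERGART_ENTRIES == 2, at most two such windows per container are
 * mapped to userspace at once; older windows are torn down round-robin
 * (see evict_entry() and fault_2d() below).
 */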
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}
/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}
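
/*
 * Worked example (illustrative numbers only): suppose the virtual stride
 * works out to m = 3 pages per row and the slot height is n = 64 rows
 * (n_shift = 6).  A fault at pgoff = 200 then rounds down to
 * base_pgoff = round_down(200, 3 << 6) = 192, i.e. the start of the second
 * 64-row slot row of the mapping, and the 'm > 1' branch picks which
 * 4kb-wide column of that slot row gets pinned into the usergart entry.
 */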
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER. But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
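
/*
 * Userspace view (illustrative sketch): a client typically retrieves the
 * fake mmap offset with the OMAP_GEM_INFO ioctl and then mmaps the DRM fd
 * at that offset, which routes page faults through omap_gem_fault() above:
 *
 *	struct drm_omap_gem_info info = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info);
 *	buf = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, info.offset);
 */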
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
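
/*
 * Example (illustrative arithmetic): for a 1280x720 dumb buffer with
 * bpp = 32, align_pitch() yields at least 1280 * 4 = 5120 bytes per line,
 * so args->size = PAGE_ALIGN(5120 * 720) = 3686400 bytes, which here is
 * already an exact multiple of 4096 (900 pages).
 */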
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placement
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif
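
/*
 * Illustrative sketch (hypothetical caller, e.g. fbdev console panning):
 * rolling by some number of pages shifts which backing pages appear at the
 * start of the DMM mapping, so scrolling can be done without copying any
 * pixel data:
 *
 *	ret = omap_gem_roll(bo, (y_offset * pitch) >> PAGE_SHIFT);
 *	if (ret)
 *		// fall back to a software copy
 */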
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
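
/*
 * Illustrative flow (sketch, not a real call site): for an OMAP_BO_CACHED
 * shmem buffer the two halves of the protocol pair up as follows.  Before
 * hardware touches the buffer, omap_gem_dma_sync() maps (cleans) any page
 * the CPU dirtied and zaps the userspace PTEs; the next CPU write then
 * faults, and fault_1d() calls omap_gem_cpu_sync() to unmap that page again,
 * marking it dirty for the next DMA sync:
 *
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// before kicking DSS/GPU
 *	// ... later CPU write faults -> omap_gem_cpu_sync() ...
 */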
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
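
/*
 * Typical usage (illustrative sketch, hypothetical caller such as a plane
 * update): pin the buffer for scanout, program the hardware with the DMA
 * address, and drop the reference once the hardware is done with it:
 *
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	// ... point the DSS overlay at 'paddr' ...
 *	omap_gem_put_paddr(obj);	// when the buffer leaves the screen
 */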
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif
/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}

	return ret;
}
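
/*
 * Illustrative protocol (sketch, hypothetical caller): to read a buffer with
 * the CPU while hardware may still be writing it, block until the pending
 * writes complete, then bracket the CPU access with start/finish so later
 * writers can wait on it in turn:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);	// wait for pending hw writes
 *	omap_gem_op_start(obj, OMAP_GEM_READ);	// mark CPU read in progress
 *	// ... CPU reads the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ);	// unblocks waiting writers
 */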
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_writecombine(dev->dev, obj->size,
				omap_obj->vaddr, omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem page-backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
							 &omap_obj->paddr,
							 GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto fail;
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}
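
/*
 * Illustrative sketch (hypothetical in-kernel caller): allocating a 2d tiled
 * 16-bit buffer uses the 'tiled' member of the size union, letting the
 * constructor round the dimensions up to slot boundaries:
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1280, .height = 720 },
 *	};
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 */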
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}