/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;
	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb-wide by 64-row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;
	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: return location for the fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
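
/*
 * Illustrative sketch (not part of the driver): how userspace typically
 * reaches the two entry points above through the standard DRM dumb-buffer
 * UAPI. The create ioctl ends up in omap_gem_dumb_create(), the map ioctl
 * in omap_gem_dumb_map_offset(), and the final mmap() faults through
 * omap_gem_fault(). The fd is assumed to be an open DRM device node.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *fb;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  fd, map.offset);
 */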
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif
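
/*
 * Illustrative sketch (not part of the driver): a pan/scroll implementation
 * such as an fbdev pan_display hook can convert a line offset into the
 * whole-page roll that omap_gem_roll() expects. The names fbi and bo are
 * assumptions.
 *
 *	unsigned int npages = fbi->fix.line_length >> PAGE_SHIFT;
 *	omap_gem_roll(bo, fbi->var.yoffset * npages);
 */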
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */
/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
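
/*
 * Illustrative sketch (not part of the driver): the intended pairing of the
 * two sync helpers above for a cached shmem buffer. The DMA side maps every
 * CPU-dirtied page (and unmaps the userspace mapping so CPU access faults
 * again) before hardware touches the buffer; the CPU side then unmaps a page
 * on first access, from the fault path, so it is tracked as dirty again.
 *
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// before hw reads the buffer
 *	... hardware access ...
 *	omap_gem_cpu_sync(obj, pgoff);		// before CPU touches a page
 */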
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
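
/*
 * Illustrative sketch (not part of the driver): the reference-counted
 * pin/unpin pattern callers are expected to follow. A scanout path, for
 * example, holds the reference for as long as hardware scans the buffer.
 * The surrounding variable names are assumptions.
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program DMA/DSS with paddr ...
 *	omap_gem_put_paddr(obj);	// drop the pin when done
 */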
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
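
/*
 * Illustrative sketch (not part of the driver): from atomic context a caller
 * must pass remap = false and be prepared for -ENOMEM when the pages have
 * not been attached yet, instead of sleeping on struct_mutex.
 *
 *	struct page **pages;
 *	if (omap_gem_get_pages(obj, &pages, false))
 *		return;		// not backed yet; try again later
 *	... use pages; no omap_gem_put_pages() needed in this mode ...
 */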
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: " fmt "\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}

	return ret;
}
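
/*
 * Illustrative sketch (not part of the driver): how op_start/op_finish/
 * op_sync fit together. A DMA producer brackets its hardware access, and a
 * CPU consumer blocks until the pending write completes (waiting for reads
 * works symmetrically with OMAP_GEM_WRITE). The step around the hardware
 * access is an assumption.
 *
 *	// DMA side:
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... kick hardware; then, from its completion callback: ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	// CPU side, before reading the buffer:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 */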
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}
	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
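
/*
 * Illustrative sketch (not part of the driver): a typical in-kernel caller,
 * such as an ioctl handler, allocating a buffer plus userspace handle in one
 * step. The size value is an assumption.
 *
 *	union omap_gem_size gsize = { .bytes = SZ_1M };
 *	uint32_t handle;
 *	int ret = omap_gem_new_handle(dev, file, gsize, OMAP_BO_WC, &handle);
 */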
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}