/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"

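/*
 * One entry per open VMA of a legacy map, kept on drm_device::vmalist
 * together with the pid of the task that opened it.
 */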
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

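/*
 * Compute the page protection for an I/O mapping (registers or frame
 * buffer): uncached for register space unless write combining was
 * requested, write-combined where the architecture supports it.
 */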
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

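/*
 * Compute the page protection for DMA memory: cached by default,
 * uncached on PowerPC parts without coherent caches.
 */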
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

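/*
 * Thin wrappers so each vm_operations_struct below can point at the
 * matching fault handler with the signature the VM expects.
 */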
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
			struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
			 struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp DRM file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

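/*
 * Base offset to add to map offsets when remapping registers or the
 * frame buffer: the dense memory base on Alpha, zero elsewhere.
 */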
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap DMA or mapped memory.
 *
 * \param filp DRM file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from
			 * the CPU, so for memory of type DRM_AGP, we'll deal with
			 * sorting out the real physical pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

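/*
 * Exported mmap entry point for legacy drivers: rejects unplugged
 * devices, then runs drm_mmap_locked() under dev->struct_mutex.
 */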
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}

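/*
 * Info-file helper: dumps the vmalist, one line per tracked VMA with its
 * owner pid, address range and flags (plus page protection bits on i386).
 */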
int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
	unsigned long vma_count = 0;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(pt, &dev->vmalist, head)
		vma_count++;

	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
		   vma_count, high_memory,
		   (void *)(unsigned long)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)
			continue;
		seq_printf(m,
			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
			   pt->pid,
			   (void *)vma->vm_start, (void *)vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}