i915_gem_shrinker.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

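/*
 * The shrinker may be invoked from direct reclaim while this driver is
 * already holding dev->struct_mutex (e.g. while allocating backing pages
 * for an object). mutex_trylock_recursive() distinguishes that case: on
 * MUTEX_TRYLOCK_RECURSIVE we proceed without taking the lock again and
 * must not drop it on the way out, which is what *unlock records.
 */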
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        switch (mutex_trylock_recursive(&dev->struct_mutex)) {
        case MUTEX_TRYLOCK_FAILED:
                return false;

        case MUTEX_TRYLOCK_SUCCESS:
                *unlock = true;
                return true;

        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;
        }

        BUG();
}

static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
{
        if (!unlock)
                return;

        mutex_unlock(&dev->struct_mutex);
}

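/*
 * An object cannot be unbound while any of its VMAs is pinned; report
 * whether that is the case so the shrinker can skip such objects.
 */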
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (i915_vma_is_pinned(vma))
                        return true;

        return false;
}

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

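/*
 * Decide whether dropping this object's pages would actually return memory
 * to the system: the object must be shrinkable, its pages must not be
 * pinned for any reason other than its GPU bindings, and its contents must
 * be either discardable or swappable.
 */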
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
        if (!obj->mm.pages)
                return false;

        /* Consider only shrinkable objects. */
        if (!i915_gem_object_is_shrinkable(obj))
                return false;

        /* Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
         *
         * If the pages are pinned for any other reason than being bound
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
        if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
                return false;

        if (any_vma_pinned(obj))
                return false;

        /* We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
        return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

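/*
 * Try to unbind the object and then drop its backing pages. "Unsafe" in the
 * sense that the caller holds struct_mutex but not obj->mm.lock, so success
 * is re-checked afterwards by peeking at obj->mm.pages.
 */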
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

        return !READ_ONCE(obj->mm.pages);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
                unsigned long target, unsigned flags)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
                { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        unsigned long count = 0;
        bool unlock;

        if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
                return 0;

        trace_i915_gem_shrink(dev_priv, target, flags);
        i915_gem_retire_requests(dev_priv);

        /*
         * Unbinding of objects will require HW access; Let us not wake the
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
        if ((flags & I915_SHRINK_BOUND) &&
            !intel_runtime_pm_get_if_in_use(dev_priv))
                flags &= ~I915_SHRINK_BOUND;

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at a time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count of 0.
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
                struct drm_i915_gem_object *obj;

                if ((flags & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
                                                       global_link))) {
                        list_move_tail(&obj->global_link, &still_in_list);

                        if (!obj->mm.pages) {
                                list_del_init(&obj->global_link);
                                continue;
                        }

                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->mm.madv != I915_MADV_DONTNEED)
                                continue;

                        if (flags & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mm.mapping))
                                continue;

                        if (!(flags & I915_SHRINK_ACTIVE) &&
                            (i915_gem_object_is_active(obj) ||
                             i915_gem_object_is_framebuffer(obj)))
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        if (unsafe_drop_pages(obj)) {
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
                                if (!obj->mm.pages) {
                                        __i915_gem_object_invalidate(obj);
                                        list_del_init(&obj->global_link);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
                                mutex_unlock(&obj->mm.lock);
                        }
                }
                list_splice_tail(&still_in_list, phase->list);
        }

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(dev_priv);

        i915_gem_retire_requests(dev_priv);

        i915_gem_shrinker_unlock(&dev_priv->drm, unlock);

        return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests, so that backing storage for active objects can be released as
 * well.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
        unsigned long freed;

        intel_runtime_pm_get(dev_priv);
        freed = i915_gem_shrink(dev_priv, -1UL,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(dev_priv);

        return freed;
}

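/*
 * Core-MM shrinker callback: count how many pages we could plausibly free.
 * Only inactive objects on the bound list are counted, matching what
 * i915_gem_shrinker_scan() will actually attempt to release.
 */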
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        i915_gem_retire_requests(dev_priv);

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
                if (can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
                if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        i915_gem_shrinker_unlock(dev, unlock);

        return count;
}

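/*
 * Core-MM shrinker callback: try to free sc->nr_to_scan pages. Purgeable
 * (madvised DONTNEED) objects are reaped first; only if that does not meet
 * the target do we start evicting swappable objects as well.
 */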
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = &dev_priv->drm;
        unsigned long freed;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_shrink(dev_priv,
                                sc->nr_to_scan,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink(dev_priv,
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);

        i915_gem_shrinker_unlock(dev, unlock);

        return freed;
}

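/*
 * The OOM and vmap-purge notifiers cannot back off the way the regular
 * shrinker can, so they spin (up to a timeout) waiting for the GPU to idle
 * and for struct_mutex to become available, and temporarily mark the device
 * as non-interruptible while they hold it.
 */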
struct shrinker_lock_uninterruptible {
        bool was_interruptible;
        bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
                                       struct shrinker_lock_uninterruptible *slu,
                                       int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

        do {
                if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
                    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
                        break;

                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;

                if (time_after(jiffies, timeout)) {
                        pr_err("Unable to lock GPU to purge memory.\n");
                        return false;
                }
        } while (1);

        slu->was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
        return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
{
        dev_priv->mm.interruptible = slu->was_interruptible;
        i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}

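/*
 * OOM notifier: as a last resort, shrink every cache (including active
 * objects), report how many pages were freed, and summarise what is still
 * pinned on the bound/unbound lists for the OOM report.
 */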
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        freed_pages = i915_gem_shrink_all(dev_priv);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
                if (!obj->mm.pages)
                        continue;

                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
                if (!obj->mm.pages)
                        continue;

                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        bound += obj->base.size >> PAGE_SHIFT;
        }

        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu pages freed, %lu pages still pinned.\n",
                        freed_pages, unevictable);
        if (unbound || bound)
                pr_err("%lu and %lu pages still available in the bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

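/*
 * vmap-purge notifier: vmap address space is running low, so drop the pages
 * of every object whose kernel mapping lives in vmap space, then unbind any
 * GGTT vma with a cached iomap, since iomaps also consume vmap space.
 */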
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        int ret;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        /* Force everything onto the inactive lists */
        ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
        if (ret)
                goto out;

        intel_runtime_pm_get(dev_priv);
        freed_pages += i915_gem_shrink(dev_priv, -1UL,
                                       I915_SHRINK_BOUND |
                                       I915_SHRINK_UNBOUND |
                                       I915_SHRINK_ACTIVE |
                                       I915_SHRINK_VMAPS);
        intel_runtime_pm_put(dev_priv);

        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
                                 &dev_priv->ggtt.base.inactive_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;

                if (vma->iomap && i915_vma_unbind(vma) == 0)
                        freed_pages += count;
        }

out:
        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

        dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
        WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);
}