ttm_memory.c

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4
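
/**
 * struct ttm_mem_zone - accounting zone for one class of system memory
 * @kobj:       sysfs object exposing the limits below.
 * @glob:       the owning &struct ttm_mem_global.
 * @name:       zone name ("kernel", "highmem" or "dma32").
 * @zone_mem:   total amount of memory in this zone, in bytes.
 * @emer_mem:   emergency limit; only CAP_SYS_ADMIN callers may account
 *              memory beyond @max_mem, up to this value.
 * @max_mem:    limit for ordinary allocations, in bytes.
 * @swap_limit: once @used_mem exceeds this, the swap worker is queued.
 * @used_mem:   memory currently accounted against this zone, in bytes.
 *
 * All fields are protected by @glob->lock.
 */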
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};

static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};
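
/*
 * The attributes above appear per zone under the ttm sysfs directory,
 * with values read and written in KiB. An illustrative shell session
 * (the exact path depends on where ttm_get_kobj() is registered):
 *
 *	# cat .../memory_accounting/kernel/available_memory
 *	# echo 2097152 > .../memory_accounting/kernel/available_memory
 *
 * Per ttm_mem_zone_store(), lowering emergency_memory clamps
 * available_memory, and raising available_memory raises emergency_memory.
 */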
static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KiB */
	val <<= (PAGE_SHIFT - 10);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KiB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};
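
/*
 * Check whether any zone is above its swap target. Must be called with
 * @glob->lock held. @from_wq selects the (lower) swap_limit target used
 * by the background worker; direct callers are checked against max_mem,
 * or emer_mem if they have CAP_SYS_ADMIN. @extra is the amount the
 * caller is about to account on top of the current usage.
 */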
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}

	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
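
/*
 * Worked example of the default zone sizing above: on a machine with
 * 16 GiB of low memory, max_mem = 8 GiB (mem >> 1), emer_mem = 12 GiB
 * ((mem >> 1) + (mem >> 2)) and swap_limit = 6 GiB (max_mem - (mem >> 3)),
 * so background swapping kicks in well before the hard limits are hit.
 */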
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif
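
/**
 * ttm_mem_global_init - initialize TTM memory accounting.
 * @glob: the caller-allocated &struct ttm_mem_global.
 *
 * Sets up the swap workqueue and sysfs objects, creates the kernel zone
 * plus a highmem or dma32 zone as appropriate, and initializes the page
 * allocators. On zone-creation failure, everything created so far is
 * torn down via ttm_mem_global_release().
 */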
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	/* Set to 0 by default to keep the original OOM behavior. */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
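
/*
 * Illustrative driver-side setup (callback names hypothetical): in this
 * kernel generation, drivers typically register the global through the
 * drm_global machinery rather than calling these directly, e.g.:
 *
 *	static int mydrv_mem_global_init(struct drm_global_reference *ref)
 *	{
 *		return ttm_mem_global_init(ref->object);
 *	}
 *
 *	static void mydrv_mem_global_release(struct drm_global_reference *ref)
 *	{
 *		ttm_mem_global_release(ref->object);
 *	}
 */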
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
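
/*
 * Queue the background swap worker if any zone has crossed its
 * swap_limit. Called whenever limits change and after every reservation.
 */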
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
/*
 * Check whether available memory would drop below the lower memory
 * limit if this allocation went ahead.
 *
 * The limit is compared against the sum of free swap space and
 * available system memory, so an allocation is still allowed when
 * either resource on its own is low, as long as the combined headroom
 * (minus the pages requested) stays above glob->lower_mem_limit.
 * Returns true if the caller should refuse the allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);
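
/*
 * Try to account @amount bytes against every zone, or only against
 * @single_zone when non-NULL. Fails with -ENOMEM if any affected zone
 * is already above its limit: max_mem for ordinary callers, emer_mem
 * for CAP_SYS_ADMIN. With @reserve false this is only a limit check.
 */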
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
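
/*
 * Illustrative accounting pattern (object name hypothetical): a caller
 * accounts a bookkeeping structure before allocating it and releases
 * the same amount on failure or teardown:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	uint64_t acc_size = ttm_round_pot(sizeof(*obj));
 *	int ret = ttm_mem_global_alloc(glob, acc_size, &ctx);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, acc_size);
 */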
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}
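
/*
 * Round an allocation size up to a power of two below PAGE_SIZE, or to
 * a whole number of pages above it. For example, with 4 KiB pages:
 * ttm_round_pot(3) == 4, ttm_round_pot(24) == 32 and
 * ttm_round_pot(5000) == 8192.
 */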
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);