binder_alloc.c

/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

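/*
 * A binder_buffer carries no explicit length; its size is the distance
 * from its data pointer to the next buffer's data pointer in the
 * address-ordered alloc->buffers list, or to the end of the mapped
 * region for the last buffer.
 */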
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

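/*
 * Free buffers are kept in an rbtree keyed by their computed size so the
 * allocator can do a best-fit search; entries of equal size go into the
 * right-hand subtree.
 */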
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);
	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

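/*
 * In-use buffers live in a second rbtree keyed by the kernel address of
 * their data area, so a buffer can be found again from the user-space
 * pointer handed back to the caller.
 */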
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

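/*
 * Convert the user-space pointer back to a kernel address using
 * user_buffer_offset and look it up in the allocated_buffers tree.
 * allow_user_free is cleared on success so a repeated free request
 * from userspace is rejected rather than freeing the buffer twice.
 */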
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer, NULL if no buffer matches the user pointer,
 * or ERR_PTR(-EPERM) if the buffer is not currently allowed to be freed.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

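/*
 * Allocate or free the physical pages backing the kernel range
 * [start, end). On allocate, pages that are still present are pulled
 * back off the binder_alloc_lru; missing pages are freshly allocated,
 * mapped into the kernel area and inserted into the task's VMA. On
 * free, pages are not released immediately but parked on the LRU so
 * the shrinker can reclaim them lazily under memory pressure.
 */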
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);
	if (end <= start)
		return 0;
	trace_binder_update_page_range(alloc, allocate, start, end);
	if (allocate == 0)
		goto free_range;
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}
	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;
	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}
	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];
		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);
			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);
			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}
		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;
		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];
		trace_binder_free_lru_start(alloc, index);
		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);
		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures are
	 * set up completely. See the smp_rmb() side in binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always visible
	 * if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

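/*
 * Core allocator: best-fit search of the free_buffers tree, physical
 * backing only for the whole pages the new buffer actually covers, and
 * a split of any remainder into a new free buffer so it can be handed
 * out later.
 */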
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}
	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}
	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

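/*
 * Page helpers for the free path: buffer_start_page() is the page
 * containing the buffer's first data byte, prev_buffer_end_page() the
 * page containing the byte just before it. Comparing them against a
 * neighbour shows whether a page is shared between two buffers and so
 * must not be released yet.
 */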
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}
	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}
	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

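/*
 * Return the pages the buffer fully covers to the LRU, mark it free and
 * coalesce it with free neighbours in the address-ordered list before
 * reinserting the (possibly merged) buffer into the free_buffers tree.
 */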
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}
	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);
	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

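/*
 * Tear down the allocator when the binder proc is released: free any
 * buffers still in the allocated tree (their transactions must already
 * be gone), drop the remaining free buffer structs and the page array,
 * vfree the kernel area and drop the mm reference taken at mmap time.
 */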
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}
	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);
		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}
	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;
			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);
	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru that contains the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;
	if (!page->page_ptr)
		goto err_page_already_freed;
	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = binder_alloc_get_vma(alloc);
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}
	list_lru_isolate(lru, item);
	spin_unlock(lock);
	if (vma) {
		trace_binder_unmap_user_start(alloc, index);
		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);
		trace_binder_unmap_user_end(alloc, index);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	trace_binder_unmap_kernel_start(alloc, index);
	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;
	trace_binder_unmap_kernel_end(alloc, index);
	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

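/*
 * Shrinker plumbing: the count callback reports how many binder pages
 * sit on the global binder_alloc_lru, the scan callback walks that list
 * and frees up to sc->nr_to_scan of them via binder_alloc_free_page().
 */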
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

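/*
 * One-time setup of the global page LRU and its shrinker; if shrinker
 * registration fails, the LRU is torn down again so no half-initialized
 * state is left behind.
 */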
int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}